@@ -293,7 +293,7 @@ u32 iommufd_device_to_id(struct iommufd_device *idev)
 EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, IOMMUFD);
 
 static int iommufd_group_setup_msi(struct iommufd_group *igroup,
-				   struct iommufd_hw_pagetable *hwpt)
+				   struct iommufd_hwpt_paging *hwpt_paging)
 {
 	phys_addr_t sw_msi_start = igroup->sw_msi_start;
 	int rc;
@@ -311,36 +311,39 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup,
 	 * matches what the IRQ layer actually expects in a newly created
 	 * domain.
 	 */
-	if (sw_msi_start != PHYS_ADDR_MAX && !hwpt->msi_cookie) {
-		rc = iommu_get_msi_cookie(hwpt->domain, sw_msi_start);
+	if (sw_msi_start != PHYS_ADDR_MAX && !hwpt_paging->msi_cookie) {
+		rc = iommu_get_msi_cookie(hwpt_paging->common.domain,
+					  sw_msi_start);
 		if (rc)
 			return rc;
 
 		/*
 		 * iommu_get_msi_cookie() can only be called once per domain,
 		 * it returns -EBUSY on later calls.
 		 */
-		hwpt->msi_cookie = true;
+		hwpt_paging->msi_cookie = true;
 	}
 	return 0;
 }
 
-static int iommufd_hwpt_paging_attach(struct iommufd_hw_pagetable *hwpt,
+static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging,
				      struct iommufd_device *idev)
 {
 	int rc;
 
 	lockdep_assert_held(&idev->igroup->lock);
 
-	rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
+	rc = iopt_table_enforce_dev_resv_regions(&hwpt_paging->ioas->iopt,
+						 idev->dev,
						 &idev->igroup->sw_msi_start);
 	if (rc)
 		return rc;
 
 	if (list_empty(&idev->igroup->device_list)) {
-		rc = iommufd_group_setup_msi(idev->igroup, hwpt);
+		rc = iommufd_group_setup_msi(idev->igroup, hwpt_paging);
 		if (rc) {
-			iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+			iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt,
+						  idev->dev);
 			return rc;
 		}
 	}
@@ -360,7 +363,7 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 	}
 
 	if (hwpt_is_paging(hwpt)) {
-		rc = iommufd_hwpt_paging_attach(hwpt, idev);
+		rc = iommufd_hwpt_paging_attach(to_hwpt_paging(hwpt), idev);
 		if (rc)
 			goto err_unlock;
 	}
@@ -384,7 +387,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 	return 0;
 err_unresv:
 	if (hwpt_is_paging(hwpt))
-		iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+		iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt,
+					  idev->dev);
 err_unlock:
 	mutex_unlock(&idev->igroup->lock);
 	return rc;
@@ -402,7 +406,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev)
 		idev->igroup->hwpt = NULL;
 	}
 	if (hwpt_is_paging(hwpt))
-		iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+		iopt_remove_reserved_iova(&to_hwpt_paging(hwpt)->ioas->iopt,
+					  idev->dev);
 	mutex_unlock(&idev->igroup->lock);
 
 	/* Caller must destroy hwpt */
@@ -423,41 +428,43 @@ iommufd_device_do_attach(struct iommufd_device *idev,
 
 static void
 iommufd_group_remove_reserved_iova(struct iommufd_group *igroup,
-				   struct iommufd_hw_pagetable *hwpt)
+				   struct iommufd_hwpt_paging *hwpt_paging)
 {
 	struct iommufd_device *cur;
 
 	lockdep_assert_held(&igroup->lock);
 
 	list_for_each_entry(cur, &igroup->device_list, group_item)
-		iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
+		iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, cur->dev);
 }
 
-static int iommufd_group_do_replace_paging(struct iommufd_group *igroup,
-					   struct iommufd_hw_pagetable *hwpt)
+static int
+iommufd_group_do_replace_paging(struct iommufd_group *igroup,
+				struct iommufd_hwpt_paging *hwpt_paging)
 {
 	struct iommufd_hw_pagetable *old_hwpt = igroup->hwpt;
 	struct iommufd_device *cur;
 	int rc;
 
 	lockdep_assert_held(&igroup->lock);
 
-	if (!hwpt_is_paging(old_hwpt) || hwpt->ioas != old_hwpt->ioas) {
+	if (!hwpt_is_paging(old_hwpt) ||
+	    hwpt_paging->ioas != to_hwpt_paging(old_hwpt)->ioas) {
 		list_for_each_entry(cur, &igroup->device_list, group_item) {
 			rc = iopt_table_enforce_dev_resv_regions(
-				&hwpt->ioas->iopt, cur->dev, NULL);
+				&hwpt_paging->ioas->iopt, cur->dev, NULL);
 			if (rc)
 				goto err_unresv;
 		}
 	}
 
-	rc = iommufd_group_setup_msi(igroup, hwpt);
+	rc = iommufd_group_setup_msi(igroup, hwpt_paging);
 	if (rc)
 		goto err_unresv;
 	return 0;
 
 err_unresv:
-	iommufd_group_remove_reserved_iova(igroup, hwpt);
+	iommufd_group_remove_reserved_iova(igroup, hwpt_paging);
 	return rc;
 }
 
@@ -482,8 +489,10 @@ iommufd_device_do_replace(struct iommufd_device *idev,
 		return NULL;
 	}
 
+	old_hwpt = igroup->hwpt;
 	if (hwpt_is_paging(hwpt)) {
-		rc = iommufd_group_do_replace_paging(igroup, hwpt);
+		rc = iommufd_group_do_replace_paging(igroup,
+						     to_hwpt_paging(hwpt));
 		if (rc)
 			goto err_unlock;
 	}
@@ -492,10 +501,11 @@ iommufd_device_do_replace(struct iommufd_device *idev,
 	if (rc)
 		goto err_unresv;
 
-	old_hwpt = igroup->hwpt;
 	if (hwpt_is_paging(old_hwpt) &&
-	    (!hwpt_is_paging(hwpt) || hwpt->ioas != old_hwpt->ioas))
-		iommufd_group_remove_reserved_iova(igroup, old_hwpt);
+	    (!hwpt_is_paging(hwpt) ||
+	     to_hwpt_paging(hwpt)->ioas != to_hwpt_paging(old_hwpt)->ioas))
+		iommufd_group_remove_reserved_iova(igroup,
+						   to_hwpt_paging(old_hwpt));
 
 	igroup->hwpt = hwpt;
 
@@ -514,7 +524,8 @@ iommufd_device_do_replace(struct iommufd_device *idev,
 	return old_hwpt;
 err_unresv:
 	if (hwpt_is_paging(hwpt))
-		iommufd_group_remove_reserved_iova(igroup, hwpt);
+		iommufd_group_remove_reserved_iova(igroup,
+						   to_hwpt_paging(old_hwpt));
 err_unlock:
 	mutex_unlock(&idev->igroup->lock);
 	return ERR_PTR(rc);
@@ -542,6 +553,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
 	 */
 	bool immediate_attach = do_attach == iommufd_device_do_attach;
 	struct iommufd_hw_pagetable *destroy_hwpt;
+	struct iommufd_hwpt_paging *hwpt_paging;
 	struct iommufd_hw_pagetable *hwpt;
 
 	/*
@@ -550,10 +562,11 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
 	 * other.
 	 */
 	mutex_lock(&ioas->mutex);
-	list_for_each_entry(hwpt, &ioas->hwpt_list, hwpt_item) {
-		if (!hwpt->auto_domain)
+	list_for_each_entry(hwpt_paging, &ioas->hwpt_list, hwpt_item) {
+		if (!hwpt_paging->auto_domain)
 			continue;
 
+		hwpt = &hwpt_paging->common;
 		if (!iommufd_lock_obj(&hwpt->obj))
 			continue;
 		destroy_hwpt = (*do_attach)(idev, hwpt);
@@ -574,12 +587,13 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
 		goto out_unlock;
 	}
 
-	hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev,
-					  0, immediate_attach);
-	if (IS_ERR(hwpt)) {
-		destroy_hwpt = ERR_CAST(hwpt);
+	hwpt_paging = iommufd_hwpt_paging_alloc(idev->ictx, ioas, idev, 0,
+						immediate_attach);
+	if (IS_ERR(hwpt_paging)) {
+		destroy_hwpt = ERR_CAST(hwpt_paging);
 		goto out_unlock;
 	}
+	hwpt = &hwpt_paging->common;
 
 	if (!immediate_attach) {
 		destroy_hwpt = (*do_attach)(idev, hwpt);
@@ -589,7 +603,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev,
 		destroy_hwpt = NULL;
 	}
 
-	hwpt->auto_domain = true;
+	hwpt_paging->auto_domain = true;
 	*pt_id = hwpt->obj.id;
 
 	iommufd_object_finalize(idev->ictx, &hwpt->obj);
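Note on the types used above: the hunks assume that struct iommufd_hwpt_paging embeds the generic struct iommufd_hw_pagetable as its common member and that to_hwpt_paging() recovers the wrapper from a generic hwpt pointer. Those definitions live in the driver's private header rather than in this file, so the following is only a minimal sketch under that assumption; only the fields actually referenced in the diff (common, ioas, auto_domain, msi_cookie, hwpt_item) are shown, and the exact layout is illustrative.

/*
 * Sketch only -- not the verbatim kernel definitions. A paging hw_pagetable
 * wraps the generic object, and to_hwpt_paging() downcasts with
 * container_of(); callers are expected to check hwpt_is_paging() first.
 */
struct iommufd_hwpt_paging {
	struct iommufd_hw_pagetable common;
	struct iommufd_ioas *ioas;
	bool auto_domain : 1;
	bool msi_cookie : 1;
	/* Entry on iommufd_ioas::hwpt_list, walked by the auto-domain lookup */
	struct list_head hwpt_item;
};

static inline struct iommufd_hwpt_paging *
to_hwpt_paging(struct iommufd_hw_pagetable *hwpt)
{
	return container_of(hwpt, struct iommufd_hwpt_paging, common);
}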