@@ -325,6 +325,28 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup,
 	return 0;
 }
 
+static int iommufd_hwpt_paging_attach(struct iommufd_hw_pagetable *hwpt,
+				      struct iommufd_device *idev)
+{
+	int rc;
+
+	lockdep_assert_held(&idev->igroup->lock);
+
+	rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
+						 &idev->igroup->sw_msi_start);
+	if (rc)
+		return rc;
+
+	if (list_empty(&idev->igroup->device_list)) {
+		rc = iommufd_group_setup_msi(idev->igroup, hwpt);
+		if (rc) {
+			iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+			return rc;
+		}
+	}
+	return 0;
+}
+
 int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 				struct iommufd_device *idev)
 {
@@ -337,10 +359,11 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 		goto err_unlock;
 	}
 
-	rc = iopt_table_enforce_dev_resv_regions(&hwpt->ioas->iopt, idev->dev,
-						 &idev->igroup->sw_msi_start);
-	if (rc)
-		goto err_unlock;
+	if (hwpt_is_paging(hwpt)) {
+		rc = iommufd_hwpt_paging_attach(hwpt, idev);
+		if (rc)
+			goto err_unlock;
+	}
 
 	/*
 	 * Only attach to the group once for the first device that is in the
@@ -350,10 +373,6 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 	 * attachment.
 	 */
 	if (list_empty(&idev->igroup->device_list)) {
-		rc = iommufd_group_setup_msi(idev->igroup, hwpt);
-		if (rc)
-			goto err_unresv;
-
 		rc = iommu_attach_group(hwpt->domain, idev->igroup->group);
 		if (rc)
 			goto err_unresv;
@@ -364,7 +383,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
 	mutex_unlock(&idev->igroup->lock);
 	return 0;
 err_unresv:
-	iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+	if (hwpt_is_paging(hwpt))
+		iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
 err_unlock:
 	mutex_unlock(&idev->igroup->lock);
 	return rc;
@@ -381,7 +401,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev)
 		iommu_detach_group(hwpt->domain, idev->igroup->group);
 		idev->igroup->hwpt = NULL;
 	}
-	iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+	if (hwpt_is_paging(hwpt))
+		iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
 	mutex_unlock(&idev->igroup->lock);
 
 	/* Caller must destroy hwpt */
@@ -400,13 +421,52 @@ iommufd_device_do_attach(struct iommufd_device *idev,
 	return NULL;
 }
 
+static void
+iommufd_group_remove_reserved_iova(struct iommufd_group *igroup,
+				   struct iommufd_hw_pagetable *hwpt)
+{
+	struct iommufd_device *cur;
+
+	lockdep_assert_held(&igroup->lock);
+
+	list_for_each_entry(cur, &igroup->device_list, group_item)
+		iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
+}
+
+static int iommufd_group_do_replace_paging(struct iommufd_group *igroup,
+					   struct iommufd_hw_pagetable *hwpt)
+{
+	struct iommufd_hw_pagetable *old_hwpt = igroup->hwpt;
+	struct iommufd_device *cur;
+	int rc;
+
+	lockdep_assert_held(&igroup->lock);
+
+	if (!hwpt_is_paging(old_hwpt) || hwpt->ioas != old_hwpt->ioas) {
+		list_for_each_entry(cur, &igroup->device_list, group_item) {
+			rc = iopt_table_enforce_dev_resv_regions(
+				&hwpt->ioas->iopt, cur->dev, NULL);
+			if (rc)
+				goto err_unresv;
+		}
+	}
+
+	rc = iommufd_group_setup_msi(igroup, hwpt);
+	if (rc)
+		goto err_unresv;
+	return 0;
+
+err_unresv:
+	iommufd_group_remove_reserved_iova(igroup, hwpt);
+	return rc;
+}
+
 static struct iommufd_hw_pagetable *
 iommufd_device_do_replace(struct iommufd_device *idev,
 			  struct iommufd_hw_pagetable *hwpt)
 {
 	struct iommufd_group *igroup = idev->igroup;
 	struct iommufd_hw_pagetable *old_hwpt;
-	struct iommufd_device *cur;
 	unsigned int num_devices;
 	int rc;
 
@@ -422,29 +482,20 @@ iommufd_device_do_replace(struct iommufd_device *idev,
 		return NULL;
 	}
 
-	old_hwpt = igroup->hwpt;
-	if (hwpt->ioas != old_hwpt->ioas) {
-		list_for_each_entry(cur, &igroup->device_list, group_item) {
-			rc = iopt_table_enforce_dev_resv_regions(
-				&hwpt->ioas->iopt, cur->dev, NULL);
-			if (rc)
-				goto err_unresv;
-		}
+	if (hwpt_is_paging(hwpt)) {
+		rc = iommufd_group_do_replace_paging(igroup, hwpt);
+		if (rc)
+			goto err_unlock;
 	}
 
-	rc = iommufd_group_setup_msi(idev->igroup, hwpt);
-	if (rc)
-		goto err_unresv;
-
 	rc = iommu_group_replace_domain(igroup->group, hwpt->domain);
 	if (rc)
 		goto err_unresv;
 
-	if (hwpt->ioas != old_hwpt->ioas) {
-		list_for_each_entry(cur, &igroup->device_list, group_item)
-			iopt_remove_reserved_iova(&old_hwpt->ioas->iopt,
-						  cur->dev);
-	}
+	old_hwpt = igroup->hwpt;
+	if (hwpt_is_paging(old_hwpt) &&
+	    (!hwpt_is_paging(hwpt) || hwpt->ioas != old_hwpt->ioas))
+		iommufd_group_remove_reserved_iova(igroup, old_hwpt);
 
 	igroup->hwpt = hwpt;
 
@@ -462,8 +513,8 @@ iommufd_device_do_replace(struct iommufd_device *idev,
 	/* Caller must destroy old_hwpt */
 	return old_hwpt;
 err_unresv:
-	list_for_each_entry(cur, &igroup->device_list, group_item)
-		iopt_remove_reserved_iova(&hwpt->ioas->iopt, cur->dev);
+	if (hwpt_is_paging(hwpt))
+		iommufd_group_remove_reserved_iova(igroup, hwpt);
 err_unlock:
 	mutex_unlock(&idev->igroup->lock);
 	return ERR_PTR(rc);
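
Note on the new hwpt_is_paging() checks above: the patch gates the reserved-IOVA and MSI setup behind that predicate because only a paging hw_pagetable is backed by an IOAS; its exact definition is not part of this diff. A minimal illustrative sketch of such a predicate, assuming it simply tests the iommufd object type (the real helper lives in the iommufd private headers and may differ):

/*
 * Illustrative sketch only, not taken from this commit: assumes a paging
 * hw_pagetable can be told apart from a nested one by its object type,
 * since only paging hwpts own an ioas and therefore need reserved-IOVA
 * enforcement and sw_msi setup.
 */
static inline bool hwpt_is_paging(struct iommufd_hw_pagetable *hwpt)
{
	return hwpt->obj.type == IOMMUFD_OBJ_HW_PAGETABLE;
}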