@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 					  struct mm_struct *mm)
 {
 	int ret;
-	unsigned long flags;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_mmu_notifier *smmu_mn;
-	struct arm_smmu_master *master;
 
 	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
 		if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 		goto err_free_cd;
 	}
 
-	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
-					      cd);
-		if (ret) {
-			list_for_each_entry_from_reverse(
-				master, &smmu_domain->devices, domain_head)
-				arm_smmu_write_ctx_desc(
-					master, mm_get_enqcmd_pasid(mm), NULL);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-	if (ret)
-		goto err_put_notifier;
-
 	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
 	return smmu_mn;
 
-err_put_notifier:
-	/* Frees smmu_mn */
-	mmu_notifier_put(&smmu_mn->mn);
 err_free_cd:
 	arm_smmu_free_shared_cd(cd);
 	return ERR_PTR(ret);
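
The loop deleted in this hunk is a rollback-on-partial-failure pattern: the CD was written into every master attached to the domain, and if any write failed, the loop walked back from the failing master, undoing the writes already made, before dropping the lock. The patch removes the need for it by writing the CD once per master at bind time. A minimal standalone sketch of that rollback pattern, with illustrative names rather than the kernel's:

```c
#include <stdio.h>

/* Toy stand-in for a per-device context-descriptor slot. */
struct dev_entry {
	int has_cd;	/* 1 once a CD has been "written" */
	int fail;	/* force a failure to exercise the rollback */
};

static int write_ctx_desc(struct dev_entry *d, int install)
{
	if (install && d->fail)
		return -1;
	d->has_cd = install;
	return 0;
}

/*
 * Mirrors the removed loop: apply to each entry in order; on the first
 * failure, walk back over the entries already touched and undo them.
 */
static int install_all(struct dev_entry *devs, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = write_ctx_desc(&devs[i], 1);
		if (ret) {
			while (--i >= 0)
				write_ctx_desc(&devs[i], 0);
			break;
		}
	}
	return ret;
}

int main(void)
{
	struct dev_entry devs[] = { { 0, 0 }, { 0, 0 }, { 0, 1 } };
	int ret = install_all(devs, 3);

	/* On failure, no device is left with a stale CD installed. */
	printf("ret=%d has_cd=%d,%d,%d\n", ret,
	       devs[0].has_cd, devs[1].has_cd, devs[2].has_cd);
	return 0;
}
```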
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 
 	list_del(&smmu_mn->list);
 
-	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-					 NULL);
-
 	/*
 	 * If we went through clear(), we've already invalidated, and no
 	 * new TLB entry can have been formed.
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 	arm_smmu_free_shared_cd(cd);
 }
 
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+			       struct mm_struct *mm)
 {
 	int ret;
 	struct arm_smmu_bond *bond;
@@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 		goto err_free_bond;
 	}
 
+	ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+	if (ret)
+		goto err_put_notifier;
+
 	list_add(&bond->list, &master->bonds);
 	return 0;
 
+err_put_notifier:
+	arm_smmu_mmu_notifier_put(bond->smmu_mn);
 err_free_bond:
 	kfree(bond);
 	return ret;
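
With the CD write added here, __arm_smmu_sva_bind() gains a second unwind label: a failing arm_smmu_write_ctx_desc() first drops the notifier reference, then falls through to free the bond, undoing the steps in reverse order of acquisition. A minimal sketch of that goto-ladder pattern, using toy names and types rather than the kernel's:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the notifier/bond resources in the hunk above. */
struct bond {
	int have_notifier;
	int have_cd;
};

static int get_notifier(struct bond *b) { b->have_notifier = 1; return 0; }
static void put_notifier(struct bond *b) { b->have_notifier = 0; }

static int write_cd(struct bond *b, int fail)
{
	if (fail)
		return -1;
	b->have_cd = 1;
	return 0;
}

/*
 * Same shape as the patched bind path: each later failure jumps to a
 * label that unwinds everything acquired so far, in reverse order.
 */
static int bind(int fail_cd)
{
	int ret;
	struct bond *b = calloc(1, sizeof(*b));

	if (!b)
		return -1;

	ret = get_notifier(b);
	if (ret)
		goto err_free_bond;

	ret = write_cd(b, fail_cd);
	if (ret)
		goto err_put_notifier;

	printf("bound (notifier=%d cd=%d)\n", b->have_notifier, b->have_cd);
	free(b);		/* kept on a list in the real code */
	return 0;

err_put_notifier:
	put_notifier(b);
err_free_bond:
	free(b);
	return ret;
}

int main(void)
{
	printf("ok path: %d\n", bind(0));
	printf("cd failure path: %d\n", bind(1));
	return 0;
}
```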
@@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
 	mutex_lock(&sva_lock);
+
+	arm_smmu_write_ctx_desc(master, id, NULL);
+
 	list_for_each_entry(t, &master->bonds, list) {
 		if (t->mm == mm) {
 			bond = t;
@@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 	struct mm_struct *mm = domain->mm;
 
 	mutex_lock(&sva_lock);
-	ret = __arm_smmu_sva_bind(dev, mm);
+	ret = __arm_smmu_sva_bind(dev, id, mm);
 	mutex_unlock(&sva_lock);
 
 	return ret;
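
Taken together, the hunks make CD management symmetric per (device, PASID): arm_smmu_sva_set_dev_pasid() installs the descriptor via __arm_smmu_sva_bind(dev, id, mm), and arm_smmu_sva_remove_dev_pasid() clears the same slot by passing NULL to arm_smmu_write_ctx_desc(). A toy model of that paired set/clear on a per-device PASID table, with all names illustrative rather than the kernel's:

```c
#include <stdio.h>

#define MAX_PASID 8

/* Toy per-device PASID -> context-descriptor table. */
struct device_model {
	const void *cd[MAX_PASID];
};

/*
 * Passing cd == NULL clears the entry, matching the
 * arm_smmu_write_ctx_desc(master, id, NULL) call added above.
 */
static int set_pasid(struct device_model *d, unsigned int pasid,
		     const void *cd)
{
	if (pasid >= MAX_PASID)
		return -1;
	d->cd[pasid] = cd;
	return 0;
}

int main(void)
{
	struct device_model dev = { { 0 } };
	static const char shared_cd[] = "cd-for-mm";

	set_pasid(&dev, 3, shared_cd);	/* set_dev_pasid path */
	printf("pasid 3 -> %s\n",
	       dev.cd[3] ? (const char *)dev.cd[3] : "(none)");

	set_pasid(&dev, 3, NULL);	/* remove_dev_pasid path */
	printf("pasid 3 -> %s\n",
	       dev.cd[3] ? (const char *)dev.cd[3] : "(none)");
	return 0;
}
```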