@@ -43,23 +43,6 @@ static void *pasid_private_find(ioasid_t pasid)
 	return xa_load(&pasid_private_array, pasid);
 }
 
-static struct intel_svm_dev *
-svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
-{
-	struct intel_svm_dev *sdev = NULL, *t;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(t, &svm->devs, list) {
-		if (t->dev == dev) {
-			sdev = t;
-			break;
-		}
-	}
-	rcu_read_unlock();
-
-	return sdev;
-}
-
 int intel_svm_enable_prq(struct intel_iommu *iommu)
 {
 	struct iopf_queue *iopfq;
@@ -192,7 +175,10 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-	struct intel_svm_dev *sdev;
+	struct dmar_domain *domain = svm->domain;
+	struct dev_pasid_info *dev_pasid;
+	struct device_domain_info *info;
+	unsigned long flags;
 
 	/* This might end up being called from exit_mmap(), *before* the page
 	 * tables are cleared. And __mmu_notifier_release() will delete us from
@@ -206,11 +192,13 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * page) so that we end up taking a fault that the hardware really
 	 * *has* to handle gracefully without affecting other processes.
 	 */
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
-					    svm->pasid, true);
-	rcu_read_unlock();
+	spin_lock_irqsave(&domain->lock, flags);
+	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+		info = dev_iommu_priv_get(dev_pasid->dev);
+		intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev,
+					    dev_pasid->pasid, true);
+	}
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 }
 
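Note: the release path above now walks the domain's dev_pasids list under domain->lock instead of the RCU-protected svm->devs list. For orientation only, the sketch below shows the data layout implied by the fields this hunk touches (link_domain, dev, pasid, dev_pasids, lock, svm); the authoritative definitions live in drivers/iommu/intel/iommu.h and may carry additional members.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct intel_svm;

/* Sketch of the per-{device, PASID} bookkeeping implied by this patch;
 * not the authoritative definition. */
struct dev_pasid_info {
	struct list_head link_domain;	/* entry on dmar_domain->dev_pasids */
	struct device *dev;
	ioasid_t pasid;
};

struct dmar_domain {
	/* ... existing members elided ... */
	spinlock_t lock;		/* protects dev_pasids */
	struct list_head dev_pasids;	/* struct dev_pasid_info entries */
	struct intel_svm *svm;		/* set by intel_svm_set_dev_pasid() */
};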
@@ -219,47 +207,17 @@ static const struct mmu_notifier_ops intel_mmuops = {
 	.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
 };
 
-static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
-			     struct intel_svm **rsvm,
-			     struct intel_svm_dev **rsdev)
-{
-	struct intel_svm_dev *sdev = NULL;
-	struct intel_svm *svm;
-
-	if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
-		return -EINVAL;
-
-	svm = pasid_private_find(pasid);
-	if (IS_ERR(svm))
-		return PTR_ERR(svm);
-
-	if (!svm)
-		goto out;
-
-	/*
-	 * If we found svm for the PASID, there must be at least one device
-	 * bond.
-	 */
-	if (WARN_ON(list_empty(&svm->devs)))
-		return -EINVAL;
-	sdev = svm_lookup_device_by_dev(svm, dev);
-
-out:
-	*rsvm = svm;
-	*rsdev = sdev;
-
-	return 0;
-}
-
 static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 				   struct device *dev, ioasid_t pasid)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 	struct intel_iommu *iommu = info->iommu;
 	struct mm_struct *mm = domain->mm;
-	struct intel_svm_dev *sdev;
+	struct dev_pasid_info *dev_pasid;
 	struct intel_svm *svm;
 	unsigned long sflags;
+	unsigned long flags;
 	int ret = 0;
 
 	svm = pasid_private_find(pasid);
@@ -270,7 +228,6 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 
 		svm->pasid = pasid;
 		svm->mm = mm;
-		INIT_LIST_HEAD_RCU(&svm->devs);
 
 		svm->notifier.ops = &intel_mmuops;
 		svm->domain = to_dmar_domain(domain);
@@ -288,25 +245,17 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 		}
 	}
 
-	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
-	if (!sdev) {
-		ret = -ENOMEM;
+	dmar_domain->svm = svm;
+	dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+	if (!dev_pasid)
 		goto free_svm;
-	}
 
-	sdev->dev = dev;
-	sdev->iommu = iommu;
-	sdev->did = FLPT_DEFAULT_DID;
-	sdev->sid = PCI_DEVID(info->bus, info->devfn);
-	if (info->ats_enabled) {
-		sdev->qdep = info->ats_qdep;
-		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
-			sdev->qdep = 0;
-	}
+	dev_pasid->dev = dev;
+	dev_pasid->pasid = pasid;
 
 	ret = cache_tag_assign_domain(to_dmar_domain(domain), dev, pasid);
 	if (ret)
-		goto free_sdev;
+		goto free_dev_pasid;
 
 	/* Setup the pasid table: */
 	sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
@@ -315,16 +264,18 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 	if (ret)
 		goto unassign_tag;
 
-	list_add_rcu(&sdev->list, &svm->devs);
+	spin_lock_irqsave(&dmar_domain->lock, flags);
+	list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
+	spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
 	return 0;
 
 unassign_tag:
 	cache_tag_unassign_domain(to_dmar_domain(domain), dev, pasid);
-free_sdev:
-	kfree(sdev);
+free_dev_pasid:
+	kfree(dev_pasid);
 free_svm:
-	if (list_empty(&svm->devs)) {
+	if (list_empty(&dmar_domain->dev_pasids)) {
 		mmu_notifier_unregister(&svm->notifier, mm);
 		pasid_private_remove(pasid);
 		kfree(svm);
@@ -333,26 +284,17 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 	return ret;
 }
 
-void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
+void intel_svm_remove_dev_pasid(struct iommu_domain *domain)
 {
-	struct intel_svm_dev *sdev;
-	struct intel_svm *svm;
-	struct mm_struct *mm;
-
-	if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
-		return;
-	mm = svm->mm;
-
-	if (sdev) {
-		list_del_rcu(&sdev->list);
-		kfree_rcu(sdev, rcu);
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+	struct intel_svm *svm = dmar_domain->svm;
+	struct mm_struct *mm = domain->mm;
 
-		if (list_empty(&svm->devs)) {
-			if (svm->notifier.ops)
-				mmu_notifier_unregister(&svm->notifier, mm);
-			pasid_private_remove(svm->pasid);
-			kfree(svm);
-		}
+	if (list_empty(&dmar_domain->dev_pasids)) {
+		if (svm->notifier.ops)
+			mmu_notifier_unregister(&svm->notifier, mm);
+		pasid_private_remove(svm->pasid);
+		kfree(svm);
 	}
 }
 
@@ -686,8 +628,10 @@ struct iommu_domain *intel_svm_domain_alloc(void)
 		return NULL;
 	domain->domain.ops = &intel_svm_domain_ops;
 	domain->use_first_level = true;
+	INIT_LIST_HEAD(&domain->dev_pasids);
 	INIT_LIST_HEAD(&domain->cache_tags);
 	spin_lock_init(&domain->cache_lock);
+	spin_lock_init(&domain->lock);
 
 	return &domain->domain;
 }
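Taken together, the patch replaces the RCU-protected svm->devs list with the common kernel idiom of a spinlock-protected per-domain list: intel_svm_domain_alloc() initializes the list and lock, intel_svm_set_dev_pasid() links an entry, and the release path walks it under the lock. A minimal, self-contained sketch of that idiom follows; the demo_* names are hypothetical and not from this patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for the dev_pasids list and lock added to
 * struct dmar_domain. */
struct demo_domain {
	spinlock_t lock;		/* protects entries */
	struct list_head entries;
};

struct demo_entry {
	struct list_head link;
	u32 pasid;
};

static int demo_attach(struct demo_domain *d, u32 pasid)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
	unsigned long flags;

	if (!e)
		return -ENOMEM;
	e->pasid = pasid;

	spin_lock_irqsave(&d->lock, flags);
	list_add(&e->link, &d->entries);
	spin_unlock_irqrestore(&d->lock, flags);
	return 0;
}

static void demo_detach(struct demo_domain *d, u32 pasid)
{
	struct demo_entry *e, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_for_each_entry_safe(e, tmp, &d->entries, link) {
		if (e->pasid == pasid) {
			list_del(&e->link);
			kfree(e);
			break;
		}
	}
	spin_unlock_irqrestore(&d->lock, flags);
}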