@@ -74,6 +74,9 @@ struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
 
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+				   struct device *dev);
+
 static void set_dte_entry(struct amd_iommu *iommu,
 			  struct iommu_dev_data *dev_data);
 
@@ -2263,43 +2266,41 @@ void protection_domain_free(struct protection_domain *domain)
 	kfree(domain);
 }
 
+static void protection_domain_init(struct protection_domain *domain, int nid)
+{
+	spin_lock_init(&domain->lock);
+	INIT_LIST_HEAD(&domain->dev_list);
+	INIT_LIST_HEAD(&domain->dev_data_list);
+	domain->iop.pgtbl.cfg.amd.nid = nid;
+}
+
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
-	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
-	int pgtable;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
 	domain->id = domain_id_alloc();
-	if (!domain->id)
-		goto err_free;
+	if (!domain->id) {
+		kfree(domain);
+		return NULL;
+	}
 
-	spin_lock_init(&domain->lock);
-	INIT_LIST_HEAD(&domain->dev_list);
-	INIT_LIST_HEAD(&domain->dev_data_list);
-	domain->iop.pgtbl.cfg.amd.nid = nid;
+	protection_domain_init(domain, nid);
+
+	return domain;
+}
+
+static int pdom_setup_pgtable(struct protection_domain *domain,
+			      unsigned int type, int pgtable)
+{
+	struct io_pgtable_ops *pgtbl_ops;
 
-	switch (type) {
 	/* No need to allocate io pgtable ops in passthrough mode */
-	case IOMMU_DOMAIN_IDENTITY:
-	case IOMMU_DOMAIN_SVA:
-		return domain;
-	case IOMMU_DOMAIN_DMA:
-		pgtable = amd_iommu_pgtable;
-		break;
-	/*
-	 * Force IOMMU v1 page table when allocating
-	 * domain for pass-through devices.
-	 */
-	case IOMMU_DOMAIN_UNMANAGED:
-		pgtable = AMD_IOMMU_V1;
-		break;
-	default:
-		goto err_id;
-	}
+	if (!(type & __IOMMU_DOMAIN_PAGING))
+		return 0;
 
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
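
Note: this hunk splits the old protection_domain_alloc() into a protection_domain_init() field initializer, the allocator itself, and a separate pdom_setup_pgtable() step; the __IOMMU_DOMAIN_PAGING test replaces the previous switch over domain types, so identity and SVA domains simply skip page-table setup. A minimal userspace C model of the resulting allocate-then-setup pattern, with the caller owning the unwind (all names and values below are illustrative, not the kernel's):

/*
 * Minimal userspace model of the allocate-then-setup split above.
 * pdom_alloc() only creates the object; pdom_setup_pgtable() is a
 * separate, skippable step; the caller unwinds on failure, mirroring
 * do_iommu_domain_alloc(). Names and values are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct pdom {
	int id;
	void *pgtbl;
};

static struct pdom *pdom_alloc(void)
{
	struct pdom *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->id = 42;			/* stand-in for domain_id_alloc() */
	return d;
}

static int pdom_setup_pgtable(struct pdom *d, int paging)
{
	if (!paging)			/* identity/SVA: nothing to do */
		return 0;
	d->pgtbl = malloc(64);		/* stand-in for alloc_io_pgtable_ops() */
	return d->pgtbl ? 0 : -1;
}

int main(void)
{
	struct pdom *d = pdom_alloc();

	if (!d)
		return 1;
	if (pdom_setup_pgtable(d, 1)) {	/* caller owns the unwind */
		free(d);
		return 1;
	}
	printf("domain %d ready\n", d->id);
	free(d->pgtbl);
	free(d);
	return 0;
}
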
@@ -2309,25 +2310,20 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 		domain->pd_mode = PD_MODE_V2;
 		break;
 	default:
-		goto err_id;
+		return -EINVAL;
 	}
 
 	pgtbl_ops =
 		alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
 	if (!pgtbl_ops)
-		goto err_id;
+		return -ENOMEM;
 
-	return domain;
-err_id:
-	domain_id_free(domain->id);
-err_free:
-	kfree(domain);
-	return NULL;
+	return 0;
 }
 
-static inline u64 dma_max_address(void)
+static inline u64 dma_max_address(int pgtable)
 {
-	if (amd_iommu_pgtable == AMD_IOMMU_V1)
+	if (pgtable == AMD_IOMMU_V1)
 		return ~0ULL;
 
 	/* V2 with 4/5 level page table */
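
With dma_max_address() taking the page-table mode as a parameter, the aperture now follows the per-domain choice instead of the global amd_iommu_pgtable. A self-contained sketch of the resulting limits; the 48-bit/57-bit figures for 4- and 5-level v2 tables are assumptions based on the usual x86 paging depths, while the kernel computes the bound from its configured guest page-table level:

/*
 * Sketch of the per-domain aperture bound. V1 covers the full 64-bit
 * IOVA space; the v2 limits below (48-bit for 4-level, 57-bit for
 * 5-level) are assumed figures, not taken from the kernel source.
 */
#include <stdint.h>
#include <stdio.h>

enum { AMD_IOMMU_V1 = 1, AMD_IOMMU_V2 = 2 };

static uint64_t dma_max_address(int pgtable, int gpt_level)
{
	if (pgtable == AMD_IOMMU_V1)
		return ~0ULL;

	/* V2 with 4/5 level page table */
	return (1ULL << (gpt_level == 5 ? 57 : 48)) - 1;
}

int main(void)
{
	printf("v1:          %#llx\n",
	       (unsigned long long)dma_max_address(AMD_IOMMU_V1, 4));
	printf("v2, 4-level: %#llx\n",
	       (unsigned long long)dma_max_address(AMD_IOMMU_V2, 4));
	printf("v2, 5-level: %#llx\n",
	       (unsigned long long)dma_max_address(AMD_IOMMU_V2, 5));
	return 0;
}
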
@@ -2340,11 +2336,13 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 }
 
 static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
-						  struct device *dev, u32 flags)
+						  struct device *dev,
+						  u32 flags, int pgtable)
 {
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct protection_domain *domain;
 	struct amd_iommu *iommu = NULL;
+	int ret;
 
 	if (dev)
 		iommu = get_amd_iommu_from_dev(dev);
@@ -2356,16 +2354,20 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
 		return ERR_PTR(-EINVAL);
 
-	if (dirty_tracking && !amd_iommu_hd_support(iommu))
-		return ERR_PTR(-EOPNOTSUPP);
-
 	domain = protection_domain_alloc(type,
 					 dev ? dev_to_node(dev) : NUMA_NO_NODE);
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
+	ret = pdom_setup_pgtable(domain, type, pgtable);
+	if (ret) {
+		domain_id_free(domain->id);
+		kfree(domain);
+		return ERR_PTR(ret);
+	}
+
 	domain->domain.geometry.aperture_start = 0;
-	domain->domain.geometry.aperture_end   = dma_max_address();
+	domain->domain.geometry.aperture_end   = dma_max_address(pgtable);
 	domain->domain.geometry.force_aperture = true;
 	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
@@ -2383,8 +2385,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 {
 	struct iommu_domain *domain;
+	int pgtable = amd_iommu_pgtable;
 
-	domain = do_iommu_domain_alloc(type, NULL, 0);
+	/*
+	 * Force IOMMU v1 page table when allocating
+	 * domain for pass-through devices.
+	 */
+	if (type == IOMMU_DOMAIN_UNMANAGED)
+		pgtable = AMD_IOMMU_V1;
+
+	domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
 	if (IS_ERR(domain))
 		return NULL;
 
@@ -2398,11 +2408,36 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 
 {
 	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+	struct amd_iommu *iommu = NULL;
+	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+						IOMMU_HWPT_ALLOC_PASID;
+
+	if (dev)
+		iommu = get_amd_iommu_from_dev(dev);
 
-	if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
+	if ((flags & ~supported_flags) || parent || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	return do_iommu_domain_alloc(type, dev, flags);
+	/* Allocate domain with v2 page table if IOMMU supports PASID. */
+	if (flags & IOMMU_HWPT_ALLOC_PASID) {
+		if (!amd_iommu_pasid_supported())
+			return ERR_PTR(-EOPNOTSUPP);
+
+		return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
+	}
+
+	/* Allocate domain with v1 page table for dirty tracking */
+	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
+		if (iommu && amd_iommu_hd_support(iommu)) {
+			return do_iommu_domain_alloc(type, dev,
+						     flags, AMD_IOMMU_V1);
+		}
+
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	/* If nothing specific is required use the kernel commandline default */
+	return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)
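
amd_iommu_domain_alloc_user() now encodes an explicit selection policy: IOMMU_HWPT_ALLOC_PASID demands a v2 table (and PASID support), IOMMU_HWPT_ALLOC_DIRTY_TRACKING demands v1 (and hardware dirty-tracking support), and with neither flag the command-line default applies. A compact userspace model of that decision table, with made-up flag values and stubbed capability checks:

/*
 * Userspace model of the page-table selection in
 * amd_iommu_domain_alloc_user(). Flag values and capability stubs are
 * made up; only the decision order mirrors the hunk above.
 */
#include <stdio.h>

#define HWPT_DIRTY_TRACKING	(1u << 0)
#define HWPT_PASID		(1u << 1)

enum { PGTABLE_V1 = 1, PGTABLE_V2 = 2, UNSUPPORTED = -1 };

static int pick_pgtable(unsigned int flags, int cmdline_default,
			int pasid_ok, int dirty_ok)
{
	if (flags & HWPT_PASID)			/* v2 required for PASID */
		return pasid_ok ? PGTABLE_V2 : UNSUPPORTED;
	if (flags & HWPT_DIRTY_TRACKING)	/* v1 required for dirty tracking */
		return dirty_ok ? PGTABLE_V1 : UNSUPPORTED;
	return cmdline_default;			/* nothing specific requested */
}

int main(void)
{
	printf("pasid: v%d\n", pick_pgtable(HWPT_PASID, PGTABLE_V1, 1, 1));
	printf("dirty: v%d\n", pick_pgtable(HWPT_DIRTY_TRACKING, PGTABLE_V2, 1, 1));
	printf("plain: v%d\n", pick_pgtable(0, PGTABLE_V1, 1, 1));
	return 0;
}
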
@@ -2444,6 +2479,25 @@ static struct iommu_domain blocked_domain = {
 	}
 };
 
+static struct protection_domain identity_domain;
+
+static const struct iommu_domain_ops identity_domain_ops = {
+	.attach_dev = amd_iommu_attach_device,
+};
+
+void amd_iommu_init_identity_domain(void)
+{
+	struct iommu_domain *domain = &identity_domain.domain;
+
+	domain->type = IOMMU_DOMAIN_IDENTITY;
+	domain->ops = &identity_domain_ops;
+	domain->owner = &amd_iommu_ops;
+
+	identity_domain.id = domain_id_alloc();
+
+	protection_domain_init(&identity_domain, NUMA_NO_NODE);
+}
+
 static int amd_iommu_attach_device(struct iommu_domain *dom,
 				   struct device *dev)
 {
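
The identity domain becomes a statically allocated singleton: amd_iommu_init_identity_domain() fills it in once during driver bring-up (its call site is outside this diff) and the next hunk publishes it through the .identity_domain op, which is also why amd_iommu_attach_device() gained a forward declaration in the first hunk. A small model of the pattern, assuming one-time init before any lookup:

/*
 * Model of the static identity-domain singleton: the object lives in
 * static storage, is initialized exactly once, and every subsequent
 * lookup returns the same pointer, so attach paths never allocate.
 * Types and values are illustrative.
 */
#include <stdio.h>

struct domain {
	int type;
	int id;
};

static struct domain identity_domain;	/* static storage, no kzalloc() */

static void init_identity_domain(void)
{
	identity_domain.type = 1;	/* stand-in for IOMMU_DOMAIN_IDENTITY */
	identity_domain.id = 7;		/* stand-in for domain_id_alloc() */
}

static struct domain *get_identity_domain(void)
{
	return &identity_domain;	/* same object for every device */
}

int main(void)
{
	init_identity_domain();
	printf("id=%d, shared=%d\n", get_identity_domain()->id,
	       get_identity_domain() == &identity_domain);
	return 0;
}
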
@@ -2842,6 +2896,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.blocked_domain = &blocked_domain,
+	.identity_domain = &identity_domain.domain,
 	.domain_alloc = amd_iommu_domain_alloc,
 	.domain_alloc_user = amd_iommu_domain_alloc_user,
 	.domain_alloc_sva = amd_iommu_domain_alloc_sva,