Commit a005ef6

hegdevasant authored and joergroedel committed
iommu/amd: Pass page table type as param to pdom_setup_pgtable()
Current code forces the v1 page table for UNMANAGED domains and the global page table type (amd_iommu_pgtable) for the rest of the paging domains. A following patch series adds support for the domain_alloc_paging() ops and also enhances domain_alloc_user() to allocate the page table based on 'flags'. Hence pass the page table type as a parameter to pdom_setup_pgtable() so that the caller can decide the right page table type. Also update dma_max_address() to take pgtable as a parameter.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jacob Pan <jacob.pan@linux.microsoft.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20241028093810.5901-9-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent b3c9890 commit a005ef6
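For illustration only, below is a minimal, self-contained C sketch of the calling convention this patch moves to: the caller selects the page table type and the setup helper merely validates it. All names in the sketch (pgtable_type, setup_pgtable, alloc_domain, default_pgtable) are simplified stand-ins, not the driver's actual symbols; see the diff further down for the real code.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the driver's AMD_IOMMU_V1/V2 page table constants. */
    enum pgtable_type { PGTABLE_V1 = 1, PGTABLE_V2 = 2 };

    /* Stand-in for the global default, analogous to amd_iommu_pgtable. */
    static enum pgtable_type default_pgtable = PGTABLE_V2;

    /*
     * After the patch the setup helper no longer derives the page table
     * type from the domain type; it only validates what the caller chose.
     */
    static int setup_pgtable(enum pgtable_type pgtable)
    {
            switch (pgtable) {
            case PGTABLE_V1:
            case PGTABLE_V2:
                    printf("allocating io-pgtable ops for v%d page table\n",
                           pgtable);
                    return 0;
            default:
                    return -EINVAL;
            }
    }

    /* The caller decides: force v1 for pass-through/unmanaged domains. */
    static int alloc_domain(bool unmanaged)
    {
            enum pgtable_type pgtable = unmanaged ? PGTABLE_V1 : default_pgtable;

            return setup_pgtable(pgtable);
    }

    int main(void)
    {
            alloc_domain(true);     /* unmanaged: forced to v1 */
            alloc_domain(false);    /* paging domain: uses the global default */
            return 0;
    }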

1 file changed: +19 -24 lines changed


drivers/iommu/amd/iommu.c

Lines changed: 19 additions & 24 deletions
@@ -2286,28 +2286,13 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 }
 
 static int pdom_setup_pgtable(struct protection_domain *domain,
-			      unsigned int type)
+			      unsigned int type, int pgtable)
 {
 	struct io_pgtable_ops *pgtbl_ops;
-	int pgtable;
 
-	switch (type) {
 	/* No need to allocate io pgtable ops in passthrough mode */
-	case IOMMU_DOMAIN_IDENTITY:
+	if (!(type & __IOMMU_DOMAIN_PAGING))
 		return 0;
-	case IOMMU_DOMAIN_DMA:
-		pgtable = amd_iommu_pgtable;
-		break;
-	/*
-	 * Force IOMMU v1 page table when allocating
-	 * domain for pass-through devices.
-	 */
-	case IOMMU_DOMAIN_UNMANAGED:
-		pgtable = AMD_IOMMU_V1;
-		break;
-	default:
-		return -EINVAL;
-	}
 
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
@@ -2319,6 +2304,7 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
 	default:
 		return -EINVAL;
 	}
+
 	pgtbl_ops =
 		alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
 	if (!pgtbl_ops)
@@ -2327,9 +2313,9 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
 	return 0;
 }
 
-static inline u64 dma_max_address(void)
+static inline u64 dma_max_address(int pgtable)
 {
-	if (amd_iommu_pgtable == AMD_IOMMU_V1)
+	if (pgtable == AMD_IOMMU_V1)
 		return ~0ULL;
 
 	/* V2 with 4/5 level page table */
@@ -2342,7 +2328,8 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 }
 
 static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
-						  struct device *dev, u32 flags)
+						  struct device *dev,
+						  u32 flags, int pgtable)
 {
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct protection_domain *domain;
@@ -2367,15 +2354,15 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
-	ret = pdom_setup_pgtable(domain, type);
+	ret = pdom_setup_pgtable(domain, type, pgtable);
 	if (ret) {
 		domain_id_free(domain->id);
 		kfree(domain);
 		return ERR_PTR(ret);
 	}
 
 	domain->domain.geometry.aperture_start = 0;
-	domain->domain.geometry.aperture_end = dma_max_address();
+	domain->domain.geometry.aperture_end = dma_max_address(pgtable);
 	domain->domain.geometry.force_aperture = true;
 	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
@@ -2393,8 +2380,16 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 {
 	struct iommu_domain *domain;
+	int pgtable = amd_iommu_pgtable;
+
+	/*
+	 * Force IOMMU v1 page table when allocating
+	 * domain for pass-through devices.
+	 */
+	if (type == IOMMU_DOMAIN_UNMANAGED)
+		pgtable = AMD_IOMMU_V1;
 
-	domain = do_iommu_domain_alloc(type, NULL, 0);
+	domain = do_iommu_domain_alloc(type, NULL, 0, pgtable);
 	if (IS_ERR(domain))
 		return NULL;
 
@@ -2412,7 +2407,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 	if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	return do_iommu_domain_alloc(type, dev, flags);
+	return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V1);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)
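A side note on the dma_max_address() hunk above: the v1 path keeps the full 64-bit aperture (~0ULL), while the v2 path is bounded by a 4- or 5-level page table. The hunk is truncated before the v2 computation, so the sketch below only illustrates the general arithmetic for 4/5-level tables (48-bit vs. 57-bit virtual address space); max_va_for_levels() is a hypothetical helper for illustration, not the driver's code.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative arithmetic only: each page table level resolves 9 bits
     * on top of a 4 KiB (12-bit) page, so 4 levels cover 48 bits of VA and
     * 5 levels cover 57 bits. This mirrors the idea behind the truncated
     * "V2 with 4/5 level page table" branch, not its actual implementation.
     */
    static uint64_t max_va_for_levels(int levels)
    {
            return (1ULL << (12 + 9 * levels)) - 1;
    }

    int main(void)
    {
            printf("4-level max VA: 0x%llx\n",
                   (unsigned long long)max_va_for_levels(4)); /* 2^48 - 1 */
            printf("5-level max VA: 0x%llx\n",
                   (unsigned long long)max_va_for_levels(5)); /* 2^57 - 1 */
            return 0;
    }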
