 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/dma.h>
+#include <uapi/linux/iommufd.h>

 #include "amd_iommu.h"
 #include "../dma-iommu.h"
@@ -2155,28 +2156,64 @@ static inline u64 dma_max_address(void)
 	return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
 }

-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
+						  struct device *dev, u32 flags)
 {
 	struct protection_domain *domain;
+	struct amd_iommu *iommu = NULL;
+
+	if (dev) {
+		iommu = rlookup_amd_iommu(dev);
+		if (!iommu)
+			return ERR_PTR(-ENODEV);
+	}

 	/*
 	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
 	 * default to use IOMMU_DOMAIN_DMA[_FQ].
 	 */
 	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
-		return NULL;
+		return ERR_PTR(-EINVAL);

 	domain = protection_domain_alloc(type);
 	if (!domain)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end = dma_max_address();
 	domain->domain.geometry.force_aperture = true;

+	if (iommu) {
+		domain->domain.type = type;
+		domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
+		domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+	}
+
 	return &domain->domain;
 }

+static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
+{
+	struct iommu_domain *domain;
+
+	domain = do_iommu_domain_alloc(type, NULL, 0);
+	if (IS_ERR(domain))
+		return NULL;
+
+	return domain;
+}
+
+static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
+							u32 flags)
+{
+	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+
+	if (flags)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	return do_iommu_domain_alloc(type, dev, flags);
+}
+
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
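Why the new "if (iommu)" block and the ERR_PTR() conversions above: when
a domain is allocated on behalf of a specific device (the iommufd path),
the driver has to hand back a fully initialized domain, because the user
allocation path bypasses the finalization the iommu core performs for
the legacy .domain_alloc path. A rough, paraphrased sketch of that
core-side finalization (illustrative only, not the exact core code; the
helper name is hypothetical):

/*
 * Hedged sketch: approximately what the iommu core does around a legacy
 * ops->domain_alloc() call, and therefore what do_iommu_domain_alloc()
 * must do itself when a device is supplied and the core is bypassed.
 */
static struct iommu_domain *legacy_core_alloc(const struct iommu_ops *ops,
					      unsigned int type)
{
	struct iommu_domain *domain = ops->domain_alloc(type);

	if (!domain)
		return NULL;
	domain->type = type;
	domain->pgsize_bitmap = ops->pgsize_bitmap;
	if (!domain->ops)
		domain->ops = ops->default_domain_ops;
	return domain;
}

The amd_iommu_domain_alloc() wrapper converts ERR_PTR() values back to
NULL because the long-standing .domain_alloc contract reports failure as
NULL, while the user path keeps the ERR_PTR() so a specific errno
(-ENODEV, -EINVAL, -ENOMEM) can reach the caller.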
@@ -2464,6 +2501,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
+	.domain_alloc_user = amd_iommu_domain_alloc_user,
 	.probe_device = amd_iommu_probe_device,
 	.release_device = amd_iommu_release_device,
 	.probe_finalize = amd_iommu_probe_finalize,
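For context, a hedged sketch of how a caller such as the iommufd core
might reach the new hook through the device's ops (the dispatch helper
below is hypothetical; the real iommufd call chain may differ):

/*
 * Hypothetical dispatch helper, not part of this patch: look up the
 * device's iommu_ops and call the new hook, propagating ERR_PTR()
 * errors rather than collapsing them to NULL.
 */
static struct iommu_domain *user_domain_alloc(struct device *dev, u32 flags)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (!ops->domain_alloc_user)
		return ERR_PTR(-EOPNOTSUPP);

	return ops->domain_alloc_user(dev, flags);
}

Because amd_iommu_domain_alloc_user() rejects any nonzero flags with
-EOPNOTSUPP, a caller can probe for flag support and fall back cleanly
when a flag is not implemented.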