Skip to content

Commit 421a511

Browse files
jpemartins authored and
jgunthorpe committed
iommu/amd: Access/Dirty bit support in IOPTEs
IOMMU advertises Access/Dirty bits if the extended feature register reports it. Relevant AMD IOMMU SDM ref[0] "1.3.8 Enhanced Support for Access and Dirty Bits" To enable it set the DTE flag in bits 7 and 8 to enable access, or access+dirty. With that, the IOMMU starts marking the D and A flags on every Memory Request or ATS translation request. It is on the VMM side to steer whether to enable dirty tracking or not, rather than wrongly doing in IOMMU. Relevant AMD IOMMU SDM ref [0], "Table 7. Device Table Entry (DTE) Field Definitions" particularly the entry "HAD". To actually toggle on and off it's relatively simple as it's setting 2 bits on DTE and flush the device DTE cache. To get what's dirtied use existing AMD io-pgtable support, by walking the pagetables over each IOVA, with fetch_pte(). The IOTLB flushing is left to the caller (much like unmap), and iommu_dirty_bitmap_record() is the one adding page-ranges to invalidate. This allows caller to batch the flush over a big span of IOVA space, without the iommu wondering about when to flush. Worthwhile sections from AMD IOMMU SDM: "2.2.3.1 Host Access Support" "2.2.3.2 Host Dirty Support" For details on how IOMMU hardware updates the dirty bit see, and expects from its consequent clearing by CPU: "2.2.7.4 Updating Accessed and Dirty Bits in the Guest Address Tables" "2.2.7.5 Clearing Accessed and Dirty Bits" Quoting the SDM: "The setting of accessed and dirty status bits in the page tables is visible to both the CPU and the peripheral when sharing guest page tables. The IOMMU interlocked operations to update A and D bits must be 64-bit operations and naturally aligned on a 64-bit boundary" .. and for the IOMMU update sequence to Dirty bit, essentially is states: 1. Decodes the read and write intent from the memory access. 2. If P=0 in the page descriptor, fail the access. 3. Compare the A & D bits in the descriptor with the read and write intent in the request. 4. 
If the A or D bits need to be updated in the descriptor: * Start atomic operation. * Read the descriptor as a 64-bit access. * If the descriptor no longer appears to require an update, release the atomic lock with no further action and continue to step 5. * Calculate the new A & D bits. * Write the descriptor as a 64-bit access. * End atomic operation. 5. Continue to the next stage of translation or to the memory access. Access/Dirty bits readouts also need to consider the non-default page-sizes (aka replicated PTEs as mentioned by the manual), as AMD supports all powers of two (except 512G) page sizes. Select IOMMUFD_DRIVER only if IOMMUFD is enabled considering that IOMMU dirty tracking requires IOMMUFD. Link: https://lore.kernel.org/r/20231024135109.73787-12-joao.m.martins@oracle.com Signed-off-by: Joao Martins <joao.m.martins@oracle.com> Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent 1342881 commit 421a511

File tree

4 files changed

+182
-1
lines changed

4 files changed

+182
-1
lines changed

drivers/iommu/amd/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ config AMD_IOMMU
1010
select IOMMU_API
1111
select IOMMU_IOVA
1212
select IOMMU_IO_PGTABLE
13+
select IOMMUFD_DRIVER if IOMMUFD
1314
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
1415
help
1516
With this option you can enable support for AMD IOMMU hardware in

drivers/iommu/amd/amd_iommu_types.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,9 @@
9797
#define FEATURE_GATS_MASK (3ULL)
9898
#define FEATURE_GAM_VAPIC BIT_ULL(21)
9999
#define FEATURE_GIOSUP BIT_ULL(48)
100+
#define FEATURE_HASUP BIT_ULL(49)
100101
#define FEATURE_EPHSUP BIT_ULL(50)
102+
#define FEATURE_HDSUP BIT_ULL(52)
101103
#define FEATURE_SNP BIT_ULL(63)
102104

103105
#define FEATURE_PASID_SHIFT 32
@@ -212,6 +214,7 @@
212214
/* macros and definitions for device table entries */
213215
#define DEV_ENTRY_VALID 0x00
214216
#define DEV_ENTRY_TRANSLATION 0x01
217+
#define DEV_ENTRY_HAD 0x07
215218
#define DEV_ENTRY_PPR 0x34
216219
#define DEV_ENTRY_IR 0x3d
217220
#define DEV_ENTRY_IW 0x3e
@@ -370,10 +373,16 @@
370373
#define PTE_LEVEL_PAGE_SIZE(level) \
371374
(1ULL << (12 + (9 * (level))))
372375

376+
/*
377+
* The IOPTE dirty bit
378+
*/
379+
#define IOMMU_PTE_HD_BIT (6)
380+
373381
/*
374382
* Bit value definition for I/O PTE fields
375383
*/
376384
#define IOMMU_PTE_PR BIT_ULL(0)
385+
#define IOMMU_PTE_HD BIT_ULL(IOMMU_PTE_HD_BIT)
377386
#define IOMMU_PTE_U BIT_ULL(59)
378387
#define IOMMU_PTE_FC BIT_ULL(60)
379388
#define IOMMU_PTE_IR BIT_ULL(61)
@@ -384,6 +393,7 @@
384393
*/
385394
#define DTE_FLAG_V BIT_ULL(0)
386395
#define DTE_FLAG_TV BIT_ULL(1)
396+
#define DTE_FLAG_HAD (3ULL << 7)
387397
#define DTE_FLAG_GIOV BIT_ULL(54)
388398
#define DTE_FLAG_GV BIT_ULL(55)
389399
#define DTE_GLX_SHIFT (56)
@@ -413,6 +423,7 @@
413423

414424
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
415425
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
426+
#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
416427
#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
417428
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
418429

@@ -563,6 +574,7 @@ struct protection_domain {
563574
int nid; /* Node ID */
564575
u64 *gcr3_tbl; /* Guest CR3 table */
565576
unsigned long flags; /* flags to find out type of domain */
577+
bool dirty_tracking; /* dirty tracking is enabled in the domain */
566578
unsigned dev_cnt; /* devices assigned to this domain */
567579
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
568580
};

drivers/iommu/amd/io_pgtable.c

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -486,6 +486,73 @@ static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo
486486
return (__pte & ~offset_mask) | (iova & offset_mask);
487487
}
488488

489+
static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
490+
unsigned long flags)
491+
{
492+
bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
493+
bool dirty = false;
494+
int i, count;
495+
496+
/*
497+
* 2.2.3.2 Host Dirty Support
498+
* When a non-default page size is used , software must OR the
499+
* Dirty bits in all of the replicated host PTEs used to map
500+
* the page. The IOMMU does not guarantee the Dirty bits are
501+
* set in all of the replicated PTEs. Any portion of the page
502+
* may have been written even if the Dirty bit is set in only
503+
* one of the replicated PTEs.
504+
*/
505+
count = PAGE_SIZE_PTE_COUNT(size);
506+
for (i = 0; i < count && test_only; i++) {
507+
if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
508+
dirty = true;
509+
break;
510+
}
511+
}
512+
513+
for (i = 0; i < count && !test_only; i++) {
514+
if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
515+
(unsigned long *)&ptep[i])) {
516+
dirty = true;
517+
}
518+
}
519+
520+
return dirty;
521+
}
522+
523+
static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
524+
unsigned long iova, size_t size,
525+
unsigned long flags,
526+
struct iommu_dirty_bitmap *dirty)
527+
{
528+
struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
529+
unsigned long end = iova + size - 1;
530+
531+
do {
532+
unsigned long pgsize = 0;
533+
u64 *ptep, pte;
534+
535+
ptep = fetch_pte(pgtable, iova, &pgsize);
536+
if (ptep)
537+
pte = READ_ONCE(*ptep);
538+
if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
539+
pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
540+
iova += pgsize;
541+
continue;
542+
}
543+
544+
/*
545+
* Mark the whole IOVA range as dirty even if only one of
546+
* the replicated PTEs were marked dirty.
547+
*/
548+
if (pte_test_and_clear_dirty(ptep, pgsize, flags))
549+
iommu_dirty_bitmap_record(dirty, iova, pgsize);
550+
iova += pgsize;
551+
} while (iova < end);
552+
553+
return 0;
554+
}
555+
489556
/*
490557
* ----------------------------------------------------
491558
*/
@@ -527,6 +594,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
527594
pgtable->iop.ops.map_pages = iommu_v1_map_pages;
528595
pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
529596
pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
597+
pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
530598

531599
return &pgtable->iop;
532600
}

drivers/iommu/amd/iommu.c

Lines changed: 101 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ LIST_HEAD(hpet_map);
6666
LIST_HEAD(acpihid_map);
6767

6868
const struct iommu_ops amd_iommu_ops;
69+
const struct iommu_dirty_ops amd_dirty_ops;
6970

7071
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
7172
int amd_iommu_max_glx_val = -1;
@@ -1611,6 +1612,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
16111612
pte_root |= 1ULL << DEV_ENTRY_PPR;
16121613
}
16131614

1615+
if (domain->dirty_tracking)
1616+
pte_root |= DTE_FLAG_HAD;
1617+
16141618
if (domain->flags & PD_IOMMUV2_MASK) {
16151619
u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
16161620
u64 glx = domain->glx;
@@ -2156,9 +2160,15 @@ static inline u64 dma_max_address(void)
21562160
return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
21572161
}
21582162

2163+
static bool amd_iommu_hd_support(struct amd_iommu *iommu)
2164+
{
2165+
return iommu && (iommu->features & FEATURE_HDSUP);
2166+
}
2167+
21592168
static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
21602169
struct device *dev, u32 flags)
21612170
{
2171+
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
21622172
struct protection_domain *domain;
21632173
struct amd_iommu *iommu = NULL;
21642174

@@ -2175,6 +2185,9 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
21752185
if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
21762186
return ERR_PTR(-EINVAL);
21772187

2188+
if (dirty_tracking && !amd_iommu_hd_support(iommu))
2189+
return ERR_PTR(-EOPNOTSUPP);
2190+
21782191
domain = protection_domain_alloc(type);
21792192
if (!domain)
21802193
return ERR_PTR(-ENOMEM);
@@ -2187,6 +2200,9 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
21872200
domain->domain.type = type;
21882201
domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
21892202
domain->domain.ops = iommu->iommu.ops->default_domain_ops;
2203+
2204+
if (dirty_tracking)
2205+
domain->domain.dirty_ops = &amd_dirty_ops;
21902206
}
21912207

21922208
return &domain->domain;
@@ -2208,7 +2224,7 @@ static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
22082224
{
22092225
unsigned int type = IOMMU_DOMAIN_UNMANAGED;
22102226

2211-
if (flags)
2227+
if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
22122228
return ERR_PTR(-EOPNOTSUPP);
22132229

22142230
return do_iommu_domain_alloc(type, dev, flags);
@@ -2251,6 +2267,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
22512267

22522268
dev_data->defer_attach = false;
22532269

2270+
/*
2271+
* Restrict to devices with compatible IOMMU hardware support
2272+
* when enforcement of dirty tracking is enabled.
2273+
*/
2274+
if (dom->dirty_ops && !amd_iommu_hd_support(iommu))
2275+
return -EINVAL;
2276+
22542277
if (dev_data->domain)
22552278
detach_device(dev);
22562279

@@ -2369,13 +2392,85 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
23692392
return true;
23702393
case IOMMU_CAP_DEFERRED_FLUSH:
23712394
return true;
2395+
case IOMMU_CAP_DIRTY_TRACKING: {
2396+
struct amd_iommu *iommu = rlookup_amd_iommu(dev);
2397+
2398+
return amd_iommu_hd_support(iommu);
2399+
}
23722400
default:
23732401
break;
23742402
}
23752403

23762404
return false;
23772405
}
23782406

2407+
static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
2408+
bool enable)
2409+
{
2410+
struct protection_domain *pdomain = to_pdomain(domain);
2411+
struct dev_table_entry *dev_table;
2412+
struct iommu_dev_data *dev_data;
2413+
bool domain_flush = false;
2414+
struct amd_iommu *iommu;
2415+
unsigned long flags;
2416+
u64 pte_root;
2417+
2418+
spin_lock_irqsave(&pdomain->lock, flags);
2419+
if (!(pdomain->dirty_tracking ^ enable)) {
2420+
spin_unlock_irqrestore(&pdomain->lock, flags);
2421+
return 0;
2422+
}
2423+
2424+
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
2425+
iommu = rlookup_amd_iommu(dev_data->dev);
2426+
if (!iommu)
2427+
continue;
2428+
2429+
dev_table = get_dev_table(iommu);
2430+
pte_root = dev_table[dev_data->devid].data[0];
2431+
2432+
pte_root = (enable ? pte_root | DTE_FLAG_HAD :
2433+
pte_root & ~DTE_FLAG_HAD);
2434+
2435+
/* Flush device DTE */
2436+
dev_table[dev_data->devid].data[0] = pte_root;
2437+
device_flush_dte(dev_data);
2438+
domain_flush = true;
2439+
}
2440+
2441+
/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
2442+
if (domain_flush) {
2443+
amd_iommu_domain_flush_tlb_pde(pdomain);
2444+
amd_iommu_domain_flush_complete(pdomain);
2445+
}
2446+
pdomain->dirty_tracking = enable;
2447+
spin_unlock_irqrestore(&pdomain->lock, flags);
2448+
2449+
return 0;
2450+
}
2451+
2452+
static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
2453+
unsigned long iova, size_t size,
2454+
unsigned long flags,
2455+
struct iommu_dirty_bitmap *dirty)
2456+
{
2457+
struct protection_domain *pdomain = to_pdomain(domain);
2458+
struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
2459+
unsigned long lflags;
2460+
2461+
if (!ops || !ops->read_and_clear_dirty)
2462+
return -EOPNOTSUPP;
2463+
2464+
spin_lock_irqsave(&pdomain->lock, lflags);
2465+
if (!pdomain->dirty_tracking && dirty->bitmap) {
2466+
spin_unlock_irqrestore(&pdomain->lock, lflags);
2467+
return -EINVAL;
2468+
}
2469+
spin_unlock_irqrestore(&pdomain->lock, lflags);
2470+
2471+
return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
2472+
}
2473+
23792474
static void amd_iommu_get_resv_regions(struct device *dev,
23802475
struct list_head *head)
23812476
{
@@ -2498,6 +2593,11 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
24982593
return true;
24992594
}
25002595

2596+
const struct iommu_dirty_ops amd_dirty_ops = {
2597+
.set_dirty_tracking = amd_iommu_set_dirty_tracking,
2598+
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
2599+
};
2600+
25012601
const struct iommu_ops amd_iommu_ops = {
25022602
.capable = amd_iommu_capable,
25032603
.domain_alloc = amd_iommu_domain_alloc,

0 commit comments

Comments
 (0)