Skip to content

Commit 7b87c93

Browse files
jgunthorpe authored and willdeacon committed
iommu/arm-smmu-v3: Move the CD generation for SVA into a function
Pull all the calculations for building the CD table entry for a mmu_struct into arm_smmu_make_sva_cd(). Call it in the two places installing the SVA CD table entry. Open code the last caller of arm_smmu_update_ctx_desc_devices() and remove the function. Remove arm_smmu_write_ctx_desc() since all callers are gone. Add the locking assertions to arm_smmu_alloc_cd_ptr() since arm_smmu_update_ctx_desc_devices() was the last problematic caller. Remove quiet_cd since all users are gone, arm_smmu_make_sva_cd() creates the same value. The behavior of quiet_cd changes slightly, the old implementation edited the CD in place to set CTXDESC_CD_0_TCR_EPD0 assuming it was a SVA CD entry. This version generates a full CD entry with a 0 TTB0 and relies on arm_smmu_write_cd_entry() to install it hitlessly. Tested-by: Nicolin Chen <nicolinc@nvidia.com> Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com> Reviewed-by: Nicolin Chen <nicolinc@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Link: https://lore.kernel.org/r/7-v9-5040dc602008+177d7-smmuv3_newapi_p2_jgg@nvidia.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent 13abe4f commit 7b87c93

File tree

3 files changed

+107
-132
lines changed

3 files changed

+107
-132
lines changed

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

Lines changed: 98 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -34,25 +34,6 @@ struct arm_smmu_bond {
3434

3535
static DEFINE_MUTEX(sva_lock);
3636

37-
/*
38-
* Write the CD to the CD tables for all masters that this domain is attached
39-
* to. Note that this is only used to update existing CD entries in the target
40-
* CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
41-
*/
42-
static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
43-
int ssid,
44-
struct arm_smmu_ctx_desc *cd)
45-
{
46-
struct arm_smmu_master *master;
47-
unsigned long flags;
48-
49-
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
50-
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
51-
arm_smmu_write_ctx_desc(master, ssid, cd);
52-
}
53-
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
54-
}
55-
5637
static void
5738
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
5839
{
@@ -128,11 +109,85 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
128109
return NULL;
129110
}
130111

112+
static u64 page_size_to_cd(void)
113+
{
114+
static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
115+
PAGE_SIZE == SZ_64K);
116+
if (PAGE_SIZE == SZ_64K)
117+
return ARM_LPAE_TCR_TG0_64K;
118+
if (PAGE_SIZE == SZ_16K)
119+
return ARM_LPAE_TCR_TG0_16K;
120+
return ARM_LPAE_TCR_TG0_4K;
121+
}
122+
123+
/*
 * Build a CD table entry describing an SVA context for @mm on @master.
 *
 * With a NULL @mm this instead produces a "quiet" entry: still valid, but
 * with EPD0 set so TTB0 walks are disabled and every incoming transaction
 * faults. arm_smmu_write_cd_entry() can move hitlessly between the two
 * forms because the HW ignores TTB0 whenever EPD0 is set.
 */
static void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
				 struct arm_smmu_master *master,
				 struct mm_struct *mm, u16 asid)
{
	u64 ips;

	memset(target, 0, sizeof(*target));

	/* Output address size comes from the CPU's sanitised PARange field. */
	ips = cpuid_feature_extract_unsigned_field(
		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
		ID_AA64MMFR0_EL1_PARANGE_SHIFT);

	target->data[0] = cpu_to_le64(
		CTXDESC_CD_0_TCR_EPD1 |
#ifdef __BIG_ENDIAN
		CTXDESC_CD_0_ENDI |
#endif
		CTXDESC_CD_0_V |
		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, ips) |
		CTXDESC_CD_0_AA64 |
		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
		CTXDESC_CD_0_R |
		CTXDESC_CD_0_A |
		CTXDESC_CD_0_ASET |
		FIELD_PREP(CTXDESC_CD_0_ASID, asid));

	if (mm) {
		/* TTB0 walk config: VA size, granule, WBWA, inner shareable */
		target->data[0] |= cpu_to_le64(
			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
				   64ULL - vabits_actual) |
			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
				   ARM_LPAE_TCR_RGN_WBWA) |
			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));

		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
					      CTXDESC_CD_1_TTB0_MASK);
	} else {
		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);

		/*
		 * Unless the HW forces stalling, clear S and R so outstanding
		 * DMA aborts immediately instead of stalling — this speeds up
		 * cleanup on an unclean exit while the device is still busy.
		 */
		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
			target->data[0] &=
				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
	}

	/*
	 * MAIR is effectively constant and global, so just mirror the current
	 * CPU's register value.
	 */
	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
}
186+
131187
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
132188
{
133189
u16 asid;
134190
int err = 0;
135-
u64 tcr, par, reg;
136191
struct arm_smmu_ctx_desc *cd;
137192
struct arm_smmu_ctx_desc *ret = NULL;
138193

@@ -166,39 +221,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
166221
if (err)
167222
goto out_free_asid;
168223

169-
tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
170-
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
171-
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
172-
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
173-
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
174-
175-
switch (PAGE_SIZE) {
176-
case SZ_4K:
177-
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
178-
break;
179-
case SZ_16K:
180-
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
181-
break;
182-
case SZ_64K:
183-
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
184-
break;
185-
default:
186-
WARN_ON(1);
187-
err = -EINVAL;
188-
goto out_free_asid;
189-
}
190-
191-
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
192-
par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
193-
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
194-
195-
cd->ttbr = virt_to_phys(mm->pgd);
196-
cd->tcr = tcr;
197-
/*
198-
* MAIR value is pretty much constant and global, so we can just get it
199-
* from the current CPU register
200-
*/
201-
cd->mair = read_sysreg(mair_el1);
202224
cd->asid = asid;
203225
cd->mm = mm;
204226

@@ -276,6 +298,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
276298
{
277299
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
278300
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
301+
struct arm_smmu_master *master;
302+
unsigned long flags;
279303

280304
mutex_lock(&sva_lock);
281305
if (smmu_mn->cleared) {
@@ -287,8 +311,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
287311
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
288312
* but disable translation.
289313
*/
290-
arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
291-
&quiet_cd);
314+
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
315+
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
316+
struct arm_smmu_cd target;
317+
struct arm_smmu_cd *cdptr;
318+
319+
cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
320+
if (WARN_ON(!cdptr))
321+
continue;
322+
arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
323+
arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
324+
&target);
325+
}
326+
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
292327

293328
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
294329
arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
@@ -383,6 +418,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
383418
struct mm_struct *mm)
384419
{
385420
int ret;
421+
struct arm_smmu_cd target;
422+
struct arm_smmu_cd *cdptr;
386423
struct arm_smmu_bond *bond;
387424
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
388425
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -409,9 +446,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
409446
goto err_free_bond;
410447
}
411448

412-
ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
413-
if (ret)
449+
cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
450+
if (!cdptr) {
451+
ret = -ENOMEM;
414452
goto err_put_notifier;
453+
}
454+
arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
455+
arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
415456

416457
list_add(&bond->list, &master->bonds);
417458
return 0;

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

Lines changed: 7 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -83,12 +83,6 @@ struct arm_smmu_option_prop {
8383
DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
8484
DEFINE_MUTEX(arm_smmu_asid_lock);
8585

86-
/*
87-
* Special value used by SVA when a process dies, to quiesce a CD without
88-
* disabling it.
89-
*/
90-
struct arm_smmu_ctx_desc quiet_cd = { 0 };
91-
9286
static struct arm_smmu_option_prop arm_smmu_options[] = {
9387
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
9488
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
@@ -1200,7 +1194,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
12001194
u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
12011195
CTXDESC_L1_DESC_V;
12021196

1203-
/* See comment in arm_smmu_write_ctx_desc() */
1197+
/* The HW has 64 bit atomicity with stores to the L2 CD table */
12041198
WRITE_ONCE(*dst, cpu_to_le64(val));
12051199
}
12061200

@@ -1223,12 +1217,15 @@ struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
12231217
return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
12241218
}
12251219

1226-
static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
1227-
u32 ssid)
1220+
struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
1221+
u32 ssid)
12281222
{
12291223
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
12301224
struct arm_smmu_device *smmu = master->smmu;
12311225

1226+
might_sleep();
1227+
iommu_group_mutex_assert(master->dev);
1228+
12321229
if (!cd_table->cdtab) {
12331230
if (arm_smmu_alloc_cd_tables(master))
12341231
return NULL;
@@ -1346,65 +1343,6 @@ void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
13461343
arm_smmu_write_cd_entry(master, ssid, cdptr, &target);
13471344
}
13481345

1349-
int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
1350-
struct arm_smmu_ctx_desc *cd)
1351-
{
1352-
/*
1353-
* This function handles the following cases:
1354-
*
1355-
* (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
1356-
* (2) Install a secondary CD, for SID+SSID traffic.
1357-
* (4) Quiesce the context without clearing the valid bit. Disable
1358-
* translation, and ignore any translation fault.
1359-
*/
1360-
u64 val;
1361-
struct arm_smmu_cd target;
1362-
struct arm_smmu_cd *cdptr = &target;
1363-
struct arm_smmu_cd *cd_table_entry;
1364-
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
1365-
struct arm_smmu_device *smmu = master->smmu;
1366-
1367-
if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
1368-
return -E2BIG;
1369-
1370-
cd_table_entry = arm_smmu_alloc_cd_ptr(master, ssid);
1371-
if (!cd_table_entry)
1372-
return -ENOMEM;
1373-
1374-
target = *cd_table_entry;
1375-
val = le64_to_cpu(cdptr->data[0]);
1376-
1377-
if (cd == &quiet_cd) { /* (4) */
1378-
val &= ~(CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 |
1379-
CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 |
1380-
CTXDESC_CD_0_TCR_SH0);
1381-
if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
1382-
val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
1383-
val |= CTXDESC_CD_0_TCR_EPD0;
1384-
cdptr->data[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
1385-
} else { /* (1) and (2) */
1386-
cdptr->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
1387-
cdptr->data[2] = 0;
1388-
cdptr->data[3] = cpu_to_le64(cd->mair);
1389-
1390-
val = cd->tcr |
1391-
#ifdef __BIG_ENDIAN
1392-
CTXDESC_CD_0_ENDI |
1393-
#endif
1394-
CTXDESC_CD_0_R | CTXDESC_CD_0_A |
1395-
(cd->mm ? 0 : CTXDESC_CD_0_ASET) |
1396-
CTXDESC_CD_0_AA64 |
1397-
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
1398-
CTXDESC_CD_0_V;
1399-
1400-
if (cd_table->stall_enabled)
1401-
val |= CTXDESC_CD_0_S;
1402-
}
1403-
cdptr->data[0] = cpu_to_le64(val);
1404-
arm_smmu_write_cd_entry(master, ssid, cd_table_entry, &target);
1405-
return 0;
1406-
}
1407-
14081346
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
14091347
{
14101348
int ret;
@@ -1413,7 +1351,6 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
14131351
struct arm_smmu_device *smmu = master->smmu;
14141352
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
14151353

1416-
cd_table->stall_enabled = master->stall_enabled;
14171354
cd_table->s1cdmax = master->ssid_bits;
14181355
max_contexts = 1 << cd_table->s1cdmax;
14191356

@@ -1511,7 +1448,7 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
15111448
val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
15121449
val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
15131450

1514-
/* See comment in arm_smmu_write_ctx_desc() */
1451+
/* The HW has 64 bit atomicity with stores to the L2 STE table */
15151452
WRITE_ONCE(*dst, cpu_to_le64(val));
15161453
}
15171454

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -608,8 +608,6 @@ struct arm_smmu_ctx_desc_cfg {
608608
u8 s1fmt;
609609
/* log2 of the maximum number of CDs supported by this table */
610610
u8 s1cdmax;
611-
/* Whether CD entries in this table have the stall bit set. */
612-
u8 stall_enabled:1;
613611
};
614612

615613
struct arm_smmu_s2_cfg {
@@ -748,20 +746,19 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
748746

749747
extern struct xarray arm_smmu_asid_xa;
750748
extern struct mutex arm_smmu_asid_lock;
751-
extern struct arm_smmu_ctx_desc quiet_cd;
752749

753750
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
754751
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
755752
u32 ssid);
753+
struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
754+
u32 ssid);
756755
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
757756
struct arm_smmu_master *master,
758757
struct arm_smmu_domain *smmu_domain);
759758
void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
760759
struct arm_smmu_cd *cdptr,
761760
const struct arm_smmu_cd *target);
762761

763-
int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
764-
struct arm_smmu_ctx_desc *cd);
765762
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
766763
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
767764
size_t granule, bool leaf,

0 commit comments

Comments (0)