@@ -34,25 +34,6 @@ struct arm_smmu_bond {
 
 static DEFINE_MUTEX(sva_lock);
 
-/*
- * Write the CD to the CD tables for all masters that this domain is attached
- * to. Note that this is only used to update existing CD entries in the target
- * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
- */
-static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
-					     int ssid,
-					     struct arm_smmu_ctx_desc *cd)
-{
-	struct arm_smmu_master *master;
-	unsigned long flags;
-
-	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		arm_smmu_write_ctx_desc(master, ssid, cd);
-	}
-	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-}
-
 static void
 arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
 {
@@ -128,11 +109,85 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	return NULL;
 }
 
+static u64 page_size_to_cd(void)
+{
+	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
+		      PAGE_SIZE == SZ_64K);
+	if (PAGE_SIZE == SZ_64K)
+		return ARM_LPAE_TCR_TG0_64K;
+	if (PAGE_SIZE == SZ_16K)
+		return ARM_LPAE_TCR_TG0_16K;
+	return ARM_LPAE_TCR_TG0_4K;
+}
+
+static void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+				 struct arm_smmu_master *master,
+				 struct mm_struct *mm, u16 asid)
+{
+	u64 par;
+
+	memset(target, 0, sizeof(*target));
+
+	par = cpuid_feature_extract_unsigned_field(
+		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
+		ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+	target->data[0] = cpu_to_le64(
+		CTXDESC_CD_0_TCR_EPD1 |
+#ifdef __BIG_ENDIAN
+		CTXDESC_CD_0_ENDI |
+#endif
+		CTXDESC_CD_0_V |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
+		CTXDESC_CD_0_AA64 |
+		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+		CTXDESC_CD_0_R |
+		CTXDESC_CD_0_A |
+		CTXDESC_CD_0_ASET |
+		FIELD_PREP(CTXDESC_CD_0_ASID, asid));
+
+	/*
+	 * If no MM is passed then this creates a SVA entry that faults
+	 * everything. arm_smmu_write_cd_entry() can hitlessly go between these
+	 * two entries types since TTB0 is ignored by HW when EPD0 is set.
+	 */
+	if (mm) {
+		target->data[0] |= cpu_to_le64(
+			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
+				   64ULL - vabits_actual) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
+				   ARM_LPAE_TCR_RGN_WBWA) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
+				   ARM_LPAE_TCR_RGN_WBWA) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));
+
+		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
+					      CTXDESC_CD_1_TTB0_MASK);
+	} else {
+		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);
+
+		/*
+		 * Disable stall and immediately generate an abort if stall
+		 * disable is permitted. This speeds up cleanup for an unclean
+		 * exit if the device is still doing a lot of DMA.
+		 */
+		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+			target->data[0] &=
+				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
+	}
+
+	/*
+	 * MAIR value is pretty much constant and global, so we can just get it
+	 * from the current CPU register
+	 */
+	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+}
+
 static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 {
 	u16 asid;
 	int err = 0;
-	u64 tcr, par, reg;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_ctx_desc *ret = NULL;
 
@@ -166,39 +221,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 	if (err)
 		goto out_free_asid;
 
-	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
-	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-
-	switch (PAGE_SIZE) {
-	case SZ_4K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
-		break;
-	case SZ_16K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
-		break;
-	case SZ_64K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
-		break;
-	default:
-		WARN_ON(1);
-		err = -EINVAL;
-		goto out_free_asid;
-	}
-
-	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
-	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
-
-	cd->ttbr = virt_to_phys(mm->pgd);
-	cd->tcr = tcr;
-	/*
-	 * MAIR value is pretty much constant and global, so we can just get it
-	 * from the current CPU register
-	 */
-	cd->mair = read_sysreg(mair_el1);
 	cd->asid = asid;
 	cd->mm = mm;
 
@@ -276,6 +298,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+	struct arm_smmu_master *master;
+	unsigned long flags;
 
 	mutex_lock(&sva_lock);
 	if (smmu_mn->cleared) {
@@ -287,8 +311,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 	 * but disable translation.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-					 &quiet_cd);
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		struct arm_smmu_cd target;
+		struct arm_smmu_cd *cdptr;
+
+		cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+		if (WARN_ON(!cdptr))
+			continue;
+		arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
+		arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
+					&target);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
 	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
@@ -383,6 +418,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
 				  struct mm_struct *mm)
 {
 	int ret;
+	struct arm_smmu_cd target;
+	struct arm_smmu_cd *cdptr;
 	struct arm_smmu_bond *bond;
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -409,9 +446,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
 		goto err_free_bond;
 	}
 
-	ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
-	if (ret)
+	cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+	if (!cdptr) {
+		ret = -ENOMEM;
 		goto err_put_notifier;
+	}
+	arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
+	arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
 
 	list_add(&bond->list, &master->bonds);
 	return 0;