 #include <linux/mmu_notifier.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
+#include <kunit/visibility.h>
 
 #include "arm-smmu-v3.h"
 #include "../../io-pgtable-arm.h"
@@ -34,21 +35,25 @@ struct arm_smmu_bond {
 
 static DEFINE_MUTEX(sva_lock);
 
-/*
- * Write the CD to the CD tables for all masters that this domain is attached
- * to. Note that this is only used to update existing CD entries in the target
- * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
- */
-static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
-					     int ssid,
-					     struct arm_smmu_ctx_desc *cd)
+static void
+arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_master *master;
+	struct arm_smmu_cd target_cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		arm_smmu_write_ctx_desc(master, ssid, cd);
+		struct arm_smmu_cd *cdptr;
+
+		/* S1 domains only support RID attachment right now */
+		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		if (WARN_ON(!cdptr))
+			continue;
+
+		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+					&target_cd);
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 }
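
The replacement helper follows the make/install split this series introduces: the target CD is first composed in ordinary memory, and arm_smmu_write_cd_entry() then installs it into the live CD table, which is what allows the update to be sequenced hitlessly. A single-master distillation of the loop body above, as a sketch using only functions visible in this diff (illustrative, not a drop-in kernel function):

static void update_one_master(struct arm_smmu_master *master,
			      struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_cd target_cd;
	struct arm_smmu_cd *cdptr;

	/* S1 domains currently only attach via the RID (PASID 0) entry */
	cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
	if (WARN_ON(!cdptr))
		return;

	/* Compose in memory first, then install into the live table */
	arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
	arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, &target_cd);
}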
@@ -96,7 +101,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
+	arm_smmu_update_s1_domain_cd_entry(smmu_domain);
 
 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
@@ -105,11 +110,86 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	return NULL;
 }
 
+static u64 page_size_to_cd(void)
+{
+	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
+		      PAGE_SIZE == SZ_64K);
+	if (PAGE_SIZE == SZ_64K)
+		return ARM_LPAE_TCR_TG0_64K;
+	if (PAGE_SIZE == SZ_16K)
+		return ARM_LPAE_TCR_TG0_16K;
+	return ARM_LPAE_TCR_TG0_4K;
+}
+
+VISIBLE_IF_KUNIT
+void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
+			  struct arm_smmu_master *master, struct mm_struct *mm,
+			  u16 asid)
+{
+	u64 par;
+
+	memset(target, 0, sizeof(*target));
+
+	par = cpuid_feature_extract_unsigned_field(
+		read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
+		ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+
+	target->data[0] = cpu_to_le64(
+		CTXDESC_CD_0_TCR_EPD1 |
+#ifdef __BIG_ENDIAN
+		CTXDESC_CD_0_ENDI |
+#endif
+		CTXDESC_CD_0_V |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
+		CTXDESC_CD_0_AA64 |
+		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+		CTXDESC_CD_0_R |
+		CTXDESC_CD_0_A |
+		CTXDESC_CD_0_ASET |
+		FIELD_PREP(CTXDESC_CD_0_ASID, asid));
+
+	/*
+	 * If no MM is passed then this creates an SVA entry that faults
+	 * everything. arm_smmu_write_cd_entry() can hitlessly go between
+	 * these two entry types since TTB0 is ignored by HW when EPD0 is set.
+	 */
+	if (mm) {
+		target->data[0] |= cpu_to_le64(
+			FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
+				   64ULL - vabits_actual) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
+				   ARM_LPAE_TCR_RGN_WBWA) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
+				   ARM_LPAE_TCR_RGN_WBWA) |
+			FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));
+
+		target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
+					      CTXDESC_CD_1_TTB0_MASK);
+	} else {
+		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);
+
+		/*
+		 * Disable stall and immediately generate an abort if stall
+		 * disable is permitted. This speeds up cleanup for an unclean
+		 * exit if the device is still doing a lot of DMA.
+		 */
+		if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
+			target->data[0] &=
+				cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
+	}
+
+	/*
+	 * MAIR value is pretty much constant and global, so we can just get it
+	 * from the current CPU register
+	 */
+	target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
+}
+
 static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 {
 	u16 asid;
 	int err = 0;
-	u64 tcr, par, reg;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_ctx_desc *ret = NULL;
 
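arm_smmu_make_sva_cd() assembles CD dword 0 purely with FIELD_PREP() against the CTXDESC_CD_0_* masks, with a single cpu_to_le64() at the end. For readers unfamiliar with the idiom, a minimal self-contained sketch of the GENMASK()/FIELD_PREP() mechanics follows; the mask layout here is made up for illustration and is not the real CTXDESC encoding:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical descriptor word layout, for illustration only. */
#define DESC_V		BIT_ULL(0)
#define DESC_ASID	GENMASK_ULL(16, 1)	/* 16-bit field at bits [16:1] */
#define DESC_IPS	GENMASK_ULL(19, 17)	/* 3-bit field at bits [19:17] */

static u64 make_desc(u16 asid, u64 par)
{
	/*
	 * FIELD_PREP() shifts the value into the position given by the
	 * mask's lowest set bit, so independent fields can simply be
	 * OR-ed together, in any order.
	 */
	return DESC_V |
	       FIELD_PREP(DESC_ASID, asid) |
	       FIELD_PREP(DESC_IPS, par);
}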
@@ -143,39 +223,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
 	if (err)
 		goto out_free_asid;
 
-	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
-	      FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
-	      CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-
-	switch (PAGE_SIZE) {
-	case SZ_4K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
-		break;
-	case SZ_16K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
-		break;
-	case SZ_64K:
-		tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
-		break;
-	default:
-		WARN_ON(1);
-		err = -EINVAL;
-		goto out_free_asid;
-	}
-
-	reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-	par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
-	tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
-
-	cd->ttbr = virt_to_phys(mm->pgd);
-	cd->tcr = tcr;
-	/*
-	 * MAIR value is pretty much constant and global, so we can just get it
-	 * from the current CPU register
-	 */
-	cd->mair = read_sysreg(mair_el1);
 	cd->asid = asid;
 	cd->mm = mm;
 
@@ -253,6 +300,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
 	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+	struct arm_smmu_master *master;
+	unsigned long flags;
 
 	mutex_lock(&sva_lock);
 	if (smmu_mn->cleared) {
@@ -264,8 +313,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 	 * but disable translation.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-					 &quiet_cd);
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		struct arm_smmu_cd target;
+		struct arm_smmu_cd *cdptr;
+
+		cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+		if (WARN_ON(!cdptr))
+			continue;
+		arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
+		arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
+					&target);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
 	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
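
Instead of the old global quiet_cd, the release path now builds the quiet variant per master by calling arm_smmu_make_sva_cd() with mm == NULL: EPD0 gets set, so the hardware ignores TTB0 and walks nothing, while the CD stays valid (V set) to avoid C_BAD_CD events; where stalls may be disabled, R and S are also cleared so faulting DMA aborts immediately. A hedged usage sketch of the two variants, reusing the names from the surrounding code:

	struct arm_smmu_cd live, quiet;

	/* Translating entry: TTB0 points at mm->pgd. */
	arm_smmu_make_sva_cd(&live, master, mm, smmu_mn->cd->asid);

	/* Quiet entry: EPD0 set, TTB0 ignored, every access faults. */
	arm_smmu_make_sva_cd(&quiet, master, NULL, smmu_mn->cd->asid);

	/* Same ASID and layout, so switching between the two is hitless. */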
@@ -360,6 +420,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
 			       struct mm_struct *mm)
 {
 	int ret;
+	struct arm_smmu_cd target;
+	struct arm_smmu_cd *cdptr;
 	struct arm_smmu_bond *bond;
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -386,9 +448,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
 		goto err_free_bond;
 	}
 
-	ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
-	if (ret)
+	cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+	if (!cdptr) {
+		ret = -ENOMEM;
 		goto err_put_notifier;
+	}
+	arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
+	arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
 
 	list_add(&bond->list, &master->bonds);
 	return 0;
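
Because the descriptor is now composed before it is installed, the table write itself can no longer fail; the only remaining error in this hunk is allocating the CD table slot. A sketch of the resulting bind sequence (unwinding trimmed; illustrative, not the literal function):

static int sva_bind_sketch(struct arm_smmu_master *master, ioasid_t pasid,
			   struct mm_struct *mm, u16 asid)
{
	struct arm_smmu_cd target;
	struct arm_smmu_cd *cdptr;

	/*
	 * pasid == mm_get_enqcmd_pasid(mm) is guaranteed by the check
	 * added to arm_smmu_sva_set_dev_pasid() in the last hunk.
	 */
	cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
	if (!cdptr)
		return -ENOMEM;		/* nothing installed yet */

	arm_smmu_make_sva_cd(&target, master, mm, asid);
	arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
	return 0;
}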
@@ -546,7 +612,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 
 	mutex_lock(&sva_lock);
 
-	arm_smmu_write_ctx_desc(master, id, NULL);
+	arm_smmu_clear_cd(master, id);
 
 	list_for_each_entry(t, &master->bonds, list) {
 		if (t->mm == mm) {
@@ -569,6 +635,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 	int ret = 0;
 	struct mm_struct *mm = domain->mm;
 
+	if (mm_get_enqcmd_pasid(mm) != id)
+		return -EINVAL;
+
 	mutex_lock(&sva_lock);
 	ret = __arm_smmu_sva_bind(dev, id, mm);
 	mutex_unlock(&sva_lock);
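
This last hunk makes an existing assumption explicit: the SVA paths use the caller-supplied id and mm_get_enqcmd_pasid(mm) interchangeably (see __arm_smmu_sva_bind() above), so arm_smmu_sva_set_dev_pasid() now rejects any id that differs. The invariant below then holds in everything the bind path calls (illustrative assertion, not part of the patch):

	/* Guaranteed by the new check in arm_smmu_sva_set_dev_pasid(). */
	WARN_ON(pasid != mm_get_enqcmd_pasid(mm));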