@@ -142,17 +142,24 @@ static inline unsigned long get_trans_granule(void)
* EL1, Inner Shareable".
*
*/
- #define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
- ({ \
-         unsigned long __ta = (baddr); \
-         unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0; \
-         __ta &= GENMASK_ULL(36, 0); \
-         __ta |= __ttl << 37; \
-         __ta |= (unsigned long)(num) << 39; \
-         __ta |= (unsigned long)(scale) << 44; \
-         __ta |= get_trans_granule() << 46; \
-         __ta |= (unsigned long)(asid) << 48; \
-         __ta; \
+ #define TLBIR_ASID_MASK  GENMASK_ULL(63, 48)
+ #define TLBIR_TG_MASK    GENMASK_ULL(47, 46)
+ #define TLBIR_SCALE_MASK GENMASK_ULL(45, 44)
+ #define TLBIR_NUM_MASK   GENMASK_ULL(43, 39)
+ #define TLBIR_TTL_MASK   GENMASK_ULL(38, 37)
+ #define TLBIR_BADDR_MASK GENMASK_ULL(36, 0)
+
+ #define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
+ ({ \
+         unsigned long __ta = 0; \
+         unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0; \
+         __ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr); \
+         __ta |= FIELD_PREP(TLBIR_TTL_MASK, __ttl); \
+         __ta |= FIELD_PREP(TLBIR_NUM_MASK, num); \
+         __ta |= FIELD_PREP(TLBIR_SCALE_MASK, scale); \
+         __ta |= FIELD_PREP(TLBIR_TG_MASK, get_trans_granule()); \
+         __ta |= FIELD_PREP(TLBIR_ASID_MASK, asid); \
+         __ta; \
})

/* These macros are used by the TLBI RANGE feature. */
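For reference, the same field packing can be exercised in user space. The helpers below are simplified stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP() from <linux/bits.h>/<linux/bitfield.h>, and the operand values (4KB granule, ASID 42, scale 0, num 3, base address 0x1234) are made up purely for illustration; the bit layout mirrors the TLBIR_* masks introduced in the hunk above.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel helpers; not the real definitions. */
#define GENMASK_ULL(h, l)   ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, v) (((uint64_t)(v) << __builtin_ctzll(mask)) & (mask))

#define TLBIR_ASID_MASK  GENMASK_ULL(63, 48)
#define TLBIR_TG_MASK    GENMASK_ULL(47, 46)
#define TLBIR_SCALE_MASK GENMASK_ULL(45, 44)
#define TLBIR_NUM_MASK   GENMASK_ULL(43, 39)
#define TLBIR_TTL_MASK   GENMASK_ULL(38, 37)
#define TLBIR_BADDR_MASK GENMASK_ULL(36, 0)

int main(void)
{
        uint64_t ta = 0;

        /* Hypothetical operand: base address 0x1234 (VA >> 12 for 4KB pages),
         * TTL 3, num 3, scale 0, TG 1 (4KB granule), ASID 42.
         */
        ta |= FIELD_PREP(TLBIR_BADDR_MASK, 0x1234);
        ta |= FIELD_PREP(TLBIR_TTL_MASK, 3);
        ta |= FIELD_PREP(TLBIR_NUM_MASK, 3);
        ta |= FIELD_PREP(TLBIR_SCALE_MASK, 0);
        ta |= FIELD_PREP(TLBIR_TG_MASK, 1);
        ta |= FIELD_PREP(TLBIR_ASID_MASK, 42);

        printf("TLBI range operand: 0x%016llx\n", (unsigned long long)ta);
        return 0;
}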
@@ -161,12 +168,18 @@ static inline unsigned long get_trans_granule(void)
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)

/*
- * Generate 'num' values from -1 to 30 with -1 rejected by the
- * __flush_tlb_range() loop below.
+ * Generate 'num' values from -1 to 31 with -1 rejected by the
+ * __flush_tlb_range() loop below. Its return value is only
+ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
+ * 'pages' is more than that, you must iterate over the overall
+ * range.
*/
- #define TLBI_RANGE_MASK GENMASK_ULL(4, 0)
- #define __TLBI_RANGE_NUM(pages, scale) \
-         ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+ #define __TLBI_RANGE_NUM(pages, scale) \
+ ({ \
+         int __pages = min((pages), \
+                           __TLBI_RANGE_PAGES(31, (scale))); \
+         (__pages >> (5 * (scale) + 1)) - 1; \
+ })

/*
* TLB Invalidation
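A quick way to see what the clamp in the new __TLBI_RANGE_NUM() buys: with the old mask-based macro, a fully aligned 64-page range at scale 0 wraps to num = -1 and is rejected, while the clamped version returns num = 31. The OLD_/NEW_ names below are illustrative renames for a user-space comparison, not kernel identifiers.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define __TLBI_RANGE_PAGES(num, scale) ((unsigned long)((num) + 1) << (5 * (scale) + 1))

/* Old encoding: the 5-bit mask wraps, so num can never reach 31. */
#define OLD_TLBI_RANGE_NUM(pages, scale) \
        ((((pages) >> (5 * (scale) + 1)) & 0x1f) - 1)

/* New encoding: clamp to the largest range representable at this scale. */
#define NEW_TLBI_RANGE_NUM(pages, scale) \
        ({ \
                long __pages = min((long)(pages), \
                                   (long)__TLBI_RANGE_PAGES(31, (scale))); \
                (int)((__pages >> (5 * (scale) + 1)) - 1); \
        })

int main(void)
{
        /* 64 pages at scale 0: old wraps to -1, new yields the full num = 31. */
        printf("old: %d  new: %d\n",
               OLD_TLBI_RANGE_NUM(64, 0), NEW_TLBI_RANGE_NUM(64, 0));
        return 0;
}

At scale 3 the same change is what lets a single operation cover the full MAX_TLBI_RANGE_PAGES, which the later hunks rely on.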
@@ -379,10 +392,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* 3. If there is 1 page remaining, flush it through non-range operations. Range
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
*/
#define __flush_tlb_range_op(op, start, pages, stride, \
                             asid, tlb_level, tlbi_user, lpa2) \
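To illustrate the decomposition that the __flush_tlb_range_op() loop performs, here is a rough user-space sketch: it walks scale from 3 down to 0, emits one range operation per usable (scale, num) pair, and leaves a final odd page for a plain non-range invalidation, as the comment above describes. It omits the real loop's LPA2 alignment handling and the TLBI instructions themselves, and it assumes the caller already capped 'pages' at MAX_TLBI_RANGE_PAGES.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define __TLBI_RANGE_PAGES(num, scale) ((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define __TLBI_RANGE_NUM(pages, scale) \
        ({ \
                long __pages = min((long)(pages), \
                                   (long)__TLBI_RANGE_PAGES(31, (scale))); \
                (int)((__pages >> (5 * (scale) + 1)) - 1); \
        })

/* Assumes pages <= MAX_TLBI_RANGE_PAGES, as enforced by the callers. */
static void decompose(unsigned long pages)
{
        int scale = 3;

        while (pages > 1) {
                int num = __TLBI_RANGE_NUM(pages, scale);

                if (num >= 0) {
                        printf("  range op: scale=%d num=%d -> %lu pages\n",
                               scale, num, __TLBI_RANGE_PAGES(num, scale));
                        pages -= __TLBI_RANGE_PAGES(num, scale);
                }
                scale--;
        }
        if (pages == 1)
                printf("  single-page op for the last page\n");
}

int main(void)
{
        decompose(4096 + 64 + 1);       /* arbitrary example count */
        return 0;
}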
@@ -437,11 +446,11 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
* When not uses TLB range ops, we can handle up to
* (MAX_DVM_OPS - 1) pages;
* When uses TLB range ops, we can handle up to
- * (MAX_TLBI_RANGE_PAGES - 1) pages.
+ * MAX_TLBI_RANGE_PAGES pages.
*/
if ((!system_supports_tlb_range() &&
     (end - start) >= (MAX_DVM_OPS * stride)) ||
-     pages >= MAX_TLBI_RANGE_PAGES) {
+     pages > MAX_TLBI_RANGE_PAGES) {
        flush_tlb_mm(vma->vm_mm);
        return;
}
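The boundary arithmetic behind the '>=' to '>' change can be checked in user space (the 4KB page size and the MB figure below are only for illustration):

#include <stdio.h>

#define __TLBI_RANGE_PAGES(num, scale) ((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES __TLBI_RANGE_PAGES(31, 3)

int main(void)
{
        /* With the clamped __TLBI_RANGE_NUM(), exactly MAX_TLBI_RANGE_PAGES
         * pages is still coverable by range ops (one scale=3, num=31 blast),
         * so only a strictly larger count takes the flush_tlb_mm() fallback.
         */
        printf("MAX_TLBI_RANGE_PAGES = %lu pages (%lu MB with 4KB pages)\n",
               MAX_TLBI_RANGE_PAGES, MAX_TLBI_RANGE_PAGES * 4096 >> 20);
        return 0;
}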