
Commit f7edb07

pjaroszynski authored and willdeacon committed
Fix mmu notifiers for range-based invalidates
Update the __flush_tlb_range_op macro not to modify its parameters, as these are unexpected semantics. In practice, this fixes the call to mmu_notifier_arch_invalidate_secondary_tlbs() in __flush_tlb_range_nosync() to use the correct range instead of an empty range with start == end. The empty range was (un)lucky, as it results in taking the invalidate-all path, which doesn't cause correctness issues but can certainly result in suboptimal performance.

This has been broken since commit 6bbd42e ("mmu_notifiers: call invalidate_range() when invalidating TLBs"), when the call to the notifiers was added to __flush_tlb_range(). It predates the addition of the __flush_tlb_range_op() macro in commit 3608390 ("arm64: tlb: Refactor the core flush algorithm of __flush_tlb_range"), which made the bug hard to spot.

Fixes: 6bbd42e ("mmu_notifiers: call invalidate_range() when invalidating TLBs")
Signed-off-by: Piotr Jaroszynski <pjaroszynski@nvidia.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Raghavendra Rao Ananta <rananta@google.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Nicolin Chen <nicolinc@nvidia.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: iommu@lists.linux.dev
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Cc: stable@vger.kernel.org
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Link: https://lore.kernel.org/r/20250304085127.2238030-1-pjaroszynski@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
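To see the hazard in isolation, here is a minimal, self-contained userspace sketch, not kernel code; flush_one_page(), notify_secondary_tlbs() and the FLUSH_RANGE_* macros are hypothetical stand-ins. The buggy macro consumes its start/pages arguments, so the caller's follow-up notification sees start == end; the fixed variant copies the arguments into typeof() locals first, the same idiom this patch applies to __flush_tlb_range_op().

#include <stdio.h>

static void flush_one_page(unsigned long addr)
{
	printf("flush %#lx\n", addr);
}

/* Buggy pattern: the macro consumes its 'start' and 'pages' arguments. */
#define FLUSH_RANGE_BUGGY(start, pages, stride)		\
do {							\
	while ((pages) > 0) {				\
		flush_one_page(start);			\
		(start) += (stride);			\
		(pages)--;				\
	}						\
} while (0)

/* Fixed pattern: operate on typeof() copies; the caller's variables survive. */
#define FLUSH_RANGE_FIXED(start, pages, stride)		\
do {							\
	typeof(start) __flush_start = (start);		\
	typeof(pages) __flush_pages = (pages);		\
	while (__flush_pages > 0) {			\
		flush_one_page(__flush_start);		\
		__flush_start += (stride);		\
		__flush_pages--;			\
	}						\
} while (0)

static void notify_secondary_tlbs(unsigned long start, unsigned long end)
{
	printf("notify [%#lx, %#lx)%s\n", start, end,
	       start == end ? " <- empty range" : "");
}

int main(void)
{
	unsigned long start = 0x1000, end = 0x4000, pages = 3;

	FLUSH_RANGE_BUGGY(start, pages, 0x1000);
	notify_secondary_tlbs(start, end);	/* start == end: empty range */

	start = 0x1000;
	pages = 3;
	FLUSH_RANGE_FIXED(start, pages, 0x1000);
	notify_secondary_tlbs(start, end);	/* intended [0x1000, 0x4000) */
	return 0;
}

Built with gcc (typeof is a GNU C extension, as in the kernel), the first notification reports the empty range [0x4000, 0x4000) and the second reports the intended [0x1000, 0x4000).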
1 parent d4234d1 commit f7edb07

1 file changed: +12 −10 lines

arch/arm64/include/asm/tlbflush.h

Lines changed: 12 additions & 10 deletions
@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user, lpa2)	\
 do {									\
+	typeof(start) __flush_start = start;				\
+	typeof(pages) __flush_pages = pages;				\
 	int num = 0;							\
 	int scale = 3;							\
 	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
 	unsigned long addr;						\
 									\
-	while (pages > 0) {						\
+	while (__flush_pages > 0) {					\
 		if (!system_supports_tlb_range() ||			\
-		    pages == 1 ||					\
-		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
-			addr = __TLBI_VADDR(start, asid);		\
+		    __flush_pages == 1 ||				\
+		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
+			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
 			if (tlbi_user)					\
 				__tlbi_user_level(op, addr, tlb_level);	\
-			start += stride;				\
-			pages -= stride >> PAGE_SHIFT;			\
+			__flush_start += stride;			\
+			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
 		}							\
 									\
-		num = __TLBI_RANGE_NUM(pages, scale);			\
+		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
 						scale, num, tlb_level);	\
 			__tlbi(r##op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-			pages -= __TLBI_RANGE_PAGES(num, scale);	\
+			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
 		scale--;						\
 	}								\
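With the typeof() copies in place, the macro no longer advances the caller's start and pages variables, so, as the commit message notes, __flush_tlb_range_nosync() now passes the original, non-empty range to mmu_notifier_arch_invalidate_secondary_tlbs() rather than an empty one that falls back to the invalidate-all path.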
