
Commit ef92486

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "The main one is a horrible macro fix for our TLB flushing code which
  resulted in over-invalidation on the MMU notifier path.

  Summary:

   - Fix population of the vmemmap for regions of memory that are
     smaller than a section (128 MiB)

   - Fix range-based TLB over-invalidation when invoked via a MMU
     notifier"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  Fix mmu notifiers for range-based invalidates
  arm64: mm: Populate vmemmap at the page level if not section aligned
2 parents: 2eaca8a + f7edb07
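The macro bug is easiest to see in miniature. Below is a userspace sketch, not the kernel code: FLUSH_RANGE and its caller are illustrative stand-ins for __flush_tlb_range_op and the MMU notifier path. The macro advances its start and pages arguments in place, so a caller that reuses start afterwards to notify secondary TLBs reports a range past the one that was actually flushed.

/*
 * Toy model of the bug: the macro consumes the caller's variables,
 * so the follow-up "notifier" sees a shifted range.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Buggy shape: writes back to its 'start' and 'pages' arguments. */
#define FLUSH_RANGE(start, pages)				\
do {								\
	while ((pages) > 0) {					\
		/* ... invalidate one page at (start) ... */	\
		(start) += PAGE_SIZE;				\
		(pages) -= 1;					\
	}							\
} while (0)

int main(void)
{
	unsigned long start = 0x400000, pages = 4;
	unsigned long size = pages << PAGE_SHIFT;

	FLUSH_RANGE(start, pages);

	/*
	 * The caller now reports [start, start + size), but 'start' has
	 * been advanced to the end of the original range: it prints
	 * [0x404000, 0x408000) instead of [0x400000, 0x404000).
	 */
	printf("notifier range: [%#lx, %#lx)\n", start, start + size);
	return 0;
}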

2 files changed: +16 −11 lines


arch/arm64/include/asm/tlbflush.h

Lines changed: 12 additions & 10 deletions
@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user, lpa2)	\
 do {									\
+	typeof(start) __flush_start = start;				\
+	typeof(pages) __flush_pages = pages;				\
 	int num = 0;							\
 	int scale = 3;							\
 	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
 	unsigned long addr;						\
 									\
-	while (pages > 0) {						\
+	while (__flush_pages > 0) {					\
 		if (!system_supports_tlb_range() ||			\
-		    pages == 1 ||					\
-		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
-			addr = __TLBI_VADDR(start, asid);		\
+		    __flush_pages == 1 ||				\
+		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
+			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
 			if (tlbi_user)					\
 				__tlbi_user_level(op, addr, tlb_level);	\
-			start += stride;				\
-			pages -= stride >> PAGE_SHIFT;			\
+			__flush_start += stride;			\
+			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
 		}							\
 									\
-		num = __TLBI_RANGE_NUM(pages, scale);			\
+		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
 						  scale, num, tlb_level); \
 			__tlbi(r##op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-			pages -= __TLBI_RANGE_PAGES(num, scale);	\
+			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
 		scale--;						\
 	}								\
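For contrast, here is the toy macro from the sketch above with the local-copy idiom this hunk introduces: GNU typeof() declares macro-local shadows of the arguments, so the loop consumes the copies and the caller's variables survive. Again a userspace illustration only; the real macro issues TLBI instructions.

/* Fixed shape: operate on typeof() locals, leave the arguments alone. */
#define FLUSH_RANGE_FIXED(start, pages)				\
do {								\
	typeof(start) __flush_start = (start);			\
	typeof(pages) __flush_pages = (pages);			\
	while (__flush_pages > 0) {				\
		/* ... invalidate one page at __flush_start ... */ \
		__flush_start += PAGE_SIZE;			\
		__flush_pages -= 1;				\
	}							\
} while (0)

After FLUSH_RANGE_FIXED(start, pages), the caller's start is still 0x400000, so a subsequent notifier call covers exactly the range that was invalidated.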

arch/arm64/mm/mmu.c

Lines changed: 4 additions & 1 deletion
@@ -1177,8 +1177,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+	/* [start, end] should be within one section */
+	WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page));
 
-	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) ||
+	    (end - start < PAGES_PER_SECTION * sizeof(struct page)))
 		return vmemmap_populate_basepages(start, end, node, altmap);
 	else
 		return vmemmap_populate_hugepages(start, end, node, altmap);
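A back-of-the-envelope check makes the threshold concrete. Assuming 4 KiB pages, the 128 MiB section size the merge message cites, and a 64-byte struct page (a typical size, not guaranteed by the source), one section's worth of vmemmap is exactly 2 MiB, i.e. one PMD-sized huge mapping. A hotplugged region smaller than a section therefore needs less than one huge page of vmemmap, and the new check routes it to vmemmap_populate_basepages() instead.

/* Sketch of the arithmetic behind the check; constants are assumptions. */
#include <stdio.h>

int main(void)
{
	unsigned long section_size = 128UL << 20;	/* 128 MiB section */
	unsigned long page_size = 4096;			/* 4 KiB pages */
	unsigned long struct_page_size = 64;		/* sizeof(struct page) */

	unsigned long pages_per_section = section_size / page_size;	/* 32768 */
	unsigned long vmemmap_per_section = pages_per_section * struct_page_size;

	/* 32768 * 64 = 2 MiB: one PMD-sized hugepage of vmemmap. */
	printf("vmemmap per section: %lu KiB\n", vmemmap_per_section >> 10);
	return 0;
}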
