
Commit a923705

Kefeng Wang authored and Catalin Marinas (ctmarinas) committed
arm64: optimize flush tlb kernel range
Currently the kernel TLB is flushed page by page if the target VA range is less than MAX_DVM_OPS * PAGE_SIZE; otherwise we brutally issue a TLBI ALL. But when the CPU supports TLB range operations we can do better: convert to __flush_tlb_range_op(), as the other TLB range flushes do, to improve performance.

Co-developed-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20240923131351.713304-3-wangkefeng.wang@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
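As context for the hunk below, here is a minimal userspace sketch of the alignment and page-count arithmetic the patch introduces. The PAGE_SHIFT value, the round_down_ul()/round_up_ul() helpers and the example addresses are simplified stand-ins chosen for illustration, not the kernel's own macros:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Simplified stand-ins for the kernel's round_down()/round_up() macros
 * (valid for power-of-two alignments only). */
static unsigned long round_down_ul(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

static unsigned long round_up_ul(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Hypothetical kernel VA range, not page-aligned at the start. */
	unsigned long start = 0xffff000012345678UL;
	unsigned long end   = 0xffff000012370000UL;
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages;

	/* Same steps as the patched flush_tlb_kernel_range(): align the
	 * range to the stride and convert it to a page count, which the
	 * kernel then hands to __flush_tlb_range_op() (or it falls back
	 * to flush_tlb_all() if the range is too large). */
	start = round_down_ul(start, stride);
	end = round_up_ul(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	printf("aligned range: %#lx-%#lx, %lu pages\n", start, end, pages);
	return 0;
}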
1 parent: 7ffc13e

1 file changed: +9 −7 lines changed

arch/arm64/include/asm/tlbflush.h

Lines changed: 9 additions & 7 deletions
@@ -501,19 +501,21 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	const unsigned long stride = PAGE_SIZE;
+	unsigned long pages;
 
-	if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
+	start = round_down(start, stride);
+	end = round_up(end, stride);
+	pages = (end - start) >> PAGE_SHIFT;
+
+	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
 		flush_tlb_all();
 		return;
 	}
 
-	start = __TLBI_VADDR(start, 0);
-	end = __TLBI_VADDR(end, 0);
-
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		__tlbi(vaale1is, addr);
+	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
+			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
 	dsb(ish);
 	isb();
 }
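
For readability, the full flush_tlb_kernel_range() as it reads after this patch, reconstructed from the hunk above:

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_all();
		return;
	}

	dsb(ishst);
	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
	dsb(ish);
	isb();
}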
