Skip to content

Commit 0b7eb55

Browse files
rikvanriel authored and Ingo Molnar committed
x86/mm: Only do broadcast flush from reclaim if pages were unmapped
Track whether pages were unmapped from any MM (even ones with a currently empty mm_cpumask) by the reclaim code, to figure out whether or not broadcast TLB flush should be done when reclaim finishes. The reason any MM must be tracked, and not only ones contributing to the tlbbatch cpumask, is that broadcast ASIDs are expected to be kept up to date even on CPUs where the MM is not currently active. This change allows reclaim to avoid doing TLB flushes when only clean page cache pages and/or slab memory were reclaimed, which is fairly common. ( This is a simpler alternative to the code that was in my INVLPGB series before, and it seems to capture most of the benefit due to how common it is to reclaim only page cache. ) Signed-off-by: Rik van Riel <riel@surriel.com> Signed-off-by: Ingo Molnar <mingo@kernel.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: https://lore.kernel.org/r/20250319132520.6b10ad90@fangorn
1 parent de844ef commit 0b7eb55

File tree

3 files changed

+8
-1
lines changed

3 files changed

+8
-1
lines changed

arch/x86/include/asm/tlbbatch.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,11 @@ struct arch_tlbflush_unmap_batch {
1010
* the PFNs being flushed..
1111
*/
1212
struct cpumask cpumask;
13+
/*
14+
* Set if pages were unmapped from any MM, even one that does not
15+
* have active CPUs in its cpumask.
16+
*/
17+
bool unmapped_pages;
1318
};
1419

1520
#endif /* _ARCH_X86_TLBBATCH_H */

arch/x86/include/asm/tlbflush.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -353,6 +353,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
353353
{
354354
inc_mm_tlb_gen(mm);
355355
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
356+
batch->unmapped_pages = true;
356357
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
357358
}
358359

arch/x86/mm/tlb.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1633,8 +1633,9 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
16331633
* a local TLB flush is needed. Optimize this use-case by calling
16341634
* flush_tlb_func_local() directly in this case.
16351635
*/
1636-
if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
1636+
if (cpu_feature_enabled(X86_FEATURE_INVLPGB) && batch->unmapped_pages) {
16371637
invlpgb_flush_all_nonglobals();
1638+
batch->unmapped_pages = false;
16381639
} else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
16391640
flush_tlb_multi(&batch->cpumask, info);
16401641
} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {

0 commit comments

Comments (0)