Skip to content

Commit e269b5d

Browse files
surenbaghdasaryan authored and akpm00 committed
alloc_tag: fix module allocation tags populated area calculation
vm_module_tags_populate() calculation of the populated area assumes that area starts at a page boundary and therefore when new pages are allocation, the end of the area is page-aligned as well. If the start of the area is not page-aligned then allocating a page and incrementing the end of the area by PAGE_SIZE leads to an area at the end but within the area boundary which is not populated. Accessing this are will lead to a kernel panic. Fix the calculation by down-aligning the start of the area and using that as the location allocated pages are mapped to. [gehao@kylinos.cn: fix vm_module_tags_populate's KASAN poisoning logic] Link: https://lkml.kernel.org/r/20241205170528.81000-1-hao.ge@linux.dev [gehao@kylinos.cn: fix panic when CONFIG_KASAN enabled and CONFIG_KASAN_VMALLOC not enabled] Link: https://lkml.kernel.org/r/20241212072126.134572-1-hao.ge@linux.dev Link: https://lkml.kernel.org/r/20241130001423.1114965-1-surenb@google.com Fixes: 0f9b685 ("alloc_tag: populate memory for module tags as needed") Signed-off-by: Suren Baghdasaryan <surenb@google.com> Reported-by: kernel test robot <oliver.sang@intel.com> Closes: https://lore.kernel.org/oe-lkp/202411132111.6a221562-lkp@intel.com Acked-by: Yu Zhao <yuzhao@google.com> Tested-by: Adrian Huang <ahuang12@lenovo.com> Cc: David Wang <00107082@163.com> Cc: Kent Overstreet <kent.overstreet@linux.dev> Cc: Mike Rapoport (Microsoft) <rppt@kernel.org> Cc: Pasha Tatashin <pasha.tatashin@soleen.com> Cc: Sourav Panda <souravpanda@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 640a603 commit e269b5d

File tree

1 file changed

+29
-5
lines changed

1 file changed

+29
-5
lines changed

lib/alloc_tag.c

Lines changed: 29 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -408,28 +408,52 @@ static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
408408

409409
static int vm_module_tags_populate(void)
410410
{
411-
unsigned long phys_size = vm_module_tags->nr_pages << PAGE_SHIFT;
411+
unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
412+
(vm_module_tags->nr_pages << PAGE_SHIFT);
413+
unsigned long new_end = module_tags.start_addr + module_tags.size;
412414

413-
if (phys_size < module_tags.size) {
415+
if (phys_end < new_end) {
414416
struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
415-
unsigned long addr = module_tags.start_addr + phys_size;
417+
unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
418+
unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
416419
unsigned long more_pages;
417420
unsigned long nr;
418421

419-
more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
422+
more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
420423
nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
421424
NUMA_NO_NODE, more_pages, next_page);
422425
if (nr < more_pages ||
423-
vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
426+
vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
424427
next_page, PAGE_SHIFT) < 0) {
425428
/* Clean up and error out */
426429
for (int i = 0; i < nr; i++)
427430
__free_page(next_page[i]);
428431
return -ENOMEM;
429432
}
433+
430434
vm_module_tags->nr_pages += nr;
435+
436+
/*
437+
* Kasan allocates 1 byte of shadow for every 8 bytes of data.
438+
* When kasan_alloc_module_shadow allocates shadow memory,
439+
* its unit of allocation is a page.
440+
* Therefore, here we need to align to MODULE_ALIGN.
441+
*/
442+
if (old_shadow_end < new_shadow_end)
443+
kasan_alloc_module_shadow((void *)old_shadow_end,
444+
new_shadow_end - old_shadow_end,
445+
GFP_KERNEL);
431446
}
432447

448+
/*
449+
* Mark the pages as accessible, now that they are mapped.
450+
* With hardware tag-based KASAN, marking is skipped for
451+
* non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
452+
*/
453+
kasan_unpoison_vmalloc((void *)module_tags.start_addr,
454+
new_end - module_tags.start_addr,
455+
KASAN_VMALLOC_PROT_NORMAL);
456+
433457
return 0;
434458
}
435459

0 commit comments

Comments
 (0)