
Commit 3c6539b

thestinger authored and KAGA-KOKO committed
x86/vdso: Move vDSO to mmap region
The vDSO (and its initial randomization) was introduced in commit 2aae950 ("x86_64: Add vDSO for x86-64 with gettimeofday/clock_gettime/getcpu"), but had very low entropy. The entropy was improved in commit 394f56f ("x86_64, vdso: Fix the vdso address randomization algorithm"), but there is still improvement to be made.

In principle there should not be executable code at a low entropy offset from the stack, since the stack and executable code having separate randomization is part of what makes ASLR stronger.

Remove the only executable code near the stack region and give the vDSO the same randomized base as other mmap mappings including the linker and other shared objects. This results in higher entropy being provided and there's little to no advantage in separating this from the existing executable code there. This is already how other architectures like arm64 handle the vDSO.

As an aside, while it's sensible for userspace to reserve the initial mmap base as a region for executable code with a random gap for other mmap allocations, along with providing randomization within that region, there isn't much the kernel can do to help due to how dynamic linkers load the shared objects.

This was extracted from the PaX RANDMMAP feature.

[kees: updated commit log with historical details and other tweaks]

Signed-off-by: Daniel Micay <danielmicay@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Closes: KSPP#280
Link: https://lore.kernel.org/r/20240210091827.work.233-kees@kernel.org
1 parent b7bcffe commit 3c6539b
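
For context, userspace can read the vDSO base address from the AT_SYSINFO_EHDR entry of the auxiliary vector, which makes the effect of this commit easy to observe. The following is a minimal illustrative sketch, not part of the commit; the file name and the suggestion to compare against /proc/self/maps are assumptions made for the example:

/* vdso_base.c - hedged illustration, not kernel code: print the vDSO base
 * handed to this process via the auxiliary vector.  Run it several times
 * and compare the addresses with the shared-library mappings listed in
 * /proc/self/maps. */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
        /* AT_SYSINFO_EHDR is the address of the vDSO ELF header. */
        unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

        if (!vdso) {
                fprintf(stderr, "no vDSO mapped for this process\n");
                return 1;
        }
        printf("vDSO base: 0x%lx\n", vdso);
        return 0;
}

With this commit applied, the printed address is expected to fall in the mmap region alongside the dynamic linker and other shared objects rather than just above the stack.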

File tree

3 files changed: +2 -63 lines changed

arch/x86/entry/vdso/vma.c

Lines changed: 2 additions & 55 deletions
@@ -274,59 +274,6 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 	return ret;
 }

-#ifdef CONFIG_X86_64
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset. This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top. This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
-	unsigned long addr, end;
-	unsigned offset;
-
-	/*
-	 * Round up the start address. It can start out unaligned as a result
-	 * of stack start randomization.
-	 */
-	start = PAGE_ALIGN(start);
-
-	/* Round the lowest possible end address up to a PMD boundary. */
-	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= DEFAULT_MAP_WINDOW)
-		end = DEFAULT_MAP_WINDOW;
-	end -= len;
-
-	if (end > start) {
-		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
-		addr = start + (offset << PAGE_SHIFT);
-	} else {
-		addr = start;
-	}
-
-	/*
-	 * Forcibly align the final address in case we have a hardware
-	 * issue that requires alignment for performance reasons.
-	 */
-	addr = align_vdso_addr(addr);
-
-	return addr;
-}
-
-static int map_vdso_randomized(const struct vdso_image *image)
-{
-	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
-
-	return map_vdso(image, addr);
-}
-#endif
-
 int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 {
 	struct mm_struct *mm = current->mm;
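
For a sense of scale, the placement removed above confines the vDSO to the page-aligned slots between the rounded-up stack top and the next PMD boundary, so it can never offer more than PMD_SIZE / PAGE_SIZE starting addresses. A rough back-of-the-envelope sketch, not kernel code, using the usual x86-64 constants (4 KiB pages, 2 MiB PMDs) as assumptions:

/* Hedged sketch, not kernel code: upper bound on the randomness of the
 * removed vdso_addr() placement.  Only page-aligned slots within one PMD
 * above the stack top are possible, so the slot count is bounded by
 * PMD_SIZE / PAGE_SIZE. */
#include <math.h>
#include <stdio.h>

int main(void)
{
        const unsigned long page_size = 4096UL;         /* x86-64 base page */
        const unsigned long pmd_size = 2UL << 20;       /* 2 MiB PMD */
        unsigned long max_slots = pmd_size / page_size; /* 512 */

        printf("at most %lu slots, about %.0f bits of entropy\n",
               max_slots, log2((double)max_slots));     /* ~9 bits */
        return 0;
}

By contrast, the mmap base that the vDSO now shares is randomized with mmap_rnd_bits worth of entropy (typically 28 bits on x86-64), which is the improvement the commit message refers to.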
@@ -369,7 +316,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!vdso64_enabled)
 		return 0;

-	return map_vdso_randomized(&vdso_image_64);
+	return map_vdso(&vdso_image_64, 0);
 }

 #ifdef CONFIG_COMPAT
@@ -380,7 +327,7 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
 	if (x32) {
 		if (!vdso64_enabled)
 			return 0;
-		return map_vdso_randomized(&vdso_image_x32);
+		return map_vdso(&vdso_image_x32, 0);
 	}
 #endif
 #ifdef CONFIG_IA32_EMULATION
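
Both 64-bit entry points above now pass addr == 0 to map_vdso(), i.e. no placement hint, so the address comes from the kernel's normal unhinted mmap placement. A userspace analogy, purely illustrative and not the kernel code path itself:

/* Hedged userspace analogy, not kernel code: an unhinted mmap(NULL, ...)
 * lets the kernel pick an address from the mmap region, whose base is
 * already randomized by ASLR.  The vDSO now shares that region. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        printf("kernel-chosen address: %p\n", p);
        munmap(p, 4096);
        return 0;
}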

arch/x86/include/asm/elf.h

Lines changed: 0 additions & 1 deletion
@@ -392,5 +392,4 @@ struct va_alignment {
 } ____cacheline_aligned;

 extern struct va_alignment va_align;
-extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */

arch/x86/kernel/sys_x86_64.c

Lines changed: 0 additions & 7 deletions
@@ -52,13 +52,6 @@ static unsigned long get_align_bits(void)
 	return va_align.bits & get_align_mask();
 }

-unsigned long align_vdso_addr(unsigned long addr)
-{
-	unsigned long align_mask = get_align_mask();
-	addr = (addr + align_mask) & ~align_mask;
-	return addr | get_align_bits();
-}
-
 static int __init control_va_addr_alignment(char *str)
 {
 	/* guard against enabling this on other CPU families */
