
Commit c20748c

mm,vmm: improve get_free_pages() implementation
Use vmap_range() to handle multiple mapping areas automatically and consistently.
Standardize the rules for the virtual address returned by get_free_pages().

Signed-off-by: Pawel Wieczorkiewicz <wipawel@grsecurity.net>
1 parent 8109789 commit c20748c
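
For orientation, the reworked allocation path in get_free_pages() boils down to the following (a condensed reading of the mm/vmm.c hunks below, not separate documentation; all identifiers are ones the diff itself introduces or already uses):

    /* Allocate a frame, then map it once via vmap_range() into every requested area. */
    size = ORDER_TO_SIZE(order);               /* bytes covered by the requested order */
    pt_flags = order_to_flags(order);          /* leaf protection bits: L1/L2/L3_PROT */
    vmap_flags = gfp_to_vmap_flags(gfp_flags); /* translate GFP_* into VMAP_* areas */

    spin_lock(&mmap_lock);
    if (vmap_range(mfn_to_paddr(mfn), size, pt_flags, vmap_flags) == 0)
        va = gfp_mfn_to_virt(gfp_flags, mfn);  /* standardized return address */
    spin_unlock(&mmap_lock);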

File tree

3 files changed, 86 insertions(+), 33 deletions(-)

include/arch/x86/page.h
include/mm/vmm.h
mm/vmm.c


include/arch/x86/page.h

Lines changed: 12 additions & 0 deletions
@@ -275,6 +275,18 @@ static inline mfn_t virt_to_mfn(const void *va) {
     return paddr_to_mfn(virt_to_paddr(va));
 }
 
+static inline unsigned long order_to_flags(unsigned int order) {
+    switch (order) {
+    case PAGE_ORDER_2M:
+        return L2_PROT;
+    case PAGE_ORDER_1G:
+        return L3_PROT;
+    case PAGE_ORDER_4K:
+    default:
+        return L1_PROT;
+    }
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* KTF_PAGE_H */
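
As an aside, a minimal sketch of what the new helper yields per order (PAGE_ORDER_4K/2M/1G are assumed to be the existing KTF page-order constants; the sizes in the comments are the usual x86 mapping granularities, and the variable names are illustrative only):

    unsigned long pte_prot = order_to_flags(PAGE_ORDER_4K); /* L1_PROT: 4 KiB leaf mapping */
    unsigned long pde_prot = order_to_flags(PAGE_ORDER_2M); /* L2_PROT: 2 MiB leaf mapping */
    unsigned long pud_prot = order_to_flags(PAGE_ORDER_1G); /* L3_PROT: 1 GiB leaf mapping */
    /* Any other order falls back to L1_PROT via the default case. */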

include/mm/vmm.h

Lines changed: 6 additions & 3 deletions
@@ -27,12 +27,15 @@
 
 #include <page.h>
 
+/* clang-format off */
 enum gfp_flags {
-    GFP_KERNEL     = 0x00000001,
-    GFP_USER       = 0x00000002,
-    GFP_IDENT      = 0x00000004,
+    GFP_NONE       = 0x00000000,
+    GFP_IDENT      = 0x00000001,
+    GFP_USER       = 0x00000002,
+    GFP_KERNEL     = 0x00000004,
     GFP_KERNEL_MAP = 0x00000008,
 };
+/* clang-format on */
 typedef enum gfp_flags gfp_flags_t;
 
 /* External definitions */
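
The renumbering keeps each mapping area in its own bit and reserves 0 for GFP_NONE, so callers can request several areas at once by OR-ing flags, and a flag-less call can now be rejected outright. A minimal usage sketch (the caller below is hypothetical):

    /* Identity-map the frame and also expose it in the kernel map area; per the new
     * precedence rules the returned address comes from the kernel map (mfn_to_virt_map()). */
    void *va = get_free_pages(PAGE_ORDER_4K, GFP_IDENT | GFP_KERNEL_MAP);

    /* get_free_pages(order, GFP_NONE) now trips the ASSERT() added in mm/vmm.c. */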

mm/vmm.c

Lines changed: 68 additions & 30 deletions
@@ -33,10 +33,70 @@
 /* Used by higher level mmap_range() functions - must be taken before vmap_lock */
 static spinlock_t mmap_lock = SPINLOCK_INIT;
 
-void *get_free_pages(unsigned int order, gfp_flags_t flags) {
-    frame_t *frame;
+static inline vmap_flags_t gfp_to_vmap_flags(gfp_flags_t gfp_flags) {
+    vmap_flags_t vmap_flags = VMAP_NONE;
+
+    if (gfp_flags == GFP_USER)
+        return VMAP_KERNEL_USER | VMAP_USER;
+
+    if (gfp_flags & GFP_IDENT) {
+        vmap_flags |= VMAP_IDENT;
+        if (gfp_flags & GFP_USER)
+            vmap_flags |= VMAP_USER_IDENT;
+    }
+
+    if (gfp_flags & GFP_KERNEL) {
+        vmap_flags |= VMAP_KERNEL;
+        if (gfp_flags & GFP_USER)
+            vmap_flags |= VMAP_USER_KERNEL;
+    }
+
+    if (gfp_flags & GFP_KERNEL_MAP) {
+        vmap_flags |= VMAP_KERNEL_MAP;
+        if (gfp_flags & GFP_USER)
+            vmap_flags |= VMAP_USER_KERNEL_MAP;
+    }
+
+    return vmap_flags;
+}
+
+static inline void *gfp_mfn_to_virt(gfp_flags_t gfp_flags, mfn_t mfn) {
+    /* Return virtual address if a single area is specified ... */
+    switch (gfp_flags) {
+    case GFP_IDENT:
+        return mfn_to_virt(mfn);
+    case GFP_KERNEL_MAP:
+        return mfn_to_virt_map(mfn);
+    case GFP_USER:
+        return mfn_to_virt_user(mfn);
+    case GFP_KERNEL:
+        return mfn_to_virt_kern(mfn);
+    default:
+        /* Otherwise, return kernel addresses if specified before identity
+         * mapping or user. The below order reflects most common uses.
+         */
+        if (gfp_flags & GFP_KERNEL_MAP)
+            return mfn_to_virt_map(mfn);
+        else if (gfp_flags & GFP_KERNEL)
+            return mfn_to_virt_kern(mfn);
+        else if (gfp_flags & GFP_IDENT)
+            return mfn_to_virt(mfn);
+        else if (gfp_flags & GFP_USER)
+            return mfn_to_virt_user(mfn);
+    }
+
+    return NULL;
+}
+
+void *get_free_pages(unsigned int order, gfp_flags_t gfp_flags) {
     void *va = NULL;
+    frame_t *frame;
     mfn_t mfn;
+    size_t size;
+    unsigned long pt_flags;
+    vmap_flags_t vmap_flags;
+
+    ASSERT(gfp_flags != GFP_NONE);
 
     if (!boot_flags.virt)
         panic("Unable to use %s() before final page tables are set", __func__);
@@ -46,35 +106,13 @@ void *get_free_pages(unsigned int order, gfp_flags_t flags) {
         return va;
     mfn = frame->mfn;
 
-    spin_lock(&mmap_lock);
-    if (flags == GFP_USER) {
-        va = vmap_kern(mfn_to_virt_user(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                       L1_PROT);
-        vmap_user(mfn_to_virt_user(mfn), mfn, order, L4_PROT_USER, L3_PROT_USER,
-                  L2_PROT_USER, L1_PROT_USER);
-    }
-
-    if (flags & GFP_IDENT) {
-        va = vmap_kern(mfn_to_virt(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT);
-        if (flags & GFP_USER)
-            vmap_user(mfn_to_virt(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT);
-    }
-
-    if (flags & GFP_KERNEL) {
-        va = vmap_kern(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                       L1_PROT);
-        if (flags & GFP_USER)
-            vmap_user(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                      L1_PROT);
-    }
+    size = ORDER_TO_SIZE(order);
+    pt_flags = order_to_flags(order);
+    vmap_flags = gfp_to_vmap_flags(gfp_flags);
 
-    if (flags & GFP_KERNEL_MAP) {
-        va = vmap_kern(mfn_to_virt_map(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                       L1_PROT);
-        if (flags & GFP_USER)
-            vmap_user(mfn_to_virt_map(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                      L1_PROT);
-    }
+    spin_lock(&mmap_lock);
+    if (vmap_range(mfn_to_paddr(mfn), size, pt_flags, vmap_flags) == 0)
+        va = gfp_mfn_to_virt(gfp_flags, mfn);
     spin_unlock(&mmap_lock);
 
     return va;
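
Read together, the helpers above standardize which virtual address get_free_pages() hands back. A hedged summary, with hypothetical callers for illustration:

    void *ident = get_free_pages(PAGE_ORDER_4K, GFP_IDENT);              /* mfn_to_virt(): identity-mapped VA */
    void *kmap  = get_free_pages(PAGE_ORDER_4K, GFP_KERNEL_MAP);         /* mfn_to_virt_map() */
    void *kern  = get_free_pages(PAGE_ORDER_2M, GFP_IDENT | GFP_KERNEL); /* kernel area wins: mfn_to_virt_kern() */
    void *user  = get_free_pages(PAGE_ORDER_4K, GFP_USER);               /* mfn_to_virt_user(); the area is mapped
                                                                          * into both kernel and user page tables */

For combined requests the precedence is GFP_KERNEL_MAP, then GFP_KERNEL, then GFP_IDENT, then GFP_USER; NULL is returned if vmap_range() fails.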
