 /* Used by higher level mmap_range() functions - must be taken before vmap_lock */
 static spinlock_t mmap_lock = SPINLOCK_INIT;

-void *get_free_pages(unsigned int order, gfp_flags_t flags) {
-    frame_t *frame;
+static inline vmap_flags_t gfp_to_vmap_flags(gfp_flags_t gfp_flags) {
+    vmap_flags_t vmap_flags = VMAP_NONE;
+
+    if (gfp_flags == GFP_USER)
+        return VMAP_KERNEL_USER | VMAP_USER;
+
+    if (gfp_flags & GFP_IDENT) {
+        vmap_flags |= VMAP_IDENT;
+        if (gfp_flags & GFP_USER)
+            vmap_flags |= VMAP_USER_IDENT;
+    }
+
+    if (gfp_flags & GFP_KERNEL) {
+        vmap_flags |= VMAP_KERNEL;
+        if (gfp_flags & GFP_USER)
+            vmap_flags |= VMAP_USER_KERNEL;
+    }
+
+    if (gfp_flags & GFP_KERNEL_MAP) {
+        vmap_flags |= VMAP_KERNEL_MAP;
+        if (gfp_flags & GFP_USER)
+            vmap_flags |= VMAP_USER_KERNEL_MAP;
+    }
+
+    return vmap_flags;
+}
+
+static inline void *gfp_mfn_to_virt(gfp_flags_t gfp_flags, mfn_t mfn) {
+    /* Return virtual address if a single area is specified ... */
+    switch (gfp_flags) {
+    case GFP_IDENT:
+        return mfn_to_virt(mfn);
+    case GFP_KERNEL_MAP:
+        return mfn_to_virt_map(mfn);
+    case GFP_USER:
+        return mfn_to_virt_user(mfn);
+    case GFP_KERNEL:
+        return mfn_to_virt_kern(mfn);
+    default:
+        /* Otherwise, return kernel addresses if specified before identity
+         * mapping or user. The below order reflects most common uses.
+         */
+        if (gfp_flags & GFP_KERNEL_MAP)
+            return mfn_to_virt_map(mfn);
+        else if (gfp_flags & GFP_KERNEL)
+            return mfn_to_virt_kern(mfn);
+        else if (gfp_flags & GFP_IDENT)
+            return mfn_to_virt(mfn);
+        else if (gfp_flags & GFP_USER)
+            return mfn_to_virt_user(mfn);
+    }
+
+    return NULL;
+}
+
+void *get_free_pages(unsigned int order, gfp_flags_t gfp_flags) {
     void *va = NULL;
+    frame_t *frame;
     mfn_t mfn;
+    size_t size;
+    unsigned long pt_flags;
+    vmap_flags_t vmap_flags;
+
+    ASSERT(gfp_flags != GFP_NONE);

     if (!boot_flags.virt)
         panic("Unable to use %s() before final page tables are set", __func__);
@@ -46,35 +106,13 @@ void *get_free_pages(unsigned int order, gfp_flags_t flags) {
         return va;
     mfn = frame->mfn;

-    spin_lock(&mmap_lock);
-    if (flags == GFP_USER) {
-        va = vmap_kern(mfn_to_virt_user(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                       L1_PROT);
-        vmap_user(mfn_to_virt_user(mfn), mfn, order, L4_PROT_USER, L3_PROT_USER,
-                  L2_PROT_USER, L1_PROT_USER);
-    }
-
-    if (flags & GFP_IDENT) {
-        va = vmap_kern(mfn_to_virt(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT);
-        if (flags & GFP_USER)
-            vmap_user(mfn_to_virt(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT);
-    }
-
-    if (flags & GFP_KERNEL) {
-        va = vmap_kern(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                       L1_PROT);
-        if (flags & GFP_USER)
-            vmap_user(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                      L1_PROT);
-    }
+    size = ORDER_TO_SIZE(order);
+    pt_flags = order_to_flags(order);
+    vmap_flags = gfp_to_vmap_flags(gfp_flags);

-    if (flags & GFP_KERNEL_MAP) {
-        va = vmap_kern(mfn_to_virt_map(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                       L1_PROT);
-        if (flags & GFP_USER)
-            vmap_user(mfn_to_virt_map(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT,
-                      L1_PROT);
-    }
+    spin_lock(&mmap_lock);
+    if (vmap_range(mfn_to_paddr(mfn), size, pt_flags, vmap_flags) == 0)
+        va = gfp_mfn_to_virt(gfp_flags, mfn);

     spin_unlock(&mmap_lock);

     return va;
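For reference, a minimal caller-side sketch of the refactored allocator (not part of the diff): it assumes order 0 requests a single page and that PAGE_SIZE and memset() are available from the project headers; only get_free_pages() and the GFP_* flags come from the change above.

/* Sketch: allocate one frame mapped into both the kernel and user virtual ranges. */
static void example_alloc(void) {
    void *va = get_free_pages(0, GFP_KERNEL | GFP_USER);

    if (!va)
        return;

    /* gfp_to_vmap_flags(GFP_KERNEL | GFP_USER) yields VMAP_KERNEL | VMAP_USER_KERNEL,
     * so vmap_range() installs both mappings under mmap_lock, and
     * gfp_mfn_to_virt() returns the kernel-range address
     * (priority: GFP_KERNEL_MAP, then GFP_KERNEL, GFP_IDENT, GFP_USER).
     */
    memset(va, 0, PAGE_SIZE);
}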