Skip to content

Commit 4bce609

Browse files
committed
mm: fully map the kernel to high memory
1 parent f4b50dd commit 4bce609

File tree

4 files changed

+168
-27
lines changed

4 files changed

+168
-27
lines changed

Makefile

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,10 @@ ARCH = x86_64
1919
ASM = nasm
2020
ASMOBJFORMAT = elf64
2121
ASMFLAGS = -w-zeroing
22-
LINKER_SCRIPT = ./defs/$(ARCH)-linker.ld
22+
LINKER_SCRIPT = ./defs/$(ARCH)-hm-linker.ld
2323
CARGO_XBUILD_TARGET = ./defs/$(ARCH)-rustubs.json
2424
CARGO_XBUILD_FLAGS =
25+
RUSTC_FLAGS := -C code-model=large
2526
# ---------- No need to edit below this line --------------
2627
# ---------- If you have to, something is wrong -----------
2728
LDFLAGS = -no-warn-rwx-segment -static -e startup
@@ -65,7 +66,7 @@ $(BUILD)/_%.o : %.s | $(BUILD)
6566
# define this, the linker will have troubles, especially when we use a "no_std" build
6667
rust_kernel: check
6768
@echo "---BUILDING RUST KERNEL---"
68-
@cargo xbuild --target $(CARGO_XBUILD_TARGET) $(CARGO_XBUILD_FLAGS)
69+
@RUSTFLAGS="$(RUSTC_FLAGS)" cargo xbuild --target $(CARGO_XBUILD_TARGET) $(CARGO_XBUILD_FLAGS)
6970

7071
# need nasm
7172
# TODO make this arch dependent

boot/startup-x86_64.s

Lines changed: 44 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -20,18 +20,29 @@ MAX_MEM: equ 512
2020
; they would no longer be accessible after the kernel switches to a higher half
2121
; mapping. This is especially true for the multiboot info data.
2222

23+
; Also be careful with imm width in asm instructions
24+
; many instructions do not take a 64-bit imm value, e.g. cmp. If the operand is
25+
; an extern symbol the linker may tell you xyz "truncate to fit". In which case
26+
; you should load the addresses or values into a register before using them
2327

2428
; exported symbols
2529
[GLOBAL startup]
2630
[GLOBAL mb_magic]
2731
[GLOBAL mb_info_addr]
2832
; functions from other parts of rustubs
29-
[EXTERN _entry]
3033
[EXTERN ___BSS_PM_START__]
3134
[EXTERN ___BSS_PM_END__]
32-
33-
[SECTION .text]
34-
35+
[EXTERN KERNEL_OFFSET]
36+
[EXTERN _entry]
37+
; =====================================================================
38+
; beginning of the text section: unlike the text* sections from the rust code
39+
; the text here is not supposed to be relocated to a higher memory,
40+
; as we can not use high memory until we completely set up longmode paging.
41+
; Therefore we explicitly link the startup text section to low address.
42+
; the same goes for the ".data32" section: they are not necessarily 32bit,
43+
; the point is to confine all addresses within the 4 GiB (32-bit) range
44+
; =====================================================================
45+
[SECTION .text32]
3546
; symbols used in 32bit mode:
3647
; mb_magic
3748
; mb_info_addr
@@ -45,6 +56,7 @@ startup:
4556
; EAX: magic value 0x2BADB002
4657
; EBX: 32-bit physical address of the multiboot information struct
4758
; we store them in global variables for future uses in rust code.
59+
; TODO place them on the stack and pass as parameters to _entry
4860
mov dword [mb_magic], eax
4961
mov dword [mb_info_addr], ebx
5062
; setup GDT by loading GDT descriptor
@@ -59,7 +71,7 @@ startup:
5971

6072
; define stack
6173
mov ss, ax
62-
mov esp, init_stack+STACKSIZE
74+
lea esp, init_stack+STACKSIZE
6375

6476
init_longmode:
6577
; activate address extension (PAE)
@@ -81,12 +93,13 @@ clear_pt:
8193
; table levels needed. see docs/x86_paging.txt
8294

8395
; PML4 (Page Map Level 4 / 1st level)
96+
; PML4 entry flags: 0xf = PRESENT | R/W | USER | Write Through
8497
mov eax, pdp0
8598
or eax, 0xf
8699
mov dword [pml4+0], eax
87100
mov dword [pml4+4], 0
88-
; PDPE flags
89-
mov eax, 0x0 | 0x87 ; start-address bytes bit [30:31] + flags
101+
; PDPE flags 0x83 = PageSize=1G | R/W | PRESENT
102+
mov eax, 0x0 | 0x83 ; start-address bytes bit [30:31] + flags
90103
mov ebx, 0 ; start-address bytes bit [32:38]
91104
mov ecx, 0
92105
fill_tables2:
@@ -126,22 +139,20 @@ activate_long_mode:
126139
; - symbols defined in 64 bit code below, if mapped to higher memory (VA)
127140
; - all symbols exported from rust code or linker script
128141
; =====================================================================
129-
130142
[BITS 64]
131143
longmode_start:
132144
; now we set the pagetables for higher half memory
133145
; since we have Provisional paging now, why not using 64bit code?
134-
mov eax, pdp1
135-
or eax, 0xf
136-
mov dword [pml4+256], eax
137-
mov dword [pml4+256+4], 0
138-
; PDPE flags, see above
139-
146+
; the 256th entry of pml4 points to memory from 0xffff_8000_0000_0000
147+
mov rax, pdp1
148+
; privileged, r/w, present
149+
or rax, 0x3
150+
mov qword [pml4+256*8], rax
140151
; entries 0~63 form an identity mapping with offset 0x8000_0000_0000
141-
; clear the BSS section before going to rust code
152+
; 1G Page | Privileged | R/W | PRESENT
142153
; TODO this should not be executable
143154
mov rax, 0x0
144-
or rax, 0x87
155+
or rax, 0x83
145156
mov rdi, 0
146157
fill_kvma1:
147158
mov qword [pdp1 + 8*rdi], rax
@@ -153,7 +164,7 @@ fill_kvma1:
153164
; entry 64~127 is a hole (also some sort of protection)
154165
; entries 128~191 are a mapping of the kernel image itself
155166
mov rax, 0x0
156-
or rax, 0x87
167+
or rax, 0x83
157168
mov rdi, 128
158169
fill_kvma2:
159170
mov qword [pdp1 + 8*rdi], rax
@@ -162,33 +173,43 @@ fill_kvma2:
162173
cmp rdi, 192
163174
jne fill_kvma2
164175
; done :-)
165-
166176
; clear BSS section for the rust code.
167177
mov rdi, ___BSS_PM_START__
178+
mov rax, ___BSS_PM_END__
168179
clear_bss:
180+
; clear the BSS section before going to rust code
181+
; TODO speed this up by clearing 8 bytes at once. Alignment should be taken
182+
; care of..
169183
mov byte [rdi], 0
170184
inc rdi
171-
cmp rdi, ___BSS_PM_END__
185+
cmp rdi, rax
172186
jne clear_bss
187+
173188
; enable FPU
174189
fninit
175190

176191
; NOTE: must NOT use sse target features for rust compiler, if sse not
177192
; enabled here.
178193

194+
; shift the rsp to high memory mapping:
195+
mov rax, KERNEL_OFFSET,
196+
or rsp, rax
179197
; finally go to the rust code!
180-
call _entry
198+
mov rax, _entry
199+
jmp rax
200+
181201
; should not reach below
182202
cli
183203
hlt
184204

185205
; =====================================================================
186206
; data sections they should all have VAs identical to their PAs
187207
; so we map these symbols differently than those generated by rust code
208+
; the "data" itself doesn't care about 64 or 32 bit width, but we need
209+
; to make sure they are not relocated to an address bigger than 4 GiB (32-bit)
188210
; =====================================================================
189211

190-
[SECTION .data]
191-
212+
[SECTION .data32]
192213
gdt:
193214
; see docs/x86_gdt.txt
194215

@@ -232,8 +253,7 @@ mb_magic:
232253
mb_info_addr:
233254
dd 0x00000000
234255

235-
[SECTION .bss]
236-
256+
[SECTION .init_k_stack]
237257
global init_stack:data (init_stack.end - init_stack)
238258
init_stack:
239259
resb STACKSIZE

defs/x86_64-hm-linker.ld

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
2+
/* defs for Multiboot headers */
3+
/* https://www.gnu.org/software/grub/manual/multiboot/multiboot.txt */
4+
MB_MAGIC = 0x1badb002;
5+
/* bit 0
6+
* all boot modules loaded along with the operating system must be
7+
* aligned on page (4 KiB) boundaries; bit 1: must include mem_* structures
8+
*/
9+
MB_FLAGS = 0x3;
10+
MB_CHKSUM = 0x100000000 - (MB_MAGIC + MB_FLAGS);
11+
12+
PROVIDE(KERNEL_OFFSET = 0xffff800000000000);
13+
14+
SECTIONS
15+
{
16+
. = 0x100000;
17+
PROVIDE (___KERNEL_PM_START__ = . );
18+
.boot :
19+
{
20+
header_start = .;
21+
LONG(MB_MAGIC)
22+
LONG(MB_FLAGS)
23+
LONG(MB_CHKSUM)
24+
LONG(0)
25+
LONG(0)
26+
LONG(0)
27+
LONG(0)
28+
LONG(0)
29+
LONG(0)
30+
LONG(0)
31+
LONG(0)
32+
LONG(0)
33+
header_end = .;
34+
}
35+
36+
37+
.d32 :
38+
{
39+
*(".data32")
40+
}
41+
42+
.reserved :
43+
{
44+
*(".reserved")
45+
*(".reserved.*")
46+
*(".gdt")
47+
}
48+
49+
/*
50+
* basically the same as BSS, but I want some flexibility and I don't care
51+
* for zeroing because it's explicitly overwritten anyways. I KNOW WHAT I'M
52+
* DOING! An example is the idt.
53+
*/
54+
.reserved_0 (NOLOAD) :
55+
{
56+
*(".init_k_stack")
57+
*(".reserved_0")
58+
*(".reserved_0.*")
59+
*(".reserved_0.init_stack")
60+
}
61+
62+
63+
/* global page table for 64-bit long mode */
64+
.global_pagetable ALIGN(4096) (NOLOAD) :
65+
{
66+
*(".global_pagetable")
67+
}
68+
69+
. = ALIGN(4096);
70+
/* reserve space for a primitive stack-based physical frame allocator */
71+
/* each frame is 4KiB in size and has a 64bit (physical) address. e.g. */
72+
/* for every 1 GiB physical memory we need 2 MiB space reserved for the */
73+
/* free stack. For easier bootstrapping we are using a fixed-size stack */
74+
/* array. Currently using 4GiB, therefore reserve 8MiB. */
75+
PROVIDE (___FREE_PAGE_STACK__ = .);
76+
.global_free_page_stack ALIGN(4096) (NOLOAD) :
77+
{
78+
*("..global_free_page_stack")
79+
}
80+
. = ALIGN(4096);
81+
82+
.t32 :
83+
{
84+
*(".text32")
85+
*(".text.interrupt_gate")
86+
}
87+
88+
. = . + KERNEL_OFFSET;
89+
.text : AT(ADDR(.text) - KERNEL_OFFSET)
90+
{
91+
*(".text")
92+
*(".text.*")
93+
*(".text$")
94+
}
95+
96+
.data : AT(ADDR(.data) - KERNEL_OFFSET)
97+
{
98+
*(".data")
99+
*(".data.*")
100+
*(".data$")
101+
}
102+
103+
.bss : AT(ADDR(.bss) - KERNEL_OFFSET)
104+
{
105+
PROVIDE (___BSS_PM_START__ = .);
106+
*(".bss")
107+
*(".bss.*")
108+
PROVIDE (___BSS_PM_END__ = .);
109+
}
110+
111+
.rodata : AT(ADDR(.rodata) - KERNEL_OFFSET)
112+
{
113+
*(".rodata")
114+
*(".rodata$")
115+
*(".rodata.*")
116+
}
117+
118+
PROVIDE (___KERNEL_PM_END__ = . - KERNEL_OFFSET);
119+
}

src/arch/x86_64/asm/vectors.s

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
1+
; vi: ft=nasm
12
; vectors.s - idt for x86_64
2-
3+
[BITS 64]
34
[GLOBAL idt]
45
[GLOBAL idt_descr]
56
[GLOBAL vectors_start]

0 commit comments

Comments
 (0)