1
1
#[ macro_use]
2
2
extern crate alloc;
3
3
4
+ #[ macro_use]
5
+ extern crate ctor;
6
+
4
7
use std:: sync:: Arc ;
5
8
use std:: thread;
6
9
use std:: thread:: sleep;
@@ -11,23 +14,23 @@ use alloc::alloc::Layout;
11
14
use buddy_system_allocator:: LockedHeap ;
12
15
use criterion:: { black_box, criterion_group, criterion_main, Criterion } ;
13
16
14
- const LARGE_SIZE : usize = 1024 ;
15
- const SMALL_SIZE : usize = 8 ;
16
- const THREAD_SIZE : usize = 10 ;
17
+ // use for first three benchmark
17
18
const ALIGN : usize = 8 ;
18
19
19
20
#[ inline]
20
- pub fn large_alloc < const ORDER : usize > ( heap : & LockedHeap < ORDER > ) {
21
- let layout = unsafe { Layout :: from_size_align_unchecked ( LARGE_SIZE , ALIGN ) } ;
21
+ pub fn small_alloc < const ORDER : usize > ( heap : & LockedHeap < ORDER > ) {
22
+ const SMALL_SIZE : usize = 8 ;
23
+ let layout = unsafe { Layout :: from_size_align_unchecked ( SMALL_SIZE , ALIGN ) } ;
22
24
unsafe {
23
25
let addr = heap. alloc ( layout) ;
24
26
heap. dealloc ( addr, layout) ;
25
27
}
26
28
}
27
29
28
30
#[ inline]
29
- pub fn small_alloc < const ORDER : usize > ( heap : & LockedHeap < ORDER > ) {
30
- let layout = unsafe { Layout :: from_size_align_unchecked ( SMALL_SIZE , ALIGN ) } ;
31
+ pub fn large_alloc < const ORDER : usize > ( heap : & LockedHeap < ORDER > ) {
32
+ const LARGE_SIZE : usize = 1024 ;
33
+ let layout = unsafe { Layout :: from_size_align_unchecked ( LARGE_SIZE , ALIGN ) } ;
31
34
unsafe {
32
35
let addr = heap. alloc ( layout) ;
33
36
heap. dealloc ( addr, layout) ;
@@ -36,6 +39,7 @@ pub fn small_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
36
39
37
40
#[ inline]
38
41
pub fn mutil_thread_alloc < const ORDER : usize > ( heap : & ' static LockedHeap < ORDER > ) {
42
+ const THREAD_SIZE : usize = 10 ;
39
43
let mut threads = Vec :: with_capacity ( THREAD_SIZE ) ;
40
44
let alloc = Arc :: new ( heap) ;
41
45
for i in 0 ..THREAD_SIZE {
@@ -80,63 +84,72 @@ pub fn mutil_thread_alloc<const ORDER: usize>(heap: &'static LockedHeap<ORDER>)
80
84
/// ----------------------------------------------------------------------
81
85
///
82
86
/// Rust port of the classic `threadtest` allocator benchmark.
///
/// Spawns `N_THREADS` workers; each one performs `N_ITERATIONS` rounds of
/// pushing `N_OBJECTS / N_THREADS` freshly heap-allocated `Foo` arrays into a
/// local vector, all through the global allocator under test. Everything is
/// dropped when the closure ends, so the allocator also handles the bulk free.
#[inline]
pub fn thread_test() {
    const N_ITERATIONS: usize = 50;
    const N_OBJECTS: usize = 3000;
    const N_THREADS: usize = 10;
    const OBJECT_SIZE: usize = 1;

    // Tiny payload type so each `vec![Foo; OBJECT_SIZE]` is a small heap array.
    #[derive(Clone)]
    struct Foo {
        pub a: i32,
        pub b: i32,
    }

    let mut workers = Vec::with_capacity(N_THREADS);

    for _ in 0..N_THREADS {
        workers.push(thread::spawn(move || {
            // Mirrors the C++ original's `a = new Foo*[nobjects / nthreads]`.
            // NOTE(review): this capacity covers a single iteration's pushes;
            // across all N_ITERATIONS the vector grows well past it, so the
            // measured workload includes Vec regrowth — confirm that is
            // intended before changing it.
            let mut objects = Vec::with_capacity(N_OBJECTS / N_THREADS);
            for _ in 0..N_ITERATIONS {
                // Inner objects: `a[i] = new Foo[objSize]` — one heap
                // allocation per pushed element.
                for _ in 0..(N_OBJECTS / N_THREADS) {
                    objects.push(vec![Foo { a: 0, b: 1 }; OBJECT_SIZE]);
                }
            }
            // `objects` (and every inner Vec) drops here, freeing it all.
        }));
    }

    for worker in workers {
        worker.join().unwrap();
    }
}
123
121
124
122
/// Const-generic order used to instantiate `LockedHeap` below.
const ORDER: usize = 32;
/// Bytes per machine word; the backing heap array is `usize`-typed.
const MACHINE_ALIGN: usize = core::mem::size_of::<usize>();
/// Total heap handed to the allocator: 16 MiB.
const KERNEL_HEAP_SIZE: usize = 16 * 1024 * 1024;
/// Number of `usize` elements needed to back `KERNEL_HEAP_SIZE` bytes.
const HEAP_BLOCK: usize = KERNEL_HEAP_SIZE / MACHINE_ALIGN;
/// Static backing storage for the benchmark heap; its address range is
/// handed to the allocator in `init_heap`.
static mut HEAP: [usize; HEAP_BLOCK] = [0; HEAP_BLOCK];
130
127
131
- pub fn criterion_benchmark ( c : & mut Criterion ) {
132
- // init heap
128
+ /// Use `LockedHeap` as global allocator
129
+ #[ global_allocator]
130
+ static HEAP_ALLOCATOR : LockedHeap < ORDER > = LockedHeap :: < ORDER > :: new ( ) ;
131
+
132
+ /// # Init heap
133
+ ///
134
+ /// We need `ctor` here because benchmark is running behind the std enviroment,
135
+ /// which means std will do some initialization before execute `fn main()`.
136
+ /// However, our memory allocator must be init in runtime(use linkedlist, which
137
+ /// can not be evaluated in compile time). And in the initialization phase, heap
138
+ /// memory is needed.
139
+ ///
140
+ /// So the solution in this dilemma is to run `fn init_heap()` in initialization phase
141
+ /// rather than in `fn main()`. We need `ctor` to do this.
142
+ #[ ctor]
143
+ fn init_heap ( ) {
133
144
let heap_start = unsafe { HEAP . as_ptr ( ) as usize } ;
134
145
unsafe {
135
146
HEAP_ALLOCATOR
136
147
. lock ( )
137
148
. init ( heap_start, HEAP_BLOCK * MACHINE_ALIGN ) ;
138
149
}
150
+ }
139
151
152
+ pub fn criterion_benchmark ( c : & mut Criterion ) {
140
153
// run benchmark
141
154
c. bench_function ( "small alloc" , |b| {
142
155
b. iter ( || small_alloc ( black_box ( & HEAP_ALLOCATOR ) ) )
@@ -147,9 +160,7 @@ pub fn criterion_benchmark(c: &mut Criterion) {
147
160
c. bench_function ( "mutil thread alloc" , |b| {
148
161
b. iter ( || mutil_thread_alloc ( black_box ( & HEAP_ALLOCATOR ) ) )
149
162
} ) ;
150
- c. bench_function ( "threadtest" , |b| {
151
- b. iter ( || thread_test ( black_box ( & HEAP_ALLOCATOR ) ) )
152
- } ) ;
163
+ c. bench_function ( "threadtest" , |b| b. iter ( || thread_test ( ) ) ) ;
153
164
}
154
165
155
166
criterion_group ! ( benches, criterion_benchmark) ;
0 commit comments