|
| 1 | +#[macro_use] |
| 2 | +extern crate alloc; |
| 3 | + |
| 4 | +use std::sync::Arc; |
| 5 | +use std::thread; |
| 6 | +use std::thread::sleep; |
| 7 | +use std::time::Duration; |
| 8 | + |
| 9 | +use alloc::alloc::GlobalAlloc; |
| 10 | +use alloc::alloc::Layout; |
| 11 | +use buddy_system_allocator::LockedHeap; |
| 12 | +use criterion::{black_box, criterion_group, criterion_main, Criterion}; |
| 13 | + |
| 14 | +#[inline] |
| 15 | +pub fn large_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) { |
| 16 | + let layout = unsafe { Layout::from_size_align_unchecked(1024, 8) }; |
| 17 | + unsafe { |
| 18 | + let addr = heap.alloc(layout); |
| 19 | + heap.dealloc(addr, layout); |
| 20 | + } |
| 21 | +} |
| 22 | + |
| 23 | +#[inline] |
| 24 | +pub fn small_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) { |
| 25 | + let layout = unsafe { Layout::from_size_align_unchecked(8, 8) }; |
| 26 | + unsafe { |
| 27 | + let addr = heap.alloc(layout); |
| 28 | + heap.dealloc(addr, layout); |
| 29 | + } |
| 30 | +} |
| 31 | + |
| 32 | +#[inline] |
| 33 | +pub fn mutil_thread_alloc<const ORDER: usize>(heap: &'static LockedHeap<ORDER>) { |
| 34 | + let mut threads = vec![]; |
| 35 | + let alloc = Arc::new(heap); |
| 36 | + for i in 0..10 { |
| 37 | + let a = alloc.clone(); |
| 38 | + let handle = thread::spawn(move || { |
| 39 | + let layout = unsafe { Layout::from_size_align_unchecked(i * 10, 8) }; |
| 40 | + let addr; |
| 41 | + unsafe { addr = a.alloc(layout) } |
| 42 | + sleep(Duration::from_nanos(10 - i as u64)); |
| 43 | + unsafe { a.dealloc(addr, layout) } |
| 44 | + }); |
| 45 | + threads.push(handle); |
| 46 | + } |
| 47 | + drop(alloc); |
| 48 | + |
| 49 | + for t in threads { |
| 50 | + t.join().unwrap(); |
| 51 | + } |
| 52 | +} |
| 53 | + |
// Buddy-allocator depth: 32 free-list levels, so blocks up to 2^31 bytes.
const ORDER: usize = 32;
// The allocator under test; `LockedHeap` wraps the buddy heap in a spinlock.
static HEAP_ALLOCATOR: LockedHeap<ORDER> = LockedHeap::<ORDER>::new();
// 16 MiB of backing storage for the benchmark heap.
const KERNEL_HEAP_SIZE: usize = 16 * 1024 * 1024;
// Element size of the backing array; using `usize` elements keeps the
// buffer machine-word aligned.
const MACHINE_ALIGN: usize = core::mem::size_of::<usize>();
// Number of `usize` elements needed to cover KERNEL_HEAP_SIZE bytes.
const HEAP_BLOCK: usize = KERNEL_HEAP_SIZE / MACHINE_ALIGN;
// Raw backing storage; only ever handed to the allocator during init,
// never accessed directly afterwards.
static mut HEAP: [usize; HEAP_BLOCK] = [0; HEAP_BLOCK];
| 60 | + |
| 61 | +pub fn criterion_benchmark(c: &mut Criterion) { |
| 62 | + // init heap |
| 63 | + let heap_start = unsafe { HEAP.as_ptr() as usize }; |
| 64 | + unsafe { |
| 65 | + HEAP_ALLOCATOR |
| 66 | + .lock() |
| 67 | + .init(heap_start, HEAP_BLOCK * MACHINE_ALIGN); |
| 68 | + } |
| 69 | + |
| 70 | + // run benchmark |
| 71 | + c.bench_function("small alloc", |b| { |
| 72 | + b.iter(|| small_alloc(black_box(&HEAP_ALLOCATOR))) |
| 73 | + }); |
| 74 | + c.bench_function("large alloc", |b| { |
| 75 | + b.iter(|| large_alloc(black_box(&HEAP_ALLOCATOR))) |
| 76 | + }); |
| 77 | + c.bench_function("mutil thread alloc", |b| { |
| 78 | + b.iter(|| mutil_thread_alloc(black_box(&HEAP_ALLOCATOR))) |
| 79 | + }); |
| 80 | +} |
| 81 | + |
// Register the benchmark function with Criterion's harness;
// `criterion_main!` expands into the `main` entry point of this bench binary.
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
0 commit comments