
Commit 8d1f804

MatiasVara and priyasiddharth authored and committed

Add proof for conformance to section 2.7.7.2

Add the `verify_spec_2_7_7_2()` proof to verify that the queue implementation satisfies the requirement of section 2.7.7.2. The proof branches on whether the EVENT_IDX feature has been negotiated. In contrast to the `test_needs_notification()` test, this proof checks all possible values of the queue structure.

Signed-off-by: Matias Ezequiel Vara Larsen <mvaralar@redhat.com>
Signed-off-by: Siddharth Priya <s2priya@uwaterloo.ca>
1 parent a3bdfea commit 8d1f804
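
An aside on running the proof (the invocation below is an assumption, not part of the commit): with the Kani verifier installed, this harness can presumably be run with something like `cargo kani -p virtio-queue --harness verify_spec_2_7_7_2`. Unlike the unit test, Kani then explores every queue state admitted by the proof's `kani::assume()` precondition rather than a single concrete one.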

File tree

2 files changed, +258 −0 lines changed

virtio-queue/Cargo.toml

Lines changed: 12 additions & 0 deletions

@@ -23,6 +23,18 @@ criterion = "0.6.0"
 vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
 memoffset = "0.9.0"
 
+[target.'cfg(kani)'.dependencies]
+libc = "0.2.161"
+vm-memory = { workspace = true, features = ["backend-mmap"] }
+
 [[bench]]
 name = "main"
 harness = false
+
+# From https://model-checking.github.io/kani/usage.html#configuration-in-cargotoml
+#
+# Starting with Rust 1.80 (or nightly-2024-05-05), every reachable #[cfg] will be automatically
+# checked to ensure it matches the expected config names and values. To avoid warnings on
+# cfg(kani), we recommend adding the check-cfg lint config in your crate's Cargo.toml.
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }
virtio-queue/src/queue.rs

Lines changed: 246 additions & 0 deletions

@@ -269,6 +269,252 @@ impl Queue {
     }
 }
 
+#[cfg(kani)]
+#[allow(dead_code)]
+mod verification {
+    use std::mem::ManuallyDrop;
+    use std::num::Wrapping;
+
+    use vm_memory::FileOffset;
+    use vm_memory::MmapRegion;
+    use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};
+
+    use super::*;
+
+    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
+    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
+    /// search to determine which region a specific read or write request goes to,
+    /// this only uses a single region. Eliminating this binary search significantly
+    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
+    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
+    /// it works identically to `GuestMemoryMmap` with only a single contained region.
+    pub struct ProofGuestMemory {
+        the_region: vm_memory::GuestRegionMmap,
+    }
+
+    impl GuestMemory for ProofGuestMemory {
+        type R = vm_memory::GuestRegionMmap;
+
+        fn num_regions(&self) -> usize {
+            1
+        }
+
+        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
+            self.the_region
+                .to_region_addr(addr)
+                .map(|_| &self.the_region)
+        }
+
+        fn iter(&self) -> impl Iterator<Item = &Self::R> {
+            std::iter::once(&self.the_region)
+        }
+
+        fn try_access<F>(
+            &self,
+            count: usize,
+            addr: GuestAddress,
+            mut f: F,
+        ) -> vm_memory::guest_memory::Result<usize>
+        where
+            F: FnMut(
+                usize,
+                usize,
+                MemoryRegionAddress,
+                &Self::R,
+            ) -> vm_memory::guest_memory::Result<usize>,
+        {
+            // We only have a single region, meaning a lot of the complications of the default
+            // try_access implementation for dealing with reads/writes across multiple
+            // regions do not apply.
+            let region_addr = self
+                .the_region
+                .to_region_addr(addr)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            self.the_region
+                .checked_offset(region_addr, count)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            f(0, count, region_addr, &self.the_region)
+        }
+    }
+
+    pub struct ProofContext(pub Queue, pub ProofGuestMemory);
+
+    pub struct MmapRegionStub {
+        addr: *mut u8,
+        size: usize,
+        bitmap: (),
+        file_offset: Option<FileOffset>,
+        prot: i32,
+        flags: i32,
+        owned: bool,
+        hugetlbfs: Option<bool>,
+    }
+
+    /// We start the first guest memory region at an offset so that harnesses using
+    /// Queue::any() will be exposed to queue segments both before and after valid guest
+    /// memory. This conforms to MockSplitQueue::new(), which uses `0` as the starting
+    /// address of the virtqueue. Also, QUEUE_END equals the queue's size only if
+    /// GUEST_MEMORY_BASE is `0`.
+    const GUEST_MEMORY_BASE: u64 = 0;
+
+    // We size our guest memory to fit a properly aligned queue, plus some wiggle bytes
+    // to make sure we not only test queues where all segments are consecutively aligned.
+    // We need to give at least 16 bytes of buffer space for the descriptor table to be
+    // able to change its address, as it is 16-byte aligned.
+    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
+
+    fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
+        // Ideally, we'd want to do
+        // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
+        //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
+        //    .build()
+        //    .unwrap()};
+        // However, .build() calls .build_raw(), which contains a call to libc::sysconf.
+        // Since kani 0.34.0, stubbing out foreign functions is supported, but because the
+        // Rust standard library uses a special version of the libc crate, this runs into
+        // some problems [1]. Even if we work around those problems, we run into performance
+        // problems [2]. Therefore, for now we stick to this ugly transmute hack (which only
+        // works because the kani compiler will never re-order fields, so we can treat
+        // repr(Rust) as repr(C)).
+        //
+        // [1]: https://github.com/model-checking/kani/issues/2673
+        // [2]: https://github.com/model-checking/kani/issues/2538
+        let region_stub = MmapRegionStub {
+            addr: memory,
+            size: GUEST_MEMORY_SIZE,
+            bitmap: Default::default(),
+            file_offset: None,
+            prot: 0,
+            flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+            owned: false,
+            hugetlbfs: None,
+        };
+
+        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
+
+        let guest_region =
+            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
+
+        // Use a single memory region, just as firecracker does for guests of size < 2GB.
+        // For larger guests, firecracker uses two regions (due to the MMIO gap being
+        // at the top of the 32-bit address space).
+        ProofGuestMemory {
+            the_region: guest_region,
+        }
+    }
+
+    // can't implement kani::Arbitrary for the relevant types due to orphan rules
+    fn setup_kani_guest_memory() -> ProofGuestMemory {
+        // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec`
+        // for now, as `any_vec` will likely result in worse performance. We do not lose much
+        // by using `exact_vec`, as our proofs make no assumptions about "filling" guest
+        // memory: since everything is placed at non-deterministic addresses with
+        // non-deterministic lengths, we still closely cover all scenarios that would be
+        // covered by smaller guest memory. We leak the memory allocated here so that it
+        // doesn't get deallocated at the end of this function. We do not explicitly
+        // de-allocate, but since this is a kani proof, that does not matter.
+        guest_memory(
+            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
+        )
+    }
+
+    const MAX_QUEUE_SIZE: u16 = 256;
+
+    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
+    // at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
+    // specification.
+    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
+
+    /// The descriptor table has 16 bytes per entry; the avail ring starts right after it.
+    const AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
+
+    /// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE)
+    /// plus 2 bytes of padding.
+    const USED_RING_BASE_ADDRESS: u64 =
+        AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
+
+    /// The address of the first byte after the queue. Since our queue starts at guest
+    /// physical address 0, this is also the size of the memory area occupied by the queue.
+    /// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE.
+    const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
+
+    impl kani::Arbitrary for ProofContext {
+        fn any() -> Self {
+            let mem = setup_kani_guest_memory();
+
+            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+
+            queue.ready = true;
+
+            queue.set_desc_table_address(
+                Some(QUEUE_BASE_ADDRESS as u32),
+                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_avail_ring_address(
+                Some(AVAIL_RING_BASE_ADDRESS as u32),
+                Some((AVAIL_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_used_ring_address(
+                Some(USED_RING_BASE_ADDRESS as u32),
+                Some((USED_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_next_avail(kani::any());
+            queue.set_next_used(kani::any());
+            queue.set_event_idx(kani::any());
+            queue.num_added = Wrapping(kani::any());
+
+            kani::assume(queue.is_valid(&mem));
+
+            ProofContext(queue, mem)
+        }
+    }
+
+    #[kani::proof]
+    // There are no loops anywhere, but kani really enjoys getting stuck in
+    // std::ptr::drop_in_place. This is a compiler intrinsic that has a "dummy"
+    // implementation in stdlib that just recursively calls itself. Kani will generally
+    // unwind this recursion infinitely.
+    #[kani::unwind(0)]
+    fn verify_spec_2_7_7_2() {
+        // Section 2.7.7.2 deals with device-to-driver notification suppression.
+        // It describes a mechanism by which the driver can tell the device that it does not
+        // want notifications (IRQs) about the device finishing processing individual buffers
+        // (descriptor chain heads) from the avail ring until a specific number of
+        // descriptors has been processed. This is done by the driver defining a "used_event"
+        // index, which tells the device "please do not notify me until
+        // used.ring[used_event] has been written to by you".
+        let ProofContext(mut queue, mem) = kani::any();
+
+        let num_added_old = queue.num_added.0;
+        let needs_notification = queue.needs_notification(&mem);
+
+        // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated
+        if !queue.event_idx_enabled {
+            // The specification here says:
+            // After the device writes a descriptor index into the used ring:
+            // - If flags is 1, the device SHOULD NOT send a notification.
+            // - If flags is 0, the device MUST send a notification.
+            // flags is the first field in the avail_ring_address, which we completely
+            // ignore. We always send a notification, and as there only is a SHOULD NOT,
+            // that is okay.
+            assert!(needs_notification.unwrap());
+        } else {
+            // next_used - 1 is where the previous descriptor was placed
+            if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap())
+                == queue.next_used - Wrapping(1)
+                && num_added_old > 0
+            {
+                // If the idx field in the used ring (which determined where that descriptor
+                // index was placed) was equal to used_event, the device MUST send a
+                // notification.
+                assert!(needs_notification.unwrap());
+
+                kani::cover!();
+            }
+
+            // The other case is handled by a "SHOULD NOT send a notification" in the spec.
+            // So we do not care.
+        }
+    }
+}
+
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }
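
For orientation, the layout constants above evaluate as follows for this commit's MAX_QUEUE_SIZE of 256 (a worked example derived from the constants and Section 2.7 of the VirtIO 1.2 spec, not code from the commit):

// descriptor table: 256 entries * 16 bytes = 4096 bytes, at guest address 0
// avail ring:       6 + 2 * 256            = 518 bytes,  at address 4096
// padding:          2 bytes, so the used ring lands on a 4-byte boundary
// used ring:        6 + 8 * 256            = 2054 bytes, at address 4616
// QUEUE_END = 4616 + 2054 = 6670, and GUEST_MEMORY_SIZE = 6670 + 30 = 6700 bytes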
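
The MUST-notify case asserted by the proof is a special case of the spec's general EVENT_IDX condition. A minimal Rust sketch of that condition, paraphrasing the vring_need_event() C helper from the VirtIO specification (not code from this commit):

// The device must notify iff event_idx lies in the half-open window (old_idx, new_idx],
// computed modulo 2^16 (VirtIO 1.2, vring_need_event()).
fn vring_need_event(event_idx: u16, new_idx: u16, old_idx: u16) -> bool {
    new_idx.wrapping_sub(event_idx).wrapping_sub(1) < new_idx.wrapping_sub(old_idx)
}

In the asserted branch, new_idx is next_used, old_idx is next_used - num_added, and event_idx equals next_used - 1: the left-hand side is then 0 while the right-hand side is num_added, so any num_added_old > 0 forces a notification.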
