
Commit 643eb0c

MatiasVara authored and stefano-garzarella committed
Add proof for conformance to section 2.7.7.2

Add the verify_spec_2_7_7_2() proof to verify that the queue implementation satisfies the requirement of section 2.7.7.2. The proof branches on whether the EVENT_IDX feature has been negotiated. Unlike the `test_needs_notification()` test, this proof checks the property for all possible values of the queue structure.

Signed-off-by: Matias Ezequiel Vara Larsen <mvaralar@redhat.com>
Signed-off-by: Siddharth Priya <s2priya@uwaterloo.ca>
1 parent 2751e66 commit 643eb0c

File tree: virtio-queue/Cargo.toml, virtio-queue/src/queue.rs

2 files changed, +290 -0 lines changed

virtio-queue/Cargo.toml

Lines changed: 12 additions & 0 deletions
@@ -23,6 +23,18 @@ criterion = "0.6.0"
vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
memoffset = "0.9.0"

[target.'cfg(kani)'.dependencies]
libc = "0.2.161"
vm-memory = { workspace = true, features = ["backend-mmap"] }

[[bench]]
name = "main"
harness = false

# From https://model-checking.github.io/kani/usage.html#configuration-in-cargotoml
#
# Starting with Rust 1.80 (or nightly-2024-05-05), every reachable #[cfg] is automatically
# checked against the expected config names and values. To avoid warnings on cfg(kani),
# we recommend adding the check-cfg lint config in your crate's Cargo.toml.
[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }
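
For context, the `cfg(kani)` gate that this configuration supports works as in the minimal sketch below: the gated module is only compiled under `cargo kani` (which defines the `kani` cfg), and the `[lints.rust]` entry stops rustc from flagging `cfg(kani)` as an unexpected cfg name. The `proofs` module and `trivial_harness` here are illustrative, not part of this commit.

#[cfg(kani)]
mod proofs {
    // Kani explores every value admitted by kani::any()/kani::assume(),
    // rather than sampling a handful of values like a unit test would.
    #[kani::proof]
    fn trivial_harness() {
        let x: u8 = kani::any();
        kani::assume(x < 128);
        assert!(x.checked_add(1).is_some()); // holds for every assumed x
    }
}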

virtio-queue/src/queue.rs

Lines changed: 278 additions & 0 deletions
@@ -269,6 +269,284 @@ impl Queue {
    }
}

#[cfg(kani)]
#[allow(dead_code)]
mod verification {
    use std::mem::ManuallyDrop;
    use std::num::Wrapping;

    use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};

    use super::*;

    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
    /// search to determine which region a specific read or write request goes to,
    /// this only uses a single region. Eliminating this binary search significantly
    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
    /// it works identically to `GuestMemoryMmap` with only a single contained region.
    pub struct ProofGuestMemory {
        the_region: vm_memory::GuestRegionMmap,
    }

    impl GuestMemory for ProofGuestMemory {
        type R = vm_memory::GuestRegionMmap;

        fn num_regions(&self) -> usize {
            1
        }

        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
            self.the_region
                .to_region_addr(addr)
                .map(|_| &self.the_region)
        }

        fn iter(&self) -> impl Iterator<Item = &Self::R> {
            std::iter::once(&self.the_region)
        }

        fn try_access<F>(
            &self,
            count: usize,
            addr: GuestAddress,
            mut f: F,
        ) -> vm_memory::guest_memory::Result<usize>
        where
            F: FnMut(
                usize,
                usize,
                MemoryRegionAddress,
                &Self::R,
            ) -> vm_memory::guest_memory::Result<usize>,
        {
            // We only have a single region, meaning most of the complications of the
            // default try_access implementation (which deals with reads/writes across
            // multiple regions) do not apply.
            let region_addr = self
                .the_region
                .to_region_addr(addr)
                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
            self.the_region
                .checked_offset(region_addr, count)
                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
            f(0, count, region_addr, &self.the_region)
        }
    }

    pub struct ProofContext(pub Queue, pub ProofGuestMemory);

    pub struct MmapRegionStub {
        addr: *mut u8,
        size: usize,
        bitmap: (),
        file_offset: Option<FileOffset>,
        prot: i32,
        flags: i32,
        owned: bool,
        hugetlbfs: Option<bool>,
    }

    /// Ideally we would start the first guest memory region at an offset, so that
    /// harnesses using Queue::any() are exposed to queue segments both before and
    /// after valid guest memory. However, we use `0` here to conform to
    /// MockSplitQueue::new(), which uses `0` as the starting address of the
    /// virtqueue. Note that QUEUE_END is also the size of the guest memory only
    /// because GUEST_MEMORY_BASE is `0`.
    const GUEST_MEMORY_BASE: u64 = 0;

    // We size our guest memory to fit a properly aligned queue, plus some wiggle
    // room to make sure we do not only test queues where all segments are laid out
    // consecutively. We need to give at least 16 bytes of buffer space for the
    // descriptor table to be able to change its address, as it is 16-byte aligned.
    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;

    fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
        // Ideally, we'd want to do
        // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
        //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
        //    .build()
        //    .unwrap()};
        // However, .build() calls .build_raw(), which contains a call to libc::sysconf.
        // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the
        // rust standard library using a special version of the libc crate, it runs into
        // some problems [1]. Even if we work around those problems, we run into
        // performance problems [2]. Therefore, for now we stick to this ugly transmute
        // hack (which only works because the kani compiler will never re-order fields,
        // so we can treat repr(Rust) as repr(C)).
        //
        // [1]: https://github.com/model-checking/kani/issues/2673
        // [2]: https://github.com/model-checking/kani/issues/2538
        let region_stub = MmapRegionStub {
            addr: memory,
            size: GUEST_MEMORY_SIZE,
            bitmap: Default::default(),
            file_offset: None,
            prot: 0,
            flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
            owned: false,
            hugetlbfs: None,
        };

        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };

        let guest_region =
            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();

        // Use a single memory region for guests of size < 2GB.
        ProofGuestMemory {
            the_region: guest_region,
        }
    }

    // We can't implement kani::Arbitrary for the relevant types due to orphan rules.
    fn setup_kani_guest_memory() -> ProofGuestMemory {
        // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec`
        // for now, as `any_vec` would likely result in worse performance. We do not lose
        // much by using `exact_vec`, as our proofs do not make any assumptions about
        // "filling" guest memory: since everything is placed at non-deterministic
        // addresses with non-deterministic lengths, we still closely cover all scenarios
        // that would be covered by smaller guest memory. We leak the memory allocated
        // here so that it does not get deallocated at the end of this function. We do not
        // explicitly de-allocate, but since this is a kani proof, that does not matter.
        guest_memory(
            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
        )
    }

    const MAX_QUEUE_SIZE: u16 = 4;

    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE
    // starting at the beginning of guest memory. These are based on Section 2.7 of
    // the VirtIO 1.2 specification.
    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;

    /// The descriptor table has 16 bytes per entry; the avail ring starts right after.
    const MAX_START_AVAIL_RING_BASE_ADDRESS: u64 =
        QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;

    /// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE)
    /// and needs 2 bytes of padding.
    const MAX_START_USED_RING_BASE_ADDRESS: u64 =
        MAX_START_AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;

    /// The address of the first byte after the queue. Since our queue starts at guest
    /// physical address 0, this is also the size of the memory area occupied by the
    /// queue. Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE.
    const QUEUE_END: u64 = MAX_START_USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
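
    // For concreteness: with MAX_QUEUE_SIZE = 4, the constants above evaluate to
    //   descriptor table: starts at 0,  size 4 * 16    = 64 bytes
    //   avail ring:       starts at 64, size 6 + 2 * 4 = 14 bytes, plus 2 bytes padding
    //   used ring:        starts at 80, size 6 + 8 * 4 = 38 bytes
    // so QUEUE_END = 80 + 38 = 118, and GUEST_MEMORY_SIZE = 118 + 30 = 148.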

    impl kani::Arbitrary for ProofContext {
        fn any() -> Self {
            // The descriptor table has 16 bytes per entry; the avail ring starts right after.
            let desc_tbl_queue_size = kani::any::<u16>();
            // Alignment of the descriptor table is 16 bytes as per the VirtIO spec.
            // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
            kani::assume(
                desc_tbl_queue_size <= 16 * MAX_QUEUE_SIZE && (desc_tbl_queue_size & 0xF == 0),
            );
            let avail_ring_base_address: u64 = QUEUE_BASE_ADDRESS + desc_tbl_queue_size as u64;

            // The used ring starts after the avail ring (which has max size
            // 6 + 2 * MAX_QUEUE_SIZE) and needs 2 bytes of padding.
            let avail_ring_queue_size = kani::any::<u16>();
            // Alignment of the available ring is 2 bytes as per the VirtIO spec.
            // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
            kani::assume(
                avail_ring_queue_size <= 6 + 2 * MAX_QUEUE_SIZE + 2
                    && (avail_ring_queue_size & 0x1 == 0),
            );
            let used_ring_base_address: u64 =
                avail_ring_base_address + avail_ring_queue_size as u64;

            // The address of the first byte after the queue. Since our queue starts at
            // guest physical address 0, this is also the size of the memory area occupied
            // by the queue. Note that the used ring structure has max size
            // 6 + 8 * MAX_QUEUE_SIZE.
            let used_ring_queue_size = kani::any::<u16>();
            // Alignment of the used ring is 4 bytes as per the VirtIO spec.
            // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
            kani::assume(
                used_ring_queue_size <= 6 + 8 * MAX_QUEUE_SIZE
                    && (used_ring_queue_size & 0x3 == 0),
            );

            // The size of the queue data structures should fill the available space.
            kani::assume(QUEUE_END == used_ring_base_address + used_ring_queue_size as u64);

            let mem = setup_kani_guest_memory();

            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();

            queue.ready = true;

            queue.set_desc_table_address(
                Some(QUEUE_BASE_ADDRESS as u32),
                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
            );

            queue.set_avail_ring_address(
                Some(avail_ring_base_address as u32),
                Some((avail_ring_base_address >> 32) as u32),
            );

            queue.set_used_ring_address(
                Some(used_ring_base_address as u32),
                Some((used_ring_base_address >> 32) as u32),
            );

            queue.set_next_avail(kani::any());
            queue.set_next_used(kani::any());
            queue.set_event_idx(kani::any());
            queue.num_added = Wrapping(kani::any());

            kani::assume(queue.is_valid(&mem));

            ProofContext(queue, mem)
        }
    }
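
    // Taken together, ProofContext::any() ranges over every ready queue of size
    // MAX_QUEUE_SIZE whose descriptor table, avail ring and used ring tile the
    // [0, QUEUE_END) area in that order, combined with arbitrary next_avail,
    // next_used, event_idx and num_added state that passes Queue::is_valid().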

    #[kani::proof]
    // There are no loops anywhere, but kani really enjoys getting stuck in
    // std::ptr::drop_in_place. This is a compiler intrinsic that has a "dummy"
    // implementation in stdlib that just recursively calls itself; Kani will
    // generally unwind this recursion infinitely.
    #[kani::unwind(0)]
    fn verify_spec_2_7_7_2() {
        // Section 2.7.7.2 deals with device-to-driver notification suppression.
        // It describes a mechanism by which the driver can tell the device that it does
        // not want notifications (IRQs) about the device finishing processing individual
        // buffers (descriptor chain heads) from the avail ring until a specific number of
        // descriptors has been processed. This is done by the driver defining a
        // "used_event" index, which tells the device "please do not notify me until
        // used.ring[used_event] has been written to by you".
        let ProofContext(mut queue, mem) = kani::any();

        let num_added_old = queue.num_added.0;
        let needs_notification = queue.needs_notification(&mem);

        // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated.
        if !queue.event_idx_enabled {
            // The specification here says:
            // After the device writes a descriptor index into the used ring:
            // - If flags is 1, the device SHOULD NOT send a notification.
            // - If flags is 0, the device MUST send a notification.
            // flags is the first field in the avail_ring_address, which we completely
            // ignore. We always send a notification, and as the spec only says SHOULD
            // NOT, that is okay.
            assert!(needs_notification.unwrap());
        } else {
            // next_used - 1 is where the previous descriptor was placed.
            if queue.used_event(&mem, Ordering::Relaxed).unwrap()
                == queue.next_used - Wrapping(1)
                && num_added_old > 0
            {
                // If the idx field in the used ring (which determined where that
                // descriptor index was placed) was equal to used_event, the device
                // MUST send a notification.
                assert!(needs_notification.unwrap());

                kani::cover!();
            }

            // The other case is handled by a "SHOULD NOT send a notification" in the
            // spec, so we do not care.
        }
    }
}

impl<'a> QueueGuard<'a> for Queue {
    type G = &'a mut Self;
}
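
With the Kani toolchain installed, the new harness can be run from the virtio-queue directory with `cargo kani --harness verify_spec_2_7_7_2`. As a companion to the proof, the sketch below restates the EVENT_IDX notification rule of VirtIO 1.2 section 2.7.7.2 using the spec's `vring_need_event` formula; the `must_notify` function and `main` driver are our illustration, not part of this commit (the harness above asserts only the MUST cases of the rule).

use std::num::Wrapping;

/// Per the `vring_need_event` formula: with EVENT_IDX negotiated, the device
/// must notify iff the entry at index `used_event` was among those just
/// written, i.e. used_event is in the wrap-around window [old_used, new_used).
fn must_notify(
    used_event: Wrapping<u16>,
    new_used: Wrapping<u16>,
    old_used: Wrapping<u16>,
) -> bool {
    (new_used - used_event - Wrapping(1)) < (new_used - old_used)
}

fn main() {
    // The device has just written used.ring[5] (idx moved from 4 to 6) and the
    // driver asked to be notified once entry 5 is used: notification required.
    assert!(must_notify(Wrapping(5), Wrapping(6), Wrapping(4)));
    // used_event = 9 has not been reached yet: no notification required.
    assert!(!must_notify(Wrapping(9), Wrapping(6), Wrapping(4)));
}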
