From a3bdfeaba66013b090ea00b51fb282c5b3aadd3d Mon Sep 17 00:00:00 2001
From: Matias Ezequiel Vara Larsen
Date: Fri, 6 Jun 2025 10:50:53 +0200
Subject: [PATCH 1/4] Run kani as part of the CI pipeline

Run kani as a part of the CI pipeline. In particular, run the proofs
for virtio-queue. In some cases, kani may not finish, so set a
twenty-minute timeout.

Signed-off-by: Matias Ezequiel Vara Larsen
Signed-off-by: Siddharth Priya
---
 .buildkite/custom-tests.json | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/.buildkite/custom-tests.json b/.buildkite/custom-tests.json
index defc9d47..442b9059 100644
--- a/.buildkite/custom-tests.json
+++ b/.buildkite/custom-tests.json
@@ -48,6 +48,12 @@
       "command": "cd fuzz && cargo test --package common --lib blk && cargo +nightly fuzz run blk -- -max_total_time=900 -timeout=60s",
       "platform": ["x86_64", "aarch64"],
       "timeout_in_minutes": 20
+    },
+    {
+      "test_name": "prove-virtio-queue",
+      "command": "cargo kani --package virtio-queue",
+      "platform": ["x86_64", "aarch64"],
+      "timeout_in_minutes": 20
     }
   ]
 }

From a8d0a95e2d5822d513190abfd223d1cb7997fdeb Mon Sep 17 00:00:00 2001
From: Matias Ezequiel Vara Larsen
Date: Wed, 15 Jan 2025 14:15:36 +0100
Subject: [PATCH 2/4] Add proof for conformance to 2.7.7.2 section

Add the verify_spec_2_7_7_2() proof to verify that the queue
implementation satisfies the 2.7.7.2 requirement. The proof relies on
whether the EVENT_IDX feature has been negotiated. Unlike the
`test_needs_notification()` test, this proof checks all possible
values of the queue structure.

Signed-off-by: Matias Ezequiel Vara Larsen
Signed-off-by: Siddharth Priya
---
 virtio-queue/Cargo.toml   |  12 ++
 virtio-queue/src/queue.rs | 244 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 256 insertions(+)

diff --git a/virtio-queue/Cargo.toml b/virtio-queue/Cargo.toml
index b48f5610..6b58416d 100644
--- a/virtio-queue/Cargo.toml
+++ b/virtio-queue/Cargo.toml
@@ -23,6 +23,18 @@ criterion = "0.6.0"
 vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
 memoffset = "0.9.0"
 
+[target.'cfg(kani)'.dependencies]
+libc = "0.2.161"
+vm-memory = { workspace = true, features = ["backend-mmap"] }
+
 [[bench]]
 name = "main"
 harness = false
+
+# From https://model-checking.github.io/kani/usage.html#configuration-in-cargotoml
+#
+# Starting with Rust 1.80 (or nightly-2024-05-05), every reachable #[cfg] will be automatically
+# checked that they match the expected config names and values. To avoid warnings on
+# cfg(kani), we recommend adding the check-cfg lint config in your crate's Cargo.toml
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }
diff --git a/virtio-queue/src/queue.rs b/virtio-queue/src/queue.rs
index 37a44d24..c6fa6195 100644
--- a/virtio-queue/src/queue.rs
+++ b/virtio-queue/src/queue.rs
@@ -269,6 +269,250 @@ impl Queue {
     }
 }
 
+#[cfg(kani)]
+#[allow(dead_code)]
+mod verification {
+    use std::mem::ManuallyDrop;
+    use std::num::Wrapping;
+
+    use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
+
+    use super::*;
+
+    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
+    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
+    /// search to determine which region a specific read or write request goes to,
+    /// this only uses a single region. Eliminating this binary search significantly
+    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
+    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
+    /// it works identically to `GuestMemoryMmap` with only a single contained region.
+    pub struct ProofGuestMemory {
+        the_region: vm_memory::GuestRegionMmap,
+    }
+
+    impl GuestMemory for ProofGuestMemory {
+        type R = vm_memory::GuestRegionMmap;
+
+        fn num_regions(&self) -> usize {
+            1
+        }
+
+        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
+            self.the_region
+                .to_region_addr(addr)
+                .map(|_| &self.the_region)
+        }
+
+        fn iter(&self) -> impl Iterator<Item = &Self::R> {
+            std::iter::once(&self.the_region)
+        }
+
+        fn try_access<F>(
+            &self,
+            count: usize,
+            addr: GuestAddress,
+            mut f: F,
+        ) -> vm_memory::guest_memory::Result<usize>
+        where
+            F: FnMut(
+                usize,
+                usize,
+                MemoryRegionAddress,
+                &Self::R,
+            ) -> vm_memory::guest_memory::Result<usize>,
+        {
+            // We only have a single region, meaning a lot of the complications of the default
+            // try_access implementation for dealing with reads/writes across multiple
+            // regions do not apply.
+            let region_addr = self
+                .the_region
+                .to_region_addr(addr)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            self.the_region
+                .checked_offset(region_addr, count)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            f(0, count, region_addr, &self.the_region)
+        }
+    }
+
+    pub struct ProofContext(pub Queue, pub ProofGuestMemory);
+
+    pub struct MmapRegionStub {
+        addr: *mut u8,
+        size: usize,
+        bitmap: (),
+        file_offset: Option<FileOffset>,
+        prot: i32,
+        flags: i32,
+        owned: bool,
+        hugetlbfs: Option<bool>,
+    }
+
+    /// We start the first guest memory region at an offset so that harnesses using
+    /// Queue::any() will be exposed to queue segments both before and after valid guest memory.
+    /// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the
+    /// virtqueue. Also, QUEUE_END is the size only if GUEST_MEMORY_BASE is `0`.
+    const GUEST_MEMORY_BASE: u64 = 0;
+
+    // We size our guest memory to fit a properly aligned queue, plus some wiggle bytes
+    // to make sure we not only test queues where all segments are consecutively aligned.
+    // We need to give at least 16 bytes of buffer space for the descriptor table to be
+    // able to change its address, as it is 16-byte aligned.
+    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
+
+    fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
+        // Ideally, we'd want to do
+        // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
+        //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
+        //    .build()
+        //    .unwrap()};
+        // However, .build() calls into .build_raw(), which contains a call to libc::sysconf.
+        // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
+        // standard library using a special version of the libc crate, it runs into some problems
+        // [1]. Even if we work around those problems, we run into performance problems [2].
+        // Therefore, for now we stick to this ugly transmute hack (which only works because
+        // the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
+        //
+        // [1]: https://github.com/model-checking/kani/issues/2673
+        // [2]: https://github.com/model-checking/kani/issues/2538
+        let region_stub = MmapRegionStub {
+            addr: memory,
+            size: GUEST_MEMORY_SIZE,
+            bitmap: Default::default(),
+            file_offset: None,
+            prot: 0,
+            flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+            owned: false,
+            hugetlbfs: None,
+        };
+
+        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
+
+        let guest_region =
+            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
+
+        // Use a single memory region, just as firecracker does for guests of size < 2GB
+        // For larger guests, firecracker uses two regions (due to the MMIO gap being
+        // at the top of 32-bit address space)
+        ProofGuestMemory {
+            the_region: guest_region,
+        }
+    }
+
+    // can't implement kani::Arbitrary for the relevant types due to orphan rules
+    fn setup_kani_guest_memory() -> ProofGuestMemory {
+        // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec` for now
+        // as `any_vec` will likely result in worse performance. We do not lose much from
+        // `exact_vec`, as our proofs do not make any assumptions about "filling" guest
+        // memory: Since everything is placed at non-deterministic addresses with
+        // non-deterministic lengths, we still cover all scenarios that would be covered by
+        // smaller guest memory closely. We leak the memory allocated here, so that it
+        // doesn't get deallocated at the end of this function. We do not explicitly
+        // de-allocate, but since this is a kani proof, that does not matter.
+        guest_memory(
+            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
+        )
+    }
+
+    const MAX_QUEUE_SIZE: u16 = 256;
+
+    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
+    // at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
+    // specification.
+    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
+
+    /// descriptor table has 16 bytes per entry, avail ring starts right after
+    const AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
+
+    /// Used ring starts after avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE),
+    /// and needs 2 bytes of padding
+    const USED_RING_BASE_ADDRESS: u64 = AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
+
+    /// The address of the first byte after the queue. Since our queue starts at guest physical
+    /// address 0, this is also the size of the memory area occupied by the queue.
+    /// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE
+    const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
+
+    impl kani::Arbitrary for ProofContext {
+        fn any() -> Self {
+            let mem = setup_kani_guest_memory();
+
+            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+
+            queue.ready = true;
+
+            queue.set_desc_table_address(
+                Some(QUEUE_BASE_ADDRESS as u32),
+                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_avail_ring_address(
+                Some(AVAIL_RING_BASE_ADDRESS as u32),
+                Some((AVAIL_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_used_ring_address(
+                Some(USED_RING_BASE_ADDRESS as u32),
+                Some((USED_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_next_avail(kani::any());
+            queue.set_next_used(kani::any());
+            queue.set_event_idx(kani::any());
+            queue.num_added = Wrapping(kani::any());
+
+            kani::assume(queue.is_valid(&mem));
+
+            ProofContext(queue, mem)
+        }
+    }
+
+    #[kani::proof]
+    #[kani::unwind(0)] // There are no loops anywhere, but kani really enjoys getting stuck in std::ptr::drop_in_place.
+    // This is a compiler intrinsic that has a "dummy" implementation in stdlib that just
+    // recursively calls itself. Kani will generally unwind this recursion infinitely
+    fn verify_spec_2_7_7_2() {
+        // Section 2.7.7.2 deals with device-to-driver notification suppression.
+        // It describes a mechanism by which the driver can tell the device that it does not
+        // want notifications (IRQs) about the device finishing processing individual buffers
+        // (descriptor chain heads) from the avail ring until a specific number of descriptors
+        // has been processed. This is done by the driver
+        // defining a "used_event" index, which tells the device "please do not notify me until
+        // used.ring[used_event] has been written to by you".
+        let ProofContext(mut queue, mem) = kani::any();
+
+        let num_added_old = queue.num_added.0;
+        let needs_notification = queue.needs_notification(&mem);
+
+        // event_idx_enabled equivalent to VIRTIO_F_EVENT_IDX negotiated
+        if !queue.event_idx_enabled {
+            // The specification here says
+            // After the device writes a descriptor index into the used ring:
+            // – If flags is 1, the device SHOULD NOT send a notification.
+            // – If flags is 0, the device MUST send a notification
+            // flags is the first field in the avail_ring_address, which we completely ignore. We
+            // always send a notification, and as there only is a SHOULD NOT, that is okay
+            assert!(needs_notification.unwrap());
+        } else {
+            // next_used - 1 is where the previous descriptor was placed
+            if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap())
+                == std::num::Wrapping(queue.next_used - Wrapping(1))
+                && num_added_old > 0
+            {
+                // If the idx field in the used ring (which determined where that descriptor index
+                // was placed) was equal to used_event, the device MUST send a
+                // notification.
+                assert!(needs_notification.unwrap());
+
+                kani::cover!();
+            }
+
+            // The other case is handled by a "SHOULD NOT send a notification" in the spec.
+            // So we do not care
+        }
+    }
+}
+
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }

From 8c2669f61f48a8de1eee9e3a20c09a5e70e6eb6e Mon Sep 17 00:00:00 2001
From: Matias Ezequiel Vara Larsen
Date: Wed, 15 Jan 2025 14:15:36 +0100
Subject: [PATCH 3/4] Add proof for conformance to 2.7.7.2 section

Add the verify_spec_2_7_7_2() proof to verify that the queue
implementation satisfies the 2.7.7.2 requirement. The proof relies on
whether the EVENT_IDX feature has been negotiated.
Unlike the `test_needs_notification()` test, this proof checks all
possible values of the queue structure.

Signed-off-by: Matias Ezequiel Vara Larsen
---
 virtio-queue/Cargo.toml   | 3 ++-
 virtio-queue/src/queue.rs | 5 ++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/virtio-queue/Cargo.toml b/virtio-queue/Cargo.toml
index 6b58416d..5ad9837d 100644
--- a/virtio-queue/Cargo.toml
+++ b/virtio-queue/Cargo.toml
@@ -13,10 +13,11 @@ edition = "2021"
 test-utils = []
 
 [dependencies]
-vm-memory = { workspace = true }
+vm-memory = { workspace = true, features = ["backend-mmap", "backend-bitmap"] }
 vmm-sys-util = { workspace = true }
 log = "0.4.17"
 virtio-bindings = { path="../virtio-bindings", version = "0.2.5" }
+libc = "0.2.161"
 
 [dev-dependencies]
 criterion = "0.6.0"
diff --git a/virtio-queue/src/queue.rs b/virtio-queue/src/queue.rs
index c6fa6195..1c614199 100644
--- a/virtio-queue/src/queue.rs
+++ b/virtio-queue/src/queue.rs
@@ -273,9 +273,12 @@ impl Queue {
 #[allow(dead_code)]
 mod verification {
     use std::mem::ManuallyDrop;
+    use vm_memory::MmapRegion;
+
     use std::num::Wrapping;
+    use vm_memory::FileOffset;
 
-    use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
+    use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};
 
     use super::*;
 

From 7113f45da80fcb4140e3145bbd3bcd622869b560 Mon Sep 17 00:00:00 2001
From: Siddharth Priya
Date: Wed, 11 Jun 2025 10:03:26 -0400
Subject: [PATCH 4/4] feat(kani): stub out guest memory methods for add_used

The GuestMemory trait brings in the Bytes trait and therefore hardcodes
the implementations of the write_obj and store methods. This does not
work well with Kani, which then has to process these methods
symbolically. The solution is to provide no-op implementations for
these methods.

The current approach uses the kani stub macro to replace these methods
with local stubs. An alternative would be to give the proof writer a
way to construct a GuestMemory whose methods can be stubbed out as
needed.
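
In sketch form, the stubbing pattern looks roughly as follows (this is an
illustration only, using the write_elem/stub_write_elem helpers introduced
in the diff below and assuming the crate's existing Queue, Error, GuestMemory
and VirtqUsedElem types are in scope; the store path is handled the same way):

    // Real helper called by add_used(): performs the actual guest-memory write.
    fn write_elem<M: GuestMemory>(
        mem: &M,
        addr: GuestAddress,
        head_index: u16,
        len: u32,
    ) -> Result<(), Error> {
        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
            .map_err(Error::GuestMemory)
    }

    // No-op replacement that Kani substitutes for the helper above.
    fn stub_write_elem<M: GuestMemory>(
        _mem: &M,
        _addr: GuestAddress,
        _head_index: u16,
        _len: u32,
    ) -> Result<(), Error> {
        Ok(())
    }

    #[kani::proof]
    #[kani::stub(write_elem, stub_write_elem)] // applies only to this harness
    fn verify_add_used() {
        // ... drive Queue::add_used() on arbitrary queue state ...
    }

The #[kani::stub(...)] attribute only affects the harness it is attached to,
so the normal implementation is still what the crate ships.
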
Signed-off-by: Siddharth Priya
---
 virtio-queue/src/queue.rs | 77 +++++++++++++++++++++++++++++++++------
 1 file changed, 65 insertions(+), 12 deletions(-)

diff --git a/virtio-queue/src/queue.rs b/virtio-queue/src/queue.rs
index 1c614199..3716989c 100644
--- a/virtio-queue/src/queue.rs
+++ b/virtio-queue/src/queue.rs
@@ -269,6 +269,28 @@ impl Queue {
     }
 }
 
+fn write_elem<M: GuestMemory>(
+    mem: &M,
+    addr: GuestAddress,
+    head_index: u16,
+    len: u32,
+) -> Result<(), Error> {
+    mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
+        .map_err(Error::GuestMemory)
+}
+
+fn store_elem<M: GuestMemory>(
+    mem: &M,
+    used_ring: GuestAddress,
+    next_used: u16,
+) -> Result<(), Error> {
+    let addr = used_ring
+        .checked_add(2)
+        .ok_or(Error::AddressOverflow)?;
+    mem.store(u16::to_le(next_used), addr, Ordering::Release)
+        .map_err(Error::GuestMemory)
+}
+
 #[cfg(kani)]
 #[allow(dead_code)]
 mod verification {
@@ -278,7 +300,7 @@ mod verification {
     use std::num::Wrapping;
     use vm_memory::FileOffset;
 
-    use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};
+    use vm_memory::{GuestRegionMmap, GuestMemoryRegion, MemoryRegionAddress, AtomicAccess, GuestMemory, GuestMemoryError};
 
     use super::*;
 
@@ -514,8 +536,47 @@ mod verification {
             // So we do not care
         }
     }
-}
+    fn stub_write_elem<M: GuestMemory>(
+        _mem: &M,
+        _addr: GuestAddress,
+        _head_index: u16,
+        _len: u32,) -> Result<(), Error> {
+        Ok(())
+    }
+
+    fn stub_store_elem<M: GuestMemory>(
+        _mem: &M,
+        _used_ring: GuestAddress,
+        _next_used: u16,) -> Result<(), Error> {
+        Ok(())
+    }
+
+    #[kani::proof]
+    #[kani::unwind(0)]
+    #[kani::stub(write_elem, stub_write_elem)]
+    #[kani::stub(store_elem, stub_store_elem)]
+    fn verify_add_used() {
+        let ProofContext(mut queue, mem) = kani::any();
+        let used_idx = queue.next_used;
+
+        let used_desc_table_index = kani::any();
+        if queue.add_used(&mem, used_desc_table_index, kani::any()).is_ok() {
+            assert_eq!(queue.next_used, used_idx + Wrapping(1));
+        } else {
+            assert_eq!(queue.next_used, used_idx);
+
+            // Ideally, here we would want to actually read the relevant values from memory and
+            // assert they are unchanged. However, kani will run out of memory if we try to do so,
+            // so we instead verify the following "proxy property": If an error happened, then
+            // it happened at the very beginning of add_used, meaning no memory accesses were
+            // done. This is relying on implementation details of add_used, namely that
+            // the check for out-of-bounds descriptor index happens at the very beginning of the
+            // function.
+            assert!(used_desc_table_index >= queue.size());
+        }
+    }
+}
 
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }
@@ -713,20 +774,12 @@ impl QueueT for Queue {
             .used_ring
             .checked_add(offset)
             .ok_or(Error::AddressOverflow)?;
-        mem.write_obj(VirtqUsedElem::new(head_index.into(), len), addr)
-            .map_err(Error::GuestMemory)?;
+        write_elem(mem, addr, head_index, len)?;
 
         self.next_used += Wrapping(1);
         self.num_added += Wrapping(1);
 
-        mem.store(
-            u16::to_le(self.next_used.0),
-            self.used_ring
-                .checked_add(2)
-                .ok_or(Error::AddressOverflow)?,
-            Ordering::Release,
-        )
-        .map_err(Error::GuestMemory)
+        store_elem(mem, self.used_ring, self.next_used.0)
     }
 
     fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {