@@ -269,6 +269,282 @@ impl Queue {
     }
 }
 
+#[cfg(kani)]
+mod verification {
+    use std::mem::ManuallyDrop;
+    use std::num::Wrapping;
+
+    use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion};
+
+    use super::*;
+
+    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
+    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
+    /// search to determine which region a specific read or write request goes to,
+    /// this only uses a single region. Eliminating this binary search significantly
+    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
+    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
+    /// it works identically to `GuestMemoryMmap` with only a single contained region.
+    pub struct SingleRegionGuestMemory {
+        the_region: vm_memory::GuestRegionMmap,
+    }
+
+    impl GuestMemory for SingleRegionGuestMemory {
+        type R = vm_memory::GuestRegionMmap;
+
+        fn num_regions(&self) -> usize {
+            1
+        }
+
+        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
+            self.the_region
+                .to_region_addr(addr)
+                .map(|_| &self.the_region)
+        }
+
+        fn iter(&self) -> impl Iterator<Item = &Self::R> {
+            std::iter::once(&self.the_region)
+        }
+
+        fn try_access<F>(
+            &self,
+            count: usize,
+            addr: GuestAddress,
+            mut f: F,
+        ) -> vm_memory::guest_memory::Result<usize>
+        where
+            F: FnMut(
+                usize,
+                usize,
+                MemoryRegionAddress,
+                &Self::R,
+            ) -> vm_memory::guest_memory::Result<usize>,
+        {
+            // We only have a single region, meaning a lot of the complications of the default
+            // try_access implementation for dealing with reads/writes across multiple
+            // regions do not apply.
+            let region_addr = self
+                .the_region
+                .to_region_addr(addr)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            self.the_region
+                .checked_offset(region_addr, count)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            f(0, count, region_addr, &self.the_region)
+        }
+    }
+
+    impl kani::Arbitrary for SingleRegionGuestMemory {
+        fn any() -> Self {
+            guest_memory(
+                ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
+            )
+        }
+    }
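+    // The `ManuallyDrop` above deliberately leaks the buffer: the raw pointer handed to
+    // `guest_memory` has to stay valid for the whole harness, and the stubbed region below
+    // is built with `_owned: false`, so nothing ever attempts to free or unmap it.
+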
+    pub struct ProofContext {
+        pub queue: Queue,
+        pub memory: SingleRegionGuestMemory,
+    }
+
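+    /// Intended as a field-for-field mirror of the (private) layout of
+    /// `vm_memory::MmapRegion<()>`; `guest_memory` below transmutes it into a real
+    /// `MmapRegion` (see the safety discussion there). The underscored names signal
+    /// that the fields are never read directly.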
+    pub struct MmapRegionStub {
+        _addr: *mut u8,
+        _size: usize,
+        _bitmap: (),
+        _file_offset: Option<FileOffset>,
+        _prot: i32,
+        _flags: i32,
+        _owned: bool,
+        _hugetlbfs: Option<bool>,
+    }
+
+    /// We start the first guest memory region at an offset so that harnesses using
+    /// Queue::any() will be exposed to queue segments both before and after valid guest memory.
+    /// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the
+    /// virtqueue. Also, QUEUE_END is the size of the guest memory only if GUEST_MEMORY_BASE
+    /// is `0`.
+    const GUEST_MEMORY_BASE: u64 = 0;
+
+    // We size our guest memory to fit a properly aligned queue, plus some wiggle room
+    // to make sure we do not only test queues where all segments are laid out consecutively.
+    // We need to give at least 16 bytes of buffer space for the descriptor table to be
+    // able to change its address, as it is 16-byte aligned.
+    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
+
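+    // For illustration: with MAX_QUEUE_SIZE = 4 (see the layout constants below), QUEUE_END
+    // evaluates to 118, so the guest memory spans 148 bytes starting at GUEST_MEMORY_BASE.
+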
+    fn guest_memory(memory: *mut u8) -> SingleRegionGuestMemory {
+        // Ideally, we'd want to do
+        // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
+        //    .with_raw_mmap_pointer(bytes.as_mut_ptr())
+        //    .build()
+        //    .unwrap()};
+        // However, .build() calls into .build_raw(), which contains a call to libc::sysconf.
+        // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
+        // standard library using a special version of the libc crate, it runs into some
+        // problems [1]. Even if we work around those problems, we run into performance
+        // problems [2]. Therefore, for now we stick to this ugly transmute hack (which only
+        // works because the kani compiler will never re-order fields, so we can treat
+        // repr(Rust) as repr(C)).
+        //
+        // [1]: https://github.com/model-checking/kani/issues/2673
+        // [2]: https://github.com/model-checking/kani/issues/2538
+        let region_stub = MmapRegionStub {
+            _addr: memory,
+            _size: GUEST_MEMORY_SIZE,
+            _bitmap: Default::default(),
+            _file_offset: None,
+            _prot: 0,
+            _flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+            _owned: false,
+            _hugetlbfs: None,
+        };
+
+        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
+
+        let guest_region =
+            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
+
+        // Use a single memory region, which is sufficient for guests of size < 2GB.
+        SingleRegionGuestMemory {
+            the_region: guest_region,
+        }
+    }
+
+    const MAX_QUEUE_SIZE: u16 = 4;
+
+    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
+    // at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
+    // specification.
+    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
+
+    /// The descriptor table has 16 bytes per entry; the avail ring starts right after.
+    const MAX_START_AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
+
+    /// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE)
+    /// and needs 2 bytes of padding.
+    const MAX_START_USED_RING_BASE_ADDRESS: u64 =
+        MAX_START_AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
+
+    /// The address of the first byte after the queue. Since our queue starts at guest physical
+    /// address 0, this is also the size of the memory area occupied by the queue.
+    /// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE.
+    const QUEUE_END: u64 = MAX_START_USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
+
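+    // Worked out for MAX_QUEUE_SIZE = 4, the maximally packed layout is:
+    //   descriptor table: bytes  0..64   (4 entries * 16 bytes each)
+    //   avail ring:       bytes 64..78   (6 + 2 * 4), followed by 2 bytes of padding
+    //   used ring:        bytes 80..118  (6 + 8 * 4), so QUEUE_END == 118
+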
+    impl kani::Arbitrary for ProofContext {
+        fn any() -> Self {
+            // The descriptor table has 16 bytes per entry; the avail ring starts right after.
+            let desc_tbl_queue_size = kani::any::<u16>();
+            // Alignment of the descriptor table is 16 bytes as per the VirtIO spec.
+            // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
+            kani::assume(
+                desc_tbl_queue_size <= 16 * MAX_QUEUE_SIZE && (desc_tbl_queue_size & 0xF == 0),
+            );
+            let avail_ring_base_address: u64 = QUEUE_BASE_ADDRESS + desc_tbl_queue_size as u64;
+
+            // The used ring starts after the avail ring (which has max size
+            // 6 + 2 * MAX_QUEUE_SIZE) and needs 2 bytes of padding.
+            let avail_ring_queue_size = kani::any::<u16>();
+            // Alignment of the available ring is 2 bytes as per the VirtIO spec.
+            // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
+            kani::assume(
+                avail_ring_queue_size <= 6 + 2 * MAX_QUEUE_SIZE + 2
+                    && (avail_ring_queue_size & 0x1 == 0),
+            );
+            let used_ring_base_address: u64 =
+                avail_ring_base_address + avail_ring_queue_size as u64;
+
+            // Note that the used ring structure has max size 6 + 8 * MAX_QUEUE_SIZE.
+            let used_ring_queue_size = kani::any::<u16>();
+            // Alignment of the used ring is 4 bytes as per the VirtIO spec.
+            // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf
+            kani::assume(
+                used_ring_queue_size <= 6 + 8 * MAX_QUEUE_SIZE
+                    && (used_ring_queue_size & 0x3 == 0),
+            );
+
+            // The size of the queue data structures should fill the available space.
+            kani::assume(QUEUE_END == used_ring_base_address + used_ring_queue_size as u64);
+
+            let mem = SingleRegionGuestMemory::any();
+
+            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+
+            queue.ready = true;
+
+            queue.set_desc_table_address(
+                Some(QUEUE_BASE_ADDRESS as u32),
+                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_avail_ring_address(
+                Some(avail_ring_base_address as u32),
+                Some((avail_ring_base_address >> 32) as u32),
+            );
+
+            queue.set_used_ring_address(
+                Some(used_ring_base_address as u32),
+                Some((used_ring_base_address >> 32) as u32),
+            );
+
+            queue.set_next_avail(kani::any());
+            queue.set_next_used(kani::any());
+            queue.set_event_idx(kani::any());
+            queue.num_added = Wrapping(kani::any());
+
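+            // `is_valid` (in virtio-queue) checks that the queue is marked ready and that the
+            // descriptor table, avail ring and used ring are properly aligned and lie entirely
+            // within guest memory, so the proofs only explore layouts the device model itself
+            // would accept.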
+            kani::assume(queue.is_valid(&mem));
+
+            ProofContext { queue, memory: mem }
+        }
+    }
+
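+    // Assuming a standard kani setup, this harness can be run with
+    // `cargo kani --harness verify_spec_2_7_7_2` from the crate root.
+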
+    #[kani::proof]
+    // There are no loops anywhere, but kani really enjoys getting stuck in std::ptr::drop_in_place.
+    // This is a compiler intrinsic that has a "dummy" implementation in stdlib that just
+    // recursively calls itself. Kani will generally unwind this recursion infinitely.
+    #[kani::unwind(0)]
+    fn verify_spec_2_7_7_2() {
+        // Section 2.7.7.2 deals with device-to-driver notification suppression.
+        // It describes a mechanism by which the driver can tell the device that it does not
+        // want notifications (IRQs) about the device finishing processing individual buffers
+        // (descriptor chain heads) from the avail ring until a specific number of descriptors
+        // has been processed. This is done by the driver defining a "used_event" index, which
+        // tells the device "please do not notify me until used.ring[used_event] has been
+        // written to by you".
+        let ProofContext {
+            mut queue,
+            memory: mem,
+        } = kani::any();
+
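+        // In virtio-queue's implementation, `num_added` counts the descriptor chains added to
+        // the used ring since the last notification-suppression check; we snapshot it here
+        // because `needs_notification` resets the counter.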
+        let num_added_old = queue.num_added.0;
+        let needs_notification = queue.needs_notification(&mem);
+
+        // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated
+        if !queue.event_idx_enabled {
+            // The specification here says:
+            // After the device writes a descriptor index into the used ring:
+            // - If flags is 1, the device SHOULD NOT send a notification.
+            // - If flags is 0, the device MUST send a notification.
+            // flags is the first field in the avail_ring_address, which we completely ignore. We
+            // always send a notification, and as there only is a SHOULD NOT, that is okay.
+            assert!(needs_notification.unwrap());
+        } else {
+            // next_used - 1 is where the previous descriptor was placed
+            if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap())
+                == std::num::Wrapping(queue.next_used - Wrapping(1))
+                && num_added_old > 0
+            {
+                // If the idx field in the used ring (which determined where that descriptor
+                // index was placed) was equal to used_event, the device MUST send a
+                // notification.
+                assert!(needs_notification.unwrap());
+
+                kani::cover!();
+            }
+
+            // The other case is handled by a "SHOULD NOT send a notification" in the spec.
+            // So we do not care.
+        }
+    }
+}
+
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }