// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// Copyright (C) 2020 Red Hat, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! A wrapper over an `ArcSwap<GuestMemory>` struct to support RCU-style mutability.
//!
//! With the `backend-atomic` feature enabled, simply replacing `GuestMemoryMmap`
//! with `GuestMemoryAtomic<GuestMemoryMmap>` enables support for mutable memory maps.
//! To take advantage of this, devices also need to use `GuestAddressSpace::memory()`
//! to gain temporary access to guest memory.
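//!
//! A minimal usage sketch (it assumes the crate is used as `vm_memory` and that the
//! `backend-mmap` feature is also enabled so that `GuestMemoryMmap` is available):
//!
//! ```ignore
//! use vm_memory::{
//!     GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
//! };
//!
//! let gmm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
//! let gm = GuestMemoryAtomic::new(gmm);
//!
//! // Devices call `memory()` to obtain a cheap, temporary snapshot of the map.
//! let snapshot = gm.memory();
//! assert_eq!(snapshot.num_regions(), 1);
//! ```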

extern crate arc_swap;

use arc_swap::{ArcSwap, Guard};
use std::ops::Deref;
use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};

use crate::{GuestAddressSpace, GuestMemory};

/// A fast implementation of a mutable collection of memory regions.
///
/// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map:
/// every update of the memory map creates a completely new `GuestMemory` object, and
/// readers will not be blocked because the copies they retrieved will be collected once
/// no one can access them anymore. Under the assumption that updates to the memory map
/// are rare, this allows a very efficient implementation of the `memory()` method.
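///
/// A sketch of the update path (hypothetical `old_map` and `new_map` values of a
/// concrete `GuestMemory` type; error handling elided):
///
/// ```ignore
/// let gm = GuestMemoryAtomic::new(old_map);
/// let snapshot = gm.memory();          // existing readers keep this snapshot...
/// gm.lock().unwrap().replace(new_map); // ...even while a new map is installed,
/// let fresh = gm.memory();             // and new readers observe the new map.
/// ```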
#[derive(Clone, Debug)]
pub struct GuestMemoryAtomic<M: GuestMemory> {
    // GuestAddressSpace<M>, which we want to implement, is basically a drop-in
    // replacement for &M. Therefore, we need to pass to devices the GuestMemoryAtomic
    // rather than a reference to it. To obtain this effect we wrap the actual fields
    // of GuestMemoryAtomic with an Arc, and derive the Clone trait. See the
    // documentation for GuestAddressSpace for an example.
    inner: Arc<(ArcSwap<M>, Mutex<()>)>,
}

impl<M: GuestMemory> From<Arc<M>> for GuestMemoryAtomic<M> {
    /// Creates a new `GuestMemoryAtomic` object whose initial contents come from
    /// the `map` reference-counted GuestMemory.
    fn from(map: Arc<M>) -> Self {
        let inner = (ArcSwap::new(map), Mutex::new(()));
        GuestMemoryAtomic {
            inner: Arc::new(inner),
        }
    }
}

impl<M: GuestMemory> GuestMemoryAtomic<M> {
    /// Creates a new `GuestMemoryAtomic` object whose initial contents come from
    /// the `map` GuestMemory.
    pub fn new(map: M) -> Self {
        Arc::new(map).into()
    }

    fn load(&self) -> Guard<'static, Arc<M>> {
        self.inner.0.load()
    }

    /// Acquires the update mutex for the GuestMemoryAtomic, blocking the current
    /// thread until it is able to do so. The returned RAII guard allows for
    /// scoped unlock of the mutex (that is, the mutex will be unlocked when
    /// the guard goes out of scope), and optionally also for replacing the
    /// contents of the GuestMemoryAtomic when the guard is dropped.
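    ///
    /// A brief sketch of the scoped-unlock behavior, using a hypothetical
    /// `gm: GuestMemoryAtomic<_>` value:
    ///
    /// ```ignore
    /// {
    ///     let guard = gm.lock().unwrap();
    ///     // The update mutex is held here; other calls to `lock()` block.
    ///     // Either drop `guard` to release the lock unchanged, or consume it
    ///     // with `guard.replace(new_map)` to install a new memory map.
    /// } // the mutex is released when `guard` goes out of scope
    /// ```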
    pub fn lock(&self) -> LockResult<GuestMemoryExclusiveGuard<M>> {
        match self.inner.1.lock() {
            Ok(guard) => Ok(GuestMemoryExclusiveGuard {
                parent: self,
                _guard: guard,
            }),
            Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard {
                parent: self,
                _guard: err.into_inner(),
            })),
        }
    }
}

impl<M: GuestMemory> GuestAddressSpace for GuestMemoryAtomic<M> {
    type T = GuestMemoryLoadGuard<M>;
    type M = M;

    fn memory(&self) -> Self::T {
        GuestMemoryLoadGuard { guard: self.load() }
    }
}

/// A guard that provides temporary access to a GuestMemoryAtomic. This
/// object is returned from the `memory()` method. It dereferences to
/// a snapshot of the GuestMemory, so it can be used transparently to
/// access memory.
#[derive(Debug)]
pub struct GuestMemoryLoadGuard<M: GuestMemory> {
    guard: Guard<'static, Arc<M>>,
}

impl<M: GuestMemory> GuestMemoryLoadGuard<M> {
    /// Makes a clone of the held pointer and returns it. This is more
    /// expensive than just using the snapshot, but it allows the caller to
    /// hold on to the snapshot outside the scope of the guard. It also allows
    /// writers to proceed, so it is recommended if the reference must
    /// be held for a long time (including for caching purposes).
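    ///
    /// A brief sketch, assuming a hypothetical `gm: GuestMemoryAtomic<_>` value:
    ///
    /// ```ignore
    /// let cached = gm.memory().into_inner(); // Arc<M>, detached from the guard
    /// // `cached` remains usable even after a new map is installed via `replace()`,
    /// // and it does not hold up writers the way a long-lived guard would.
    /// ```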
    pub fn into_inner(self) -> Arc<M> {
        Guard::into_inner(self.guard)
    }
}

impl<M: GuestMemory> Deref for GuestMemoryLoadGuard<M> {
    type Target = M;

    fn deref(&self) -> &Self::Target {
        &*self.guard
    }
}

/// An RAII implementation of a "scoped lock" for GuestMemoryAtomic. When
/// this structure is dropped (falls out of scope) the lock will be unlocked,
/// possibly after updating the memory map represented by the
/// GuestMemoryAtomic that created the guard.
pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
    parent: &'a GuestMemoryAtomic<M>,
    _guard: MutexGuard<'a, ()>,
}

impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
    /// Replaces the memory map in the GuestMemoryAtomic that created the guard
    /// with the new memory map, `map`. The lock is then released, since this
    /// method consumes the guard.
    pub fn replace(self, map: M) {
        self.parent.inner.0.store(Arc::new(map))
    }
}

#[cfg(test)]
#[cfg(feature = "backend-mmap")]
mod tests {
    use super::*;
    use crate::{
        GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion, GuestMemoryResult,
        GuestUsize,
    };

    type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>;

    #[test]
    fn test_atomic_memory() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);
        let mem = gm.memory();

        let res: GuestMemoryResult<()> = mem.with_regions(|_, region| {
            assert_eq!(region.len(), region_size as GuestUsize);
            Ok(())
        });
        assert!(res.is_ok());
        let res: GuestMemoryResult<()> = mem.with_regions_mut(|_, region| {
            iterated_regions.push((region.start_addr(), region.len() as usize));
            Ok(())
        });
        assert!(res.is_ok());
        assert_eq!(regions, iterated_regions);
        assert_eq!(mem.num_regions(), 2);
        assert!(mem.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().map(|x| *x)));

        let mem2 = mem.into_inner();
        let res: GuestMemoryResult<()> = mem2.with_regions(|_, region| {
            assert_eq!(region.len(), region_size as GuestUsize);
            Ok(())
        });
        assert!(res.is_ok());
        let res: GuestMemoryResult<()> = mem2.with_regions_mut(|_, _| Ok(()));
        assert!(res.is_ok());
        assert_eq!(mem2.num_regions(), 2);
        assert!(mem2.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem2.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().map(|x| *x)));

        let mem3 = mem2.memory();
        let res: GuestMemoryResult<()> = mem3.with_regions(|_, region| {
            assert_eq!(region.len(), region_size as GuestUsize);
            Ok(())
        });
        assert!(res.is_ok());
        let res: GuestMemoryResult<()> = mem3.with_regions_mut(|_, _| Ok(()));
        assert!(res.is_ok());
        assert_eq!(mem3.num_regions(), 2);
        assert!(mem3.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem3.find_region(GuestAddress(0x10000)).is_none());
    }
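
    // A sketch of a test for the update path: it reuses the `backend-mmap` helpers
    // from the test above and checks that a snapshot taken before `replace()` keeps
    // the old map, while fresh snapshots observe the new one.
    #[test]
    fn test_atomic_replace_sketch() {
        let region_size = 0x1000;
        let regions = vec![(GuestAddress(0x0), region_size)];
        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);

        // Snapshot taken before the update.
        let old_snapshot = gm.memory();
        assert_eq!(old_snapshot.num_regions(), 1);

        // Build a larger map and install it while the old snapshot is still alive.
        let new_regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let new_map = GuestMemoryMmap::from_ranges(&new_regions).unwrap();
        gm.lock().unwrap().replace(new_map);

        // The old snapshot still points at the original single-region map...
        assert_eq!(old_snapshot.num_regions(), 1);
        // ...while new snapshots see the replacement.
        assert_eq!(gm.memory().num_regions(), 2);
        assert!(gm.memory().find_region(GuestAddress(0x10_0000)).is_some());
    }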
}