
Commit 8c9d013

bonzini authored and jiangliu committed
atomic: introduce GuestMemoryAtomic
GuestMemoryAtomic provides an efficient mutable GuestAddressSpace implementation that wraps a GuestMemory type M and uses ArcSwap to support RCU-like snapshotting of the memory map. For more background discussion, please refer to #43.

This patch is shared work with Liu Jiang <gerry@linux.alibaba.com>, with precious inputs from Alexandru Agache <aagch@amazon.com> as well.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 1d68e61 commit 8c9d013

File tree

5 files changed: +228 −1 lines changed

- Cargo.toml
- coverage_config.json
- src/atomic.rs
- src/guest_memory.rs
- src/lib.rs
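Before the per-file diffs, here is a minimal usage sketch of the new type (illustrative only, not part of this commit; it assumes both the `backend-mmap` and `backend-atomic` features are enabled):

```rust
use vm_memory::{
    GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
};

fn main() {
    // Build an initial memory map and wrap it for RCU-like updates.
    let mmap = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    let atomic = GuestMemoryAtomic::new(mmap);

    // Readers take a cheap snapshot; the guard dereferences to the map.
    let snapshot = atomic.memory();
    assert_eq!(snapshot.num_regions(), 1);

    // A writer swaps in a brand-new map without blocking existing readers.
    let new_map = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), 0x1000),
        (GuestAddress(0x10000), 0x1000),
    ])
    .unwrap();
    atomic.lock().unwrap().replace(new_map);

    // New snapshots see the update; the old snapshot still sees the old map.
    assert_eq!(atomic.memory().num_regions(), 2);
    assert_eq!(snapshot.num_regions(), 1);
}
```

The final assertions show the RCU-like property the commit message describes: `replace()` never invalidates snapshots that readers already hold.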

Cargo.toml

Lines changed: 2 additions & 0 deletions

```diff
@@ -10,9 +10,11 @@ edition = "2018"
 default = []
 integer-atomics = []
 backend-mmap = []
+backend-atomic = ["arc-swap"]
 
 [dependencies]
 libc = ">=0.2.39"
+arc-swap = { version = ">=0.4.4", optional = true }
 
 [target.'cfg(windows)'.dependencies.winapi]
 version = ">=0.3"
```
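Since the new backend is behind an optional feature that pulls in `arc-swap`, a downstream crate would opt in along these lines (hypothetical manifest; the version number is illustrative):

```toml
[dependencies]
# Enabling `backend-atomic` transitively enables the optional `arc-swap` dependency.
vm-memory = { version = "0.1", features = ["backend-mmap", "backend-atomic"] }
```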

coverage_config.json

Lines changed: 1 addition & 1 deletion

```diff
@@ -1,5 +1,5 @@
 {
   "coverage_score": 84.3,
   "exclude_path": "mmap_windows.rs",
-  "crate_features": "backend-mmap"
+  "crate_features": "backend-mmap,backend-atomic"
 }
```

src/atomic.rs

Lines changed: 205 additions & 0 deletions (new file)

```rust
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// Copyright (C) 2020 Red Hat, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! A wrapper over an `ArcSwap<GuestMemory>` struct to support RCU-style mutability.
//!
//! With the `backend-atomic` feature enabled, simply replacing `GuestMemoryMmap`
//! with `GuestMemoryAtomic<GuestMemoryMmap>` will enable support for mutable memory maps.
//! To support mutable memory maps, devices will also need to use
//! `GuestAddressSpace::memory()` to gain temporary access to guest memory.

extern crate arc_swap;

use arc_swap::{ArcSwap, Guard};
use std::ops::Deref;
use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};

use crate::{GuestAddressSpace, GuestMemory};

/// A fast implementation of a mutable collection of memory regions.
///
/// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map:
/// every update of the memory map creates a completely new `GuestMemory` object, and
/// readers will not be blocked because the copies they retrieved will be collected once
/// no one can access them anymore. Under the assumption that updates to the memory map
/// are rare, this allows a very efficient implementation of the `memory()` method.
#[derive(Clone, Debug)]
pub struct GuestMemoryAtomic<M: GuestMemory> {
    // GuestAddressSpace<M>, which we want to implement, is basically a drop-in
    // replacement for &M. Therefore, we need to pass to devices the GuestMemoryAtomic
    // rather than a reference to it. To obtain this effect we wrap the actual fields
    // of GuestMemoryAtomic with an Arc, and derive the Clone trait. See the
    // documentation for GuestAddressSpace for an example.
    inner: Arc<(ArcSwap<M>, Mutex<()>)>,
}

impl<M: GuestMemory> From<Arc<M>> for GuestMemoryAtomic<M> {
    /// Create a new `GuestMemoryAtomic` object whose initial contents come from
    /// the `map` reference-counted `GuestMemory`.
    fn from(map: Arc<M>) -> Self {
        let inner = (ArcSwap::new(map), Mutex::new(()));
        GuestMemoryAtomic {
            inner: Arc::new(inner),
        }
    }
}

impl<M: GuestMemory> GuestMemoryAtomic<M> {
    /// Create a new `GuestMemoryAtomic` object whose initial contents come from
    /// the `map` `GuestMemory`.
    pub fn new(map: M) -> Self {
        Arc::new(map).into()
    }

    fn load(&self) -> Guard<'static, Arc<M>> {
        self.inner.0.load()
    }

    /// Acquires the update mutex for the `GuestMemoryAtomic`, blocking the current
    /// thread until it is able to do so. The returned RAII guard allows for
    /// scoped unlock of the mutex (that is, the mutex will be unlocked when
    /// the guard goes out of scope), and optionally also for replacing the
    /// contents of the `GuestMemoryAtomic` when the lock is dropped.
    pub fn lock(&self) -> LockResult<GuestMemoryExclusiveGuard<M>> {
        match self.inner.1.lock() {
            Ok(guard) => Ok(GuestMemoryExclusiveGuard {
                parent: self,
                _guard: guard,
            }),
            Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard {
                parent: self,
                _guard: err.into_inner(),
            })),
        }
    }
}

impl<M: GuestMemory> GuestAddressSpace for GuestMemoryAtomic<M> {
    type T = GuestMemoryLoadGuard<M>;
    type M = M;

    fn memory(&self) -> Self::T {
        GuestMemoryLoadGuard { guard: self.load() }
    }
}

/// A guard that provides temporary access to a `GuestMemoryAtomic`. This
/// object is returned from the `memory()` method. It dereferences to
/// a snapshot of the `GuestMemory`, so it can be used transparently to
/// access memory.
#[derive(Debug)]
pub struct GuestMemoryLoadGuard<M: GuestMemory> {
    guard: Guard<'static, Arc<M>>,
}

impl<M: GuestMemory> GuestMemoryLoadGuard<M> {
    /// Make a clone of the held pointer and return it. This is more
    /// expensive than just using the snapshot, but it allows holding on
    /// to the snapshot outside the scope of the guard. It also allows
    /// writers to proceed, so it is recommended if the reference must
    /// be held for a long time (including for caching purposes).
    pub fn into_inner(self) -> Arc<M> {
        Guard::into_inner(self.guard)
    }
}

impl<M: GuestMemory> Deref for GuestMemoryLoadGuard<M> {
    type Target = M;

    fn deref(&self) -> &Self::Target {
        &*self.guard
    }
}

/// An RAII implementation of a "scoped lock" for `GuestMemoryAtomic`. When
/// this structure is dropped (falls out of scope) the lock will be unlocked,
/// possibly after updating the memory map represented by the
/// `GuestMemoryAtomic` that created the guard.
pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
    parent: &'a GuestMemoryAtomic<M>,
    _guard: MutexGuard<'a, ()>,
}

impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
    /// Replace the memory map in the `GuestMemoryAtomic` that created the guard
    /// with the new memory map, `map`. The lock is then dropped since this
    /// method consumes the guard.
    pub fn replace(self, map: M) {
        self.parent.inner.0.store(Arc::new(map))
    }
}

#[cfg(test)]
#[cfg(feature = "backend-mmap")]
mod tests {
    use super::*;
    use crate::{
        GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion, GuestMemoryResult,
        GuestUsize,
    };

    type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>;

    #[test]
    fn test_atomic_memory() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);
        let mem = gm.memory();

        let res: GuestMemoryResult<()> = mem.with_regions(|_, region| {
            assert_eq!(region.len(), region_size as GuestUsize);
            Ok(())
        });
        assert!(res.is_ok());
        let res: GuestMemoryResult<()> = mem.with_regions_mut(|_, region| {
            iterated_regions.push((region.start_addr(), region.len() as usize));
            Ok(())
        });
        assert!(res.is_ok());
        assert_eq!(regions, iterated_regions);
        assert_eq!(mem.num_regions(), 2);
        assert!(mem.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().map(|x| *x)));

        let mem2 = mem.into_inner();
        let res: GuestMemoryResult<()> = mem2.with_regions(|_, region| {
            assert_eq!(region.len(), region_size as GuestUsize);
            Ok(())
        });
        assert!(res.is_ok());
        let res: GuestMemoryResult<()> = mem2.with_regions_mut(|_, _| Ok(()));
        assert!(res.is_ok());
        assert_eq!(mem2.num_regions(), 2);
        assert!(mem2.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem2.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().map(|x| *x)));

        let mem3 = mem2.memory();
        let res: GuestMemoryResult<()> = mem3.with_regions(|_, region| {
            assert_eq!(region.len(), region_size as GuestUsize);
            Ok(())
        });
        assert!(res.is_ok());
        let res: GuestMemoryResult<()> = mem3.with_regions_mut(|_, _| Ok(()));
        assert!(res.is_ok());
        assert_eq!(mem3.num_regions(), 2);
        assert!(mem3.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem3.find_region(GuestAddress(0x10000)).is_none());
    }
}
```
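A sketch of how these pieces compose across threads (illustrative, not part of the commit; assumes `backend-mmap` and `backend-atomic`): the `Mutex<()>` serializes writers, `ArcSwap` lets readers load snapshots without taking any lock, and `into_inner()` detaches a snapshot so it can be held long-term without delaying writers.

```rust
use std::thread;
use vm_memory::{
    GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
};

fn main() {
    let map = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    let atomic = GuestMemoryAtomic::new(map);

    // Clone is cheap: it only bumps the refcount on the shared inner Arc.
    let reader = atomic.clone();
    let handle = thread::spawn(move || {
        // into_inner() turns the load guard into a plain Arc<GuestMemoryMmap>,
        // so the snapshot can outlive the guard without stalling writers.
        let snapshot = reader.memory().into_inner();
        snapshot.num_regions()
    });

    // Meanwhile, this thread can replace the map at any time.
    let bigger = GuestMemoryMmap::from_ranges(&[
        (GuestAddress(0), 0x1000),
        (GuestAddress(0x2000), 0x1000),
    ])
    .unwrap();
    atomic.lock().unwrap().replace(bigger);

    // The reader saw either the old map or the new one, never a torn state.
    let n = handle.join().unwrap();
    assert!(n == 1 || n == 2);
}
```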

src/guest_memory.rs

Lines changed: 15 additions & 0 deletions

```diff
@@ -264,6 +264,8 @@ pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
 /// # use std::sync::Arc;
 /// # #[cfg(feature = "backend-mmap")]
 /// # use vm_memory::GuestMemoryMmap;
+/// # #[cfg(feature = "backend-atomic")]
+/// # use vm_memory::GuestMemoryAtomic;
 /// # use vm_memory::{GuestAddress, GuestMemory, GuestAddressSpace};
 ///
 /// pub struct VirtioDevice<AS: GuestAddressSpace> {
@@ -295,6 +297,19 @@ pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
 ///     VirtioDevice::new();
 /// another.activate(&mmap);
 /// # }
+///
+/// # #[cfg(all(feature = "backend-mmap", feature = "backend-atomic"))]
+/// # fn test_2() {
+/// // Using `VirtioDevice` with a mutable GuestMemoryMmap:
+/// let mut for_mutable_mmap: VirtioDevice<GuestMemoryAtomic<GuestMemoryMmap>> =
+///     VirtioDevice::new();
+/// let atomic = GuestMemoryAtomic::new(get_mmap());
+/// for_mutable_mmap.activate(atomic.clone());
+/// let mut another: VirtioDevice<GuestMemoryAtomic<GuestMemoryMmap>> =
+///     VirtioDevice::new();
+/// another.activate(atomic.clone());
+/// // atomic can be modified here...
+/// # }
 /// ```
 
 pub trait GuestAddressSpace {
```
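Outside doctest form, the new pattern reads roughly as follows (illustrative sketch: this `VirtioDevice` only mirrors the shape of the struct in the doc example, whose body is hidden, and the doc-hidden `get_mmap()` helper is replaced by an inline `from_ranges` call):

```rust
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};

// A device generic over any address space, as in the doc example.
struct VirtioDevice<AS: GuestAddressSpace> {
    mem: Option<AS>,
}

impl<AS: GuestAddressSpace> VirtioDevice<AS> {
    fn new() -> Self {
        VirtioDevice { mem: None }
    }

    fn activate(&mut self, mem: AS) {
        self.mem = Some(mem);
    }
}

fn main() {
    let mmap = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    let atomic = GuestMemoryAtomic::new(mmap);

    // Several devices can share the same mutable address space...
    let mut dev: VirtioDevice<GuestMemoryAtomic<GuestMemoryMmap>> = VirtioDevice::new();
    dev.activate(atomic.clone());

    // ...and each one takes a fresh snapshot whenever it touches memory.
    if let Some(aspace) = &dev.mem {
        let _snapshot = aspace.memory();
    }
}
```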

src/lib.rs

Lines changed: 5 additions & 0 deletions

```diff
@@ -45,6 +45,11 @@ pub mod mmap;
 #[cfg(feature = "backend-mmap")]
 pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
 
+#[cfg(feature = "backend-atomic")]
+pub mod atomic;
+#[cfg(feature = "backend-atomic")]
+pub use atomic::{GuestMemoryAtomic, GuestMemoryLoadGuard};
+
 pub mod volatile_memory;
 pub use volatile_memory::{
     AtomicValued, Error as VolatileMemoryError, Result as VolatileMemoryResult, VolatileArrayRef,
```
