
Commit 9f4510e

Merge pull request #790 from wedsonaf/revocable
rust: use `MaybeUninit` instead of `ManuallyDrop` in revocable objects
2 parents: d156172 + 21070cc (commit 9f4510e)

File tree: 1 file changed (+18, -17 lines)

rust/kernel/revocable.rs

Lines changed: 18 additions & 17 deletions
@@ -9,7 +9,7 @@ use crate::{bindings, sync::rcu};
 use core::{
     cell::UnsafeCell,
     marker::PhantomData,
-    mem::ManuallyDrop,
+    mem::MaybeUninit,
     ops::Deref,
     ptr::drop_in_place,
     sync::atomic::{AtomicBool, Ordering},
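
For context on the two `core::mem` types swapped in this hunk: `ManuallyDrop<T>` always contains a live `T` and only suppresses its destructor, while `MaybeUninit<T>` may contain no valid `T` at all, so every read has to assert initialisation explicitly. A minimal userspace sketch of that access pattern (plain Rust, not kernel code; the `String` payload is just an illustration):

use core::mem::MaybeUninit;
use core::ptr::drop_in_place;

fn main() {
    // Storage that starts out holding a valid value, as `Revocable::new` does.
    let mut slot: MaybeUninit<String> = MaybeUninit::new(String::from("payload"));

    // SAFETY: `slot` was initialised above and has not been dropped yet, so a
    // shared reference to its contents is valid.
    let s: &String = unsafe { slot.assume_init_ref() };
    assert_eq!(s.len(), 7);

    // SAFETY: still initialised; after this call the slot must be treated as
    // uninitialised and must not be read or dropped again.
    unsafe { drop_in_place(slot.as_mut_ptr()) };
}
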
@@ -63,32 +63,30 @@ use core::{
 /// v.revoke();
 /// assert_eq!(add_two(&v), None);
 /// ```
-pub struct Revocable<T: ?Sized> {
+pub struct Revocable<T> {
     is_available: AtomicBool,
-    data: ManuallyDrop<UnsafeCell<T>>,
+    data: MaybeUninit<UnsafeCell<T>>,
 }

 // SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
 // functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
 // this isn't supported by the wrapped object.
-unsafe impl<T: ?Sized + Send> Send for Revocable<T> {}
+unsafe impl<T: Send> Send for Revocable<T> {}

 // SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
 // from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
 // implementation of the wrapped object from an arbitrary thread.
-unsafe impl<T: ?Sized + Sync + Send> Sync for Revocable<T> {}
+unsafe impl<T: Sync + Send> Sync for Revocable<T> {}

 impl<T> Revocable<T> {
     /// Creates a new revocable instance of the given data.
     pub const fn new(data: T) -> Self {
         Self {
             is_available: AtomicBool::new(true),
-            data: ManuallyDrop::new(UnsafeCell::new(data)),
+            data: MaybeUninit::new(UnsafeCell::new(data)),
         }
     }
-}

-impl<T: ?Sized> Revocable<T> {
     /// Tries to access the \[revocable\] wrapped object.
     ///
     /// Returns `None` if the object has been revoked and is therefore no longer accessible.
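
Note that `MaybeUninit<T>` requires `T: Sized`, which is presumably why the `?Sized` bounds are dropped here and in the impls below, and why the two `impl` blocks for `Revocable` can now be merged. The doc-comment context above ends with `v.revoke()` and `assert_eq!(add_two(&v), None)`; the full example is outside this hunk, but it presumably reads along these lines (a hedged reconstruction, not the verbatim doc test):

// Hedged reconstruction of the `Revocable` usage example referenced above.
fn add_two(v: &Revocable<u32>) -> Option<u32> {
    let guard = v.try_access()?;
    // The guard derefs to the wrapped value while the RCU read lock is held.
    Some(*guard + 2)
}

let v = Revocable::new(8);
assert_eq!(add_two(&v), Some(10));
v.revoke();
assert_eq!(add_two(&v), None);
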
@@ -99,7 +97,9 @@ impl<T: ?Sized> Revocable<T> {
     pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
         let guard = rcu::read_lock();
         if self.is_available.load(Ordering::Relaxed) {
-            Some(RevocableGuard::new(self.data.get(), guard))
+            // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+            // valid because the RCU read side lock prevents it from being dropped.
+            Some(unsafe { RevocableGuard::new(self.data.assume_init_ref().get(), guard) })
         } else {
             None
         }
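
The new expression threads through three layers: `assume_init_ref()` turns `&MaybeUninit<UnsafeCell<T>>` into `&UnsafeCell<T>` (unsafe, justified by the `is_available` check), and `UnsafeCell::get()` then yields the raw `*mut T` that `RevocableGuard::new` stores. A standalone sketch of that chain with the types spelled out (plain Rust; the `Example` type is made up for illustration):

use core::cell::UnsafeCell;
use core::mem::MaybeUninit;

struct Example(i32);

fn main() {
    let data: MaybeUninit<UnsafeCell<Example>> = MaybeUninit::new(UnsafeCell::new(Example(7)));

    // SAFETY: `data` was initialised just above.
    let cell: &UnsafeCell<Example> = unsafe { data.assume_init_ref() };
    let ptr: *mut Example = cell.get();

    // SAFETY: the pointer comes from a live `UnsafeCell` and nothing else is
    // accessing it here, so reading through it is valid.
    assert_eq!(unsafe { (*ptr).0 }, 7);
}
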
@@ -115,8 +115,9 @@ impl<T: ?Sized> Revocable<T> {
     /// object.
     pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
         if self.is_available.load(Ordering::Relaxed) {
-            // SAFETY: Given that the RCU read side lock is held, data has to remain valid.
-            Some(unsafe { &*self.data.get() })
+            // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+            // valid because the RCU read side lock prevents it from being dropped.
+            Some(unsafe { &*self.data.assume_init_ref().get() })
         } else {
             None
         }
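
`try_access_with_guard` lets a caller that already holds an RCU read lock borrow several revocable objects under that one guard, getting plain `&'a T` borrows instead of a `RevocableGuard` each. A hypothetical kernel-side caller (illustration only, not compilable outside the kernel crate; `config` and `stats` are made-up `Revocable` fields):

let guard = rcu::read_lock();
if let Some(cfg) = config.try_access_with_guard(&guard) {
    if let Some(st) = stats.try_access_with_guard(&guard) {
        // Both `cfg` and `st` are plain shared borrows tied to the lifetime
        // of `guard`, i.e. to the single RCU read-side critical section.
    }
}
// Dropping `guard` ends the read-side critical section.
drop(guard);
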
@@ -139,20 +140,20 @@ impl<T: ?Sized> Revocable<T> {

             // SAFETY: We know `self.data` is valid because only one CPU can succeed the
             // `compare_exchange` above that takes `is_available` from `true` to `false`.
-            unsafe { drop_in_place(self.data.get()) };
+            unsafe { drop_in_place(self.data.assume_init_ref().get()) };
         }
     }
 }

-impl<T: ?Sized> Drop for Revocable<T> {
+impl<T> Drop for Revocable<T> {
     fn drop(&mut self) {
         // Drop only if the data hasn't been revoked yet (in which case it has already been
         // dropped).
         if *self.is_available.get_mut() {
             // SAFETY: We know `self.data` is valid because no other CPU has changed
             // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
             // holds the only reference (mutable) to `self` now.
-            unsafe { drop_in_place(self.data.get()) };
+            unsafe { drop_in_place(self.data.assume_init_ref().get()) };
         }
     }
 }
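
Both `drop_in_place` call sites lean on `compare_exchange` having exactly one winner: only the CPU that flips `is_available` from `true` to `false` runs the drop in `revoke`, and `Drop` only runs it if the flag is still `true`, i.e. `revoke` never succeeded. A small standalone illustration of that single-winner property (std Rust, not kernel code; orderings chosen only for the demo):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let available = Arc::new(AtomicBool::new(true));
    let mut handles = Vec::new();

    for _ in 0..8 {
        let available = Arc::clone(&available);
        handles.push(thread::spawn(move || {
            // Succeeds for exactly one thread: the one that observes `true`
            // and atomically replaces it with `false`.
            available
                .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
        }));
    }

    let winners = handles
        .into_iter()
        .map(|h| h.join().unwrap())
        .filter(|&won| won)
        .count();
    assert_eq!(winners, 1);
}
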
@@ -165,13 +166,13 @@ impl<T: ?Sized> Drop for Revocable<T> {
 /// # Invariants
 ///
 /// The RCU read-side lock is held while the guard is alive.
-pub struct RevocableGuard<'a, T: ?Sized> {
+pub struct RevocableGuard<'a, T> {
     data_ref: *const T,
     _rcu_guard: rcu::Guard,
     _p: PhantomData<&'a ()>,
 }

-impl<T: ?Sized> RevocableGuard<'_, T> {
+impl<T> RevocableGuard<'_, T> {
     fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self {
         Self {
             data_ref,
@@ -181,7 +182,7 @@ impl<T: ?Sized> RevocableGuard<'_, T> {
     }
 }

-impl<T: ?Sized> Deref for RevocableGuard<'_, T> {
+impl<T> Deref for RevocableGuard<'_, T> {
     type Target = T;

     fn deref(&self) -> &Self::Target {
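
Because of this `Deref` impl, the guard returned by `try_access` can be used wherever a shared reference to the wrapped value is expected, for as long as the guard (and therefore the RCU read lock) is alive. A hedged usage sketch:

let v = Revocable::new(String::from("abc"));
if let Some(guard) = v.try_access() {
    // Deref coercion: `String` methods work directly on the guard.
    assert_eq!(guard.len(), 3);
}
v.revoke();
assert!(v.try_access().is_none());
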
