 //!
 //! C header: [`include/linux/workqueue.h`](../../../../include/linux/workqueue.h)
 
-use crate::{bindings, prelude::*, sync::LockClassKey, types::Opaque};
+use crate::{bindings, prelude::*, sync::Arc, sync::LockClassKey, types::Opaque};
+use alloc::boxed::Box;
 use core::marker::PhantomData;
+use core::pin::Pin;
 
 /// Creates a [`Work`] initialiser with the given name and a newly-created lock class.
 #[macro_export]
@@ -363,6 +365,99 @@ macro_rules! impl_has_work {
     )*};
 }
 
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Arc<T>
+where
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+        // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+        let ptr = ptr as *mut Work<T, ID>;
+        // SAFETY: This computes the pointer that `__enqueue` got from `Arc::into_raw`.
+        let ptr = unsafe { T::work_container_of(ptr) };
+        // SAFETY: This pointer comes from `Arc::into_raw` and we've been given back ownership.
+        let arc = unsafe { Arc::from_raw(ptr) };
+
+        T::run(arc)
+    }
+}
+
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for Arc<T>
+where
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+    type EnqueueOutput = Result<(), Self>;
+
+    unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+    where
+        F: FnOnce(*mut bindings::work_struct) -> bool,
+    {
+        // Casting between const and mut is not a problem as long as the pointer is a raw pointer.
+        let ptr = Arc::into_raw(self).cast_mut();
+
+        // SAFETY: Pointers into an `Arc` point at a valid value.
+        let work_ptr = unsafe { T::raw_get_work(ptr) };
+        // SAFETY: `raw_get_work` returns a pointer to a valid value.
+        let work_ptr = unsafe { Work::raw_get(work_ptr) };
+
+        if queue_work_on(work_ptr) {
+            Ok(())
+        } else {
+            // SAFETY: The work queue has not taken ownership of the pointer.
+            Err(unsafe { Arc::from_raw(ptr) })
+        }
+    }
+}
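
For context, here is a minimal sketch of how a driver might use the `Arc<T>` impls above. It assumes the `new_work!` and `impl_has_work!` macros from this file, the `workqueue::system()` accessor further down, and the kernel's pin-init API (`Arc::pin_init`, `pin_init!`); `MyStruct`, its constructor, and `print_later` are illustrative names, not part of this patch.

```rust
use kernel::prelude::*;
use kernel::sync::Arc;
use kernel::workqueue::{self, Work, WorkItem};
use kernel::{impl_has_work, new_work};

struct MyStruct {
    value: i32,
    // The embedded `work_struct` that the C workqueue machinery operates on.
    work: Work<MyStruct>,
}

impl_has_work! {
    impl HasWork<Self> for MyStruct { self.work }
}

impl MyStruct {
    fn new(value: i32) -> Result<Arc<Self>> {
        // Pin-initialise the struct in place so the `work_struct` never moves.
        Arc::pin_init(pin_init!(MyStruct {
            value,
            work <- new_work!("MyStruct::work"),
        }))
    }
}

impl WorkItem for MyStruct {
    type Pointer = Arc<MyStruct>;

    fn run(this: Arc<MyStruct>) {
        pr_info!("The value is: {}\n", this.value);
    }
}

fn print_later(val: Arc<MyStruct>) {
    // For `Arc<T>`, `EnqueueOutput` is `Result<(), Self>`: enqueueing fails and
    // hands the `Arc` back if the work item is already on a queue.
    let _ = workqueue::system().enqueue(val);
}
```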
+
+unsafe impl<T, const ID: u64> WorkItemPointer<ID> for Pin<Box<T>>
+where
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+    unsafe extern "C" fn run(ptr: *mut bindings::work_struct) {
+        // SAFETY: The `__enqueue` method always uses a `work_struct` stored in a `Work<T, ID>`.
+        let ptr = ptr as *mut Work<T, ID>;
+        // SAFETY: This computes the pointer that `__enqueue` got from `Box::into_raw`.
+        let ptr = unsafe { T::work_container_of(ptr) };
+        // SAFETY: This pointer comes from `Box::into_raw` and we've been given back ownership.
+        let boxed = unsafe { Box::from_raw(ptr) };
+        // SAFETY: The box was already pinned when it was enqueued.
+        let pinned = unsafe { Pin::new_unchecked(boxed) };
+
+        T::run(pinned)
+    }
+}
+
+unsafe impl<T, const ID: u64> RawWorkItem<ID> for Pin<Box<T>>
+where
+    T: WorkItem<ID, Pointer = Self>,
+    T: HasWork<T, ID>,
+{
+    type EnqueueOutput = ();
+
+    unsafe fn __enqueue<F>(self, queue_work_on: F) -> Self::EnqueueOutput
+    where
+        F: FnOnce(*mut bindings::work_struct) -> bool,
+    {
+        // SAFETY: We're not going to move `self` or any of its fields, so it's okay to
+        // temporarily remove the `Pin` wrapper.
+        let boxed = unsafe { Pin::into_inner_unchecked(self) };
+        let ptr = Box::into_raw(boxed);
+
+        // SAFETY: Pointers into a `Box` point at a valid value.
+        let work_ptr = unsafe { T::raw_get_work(ptr) };
+        // SAFETY: `raw_get_work` returns a pointer to a valid value.
+        let work_ptr = unsafe { Work::raw_get(work_ptr) };
+
+        if !queue_work_on(work_ptr) {
+            // SAFETY: This method requires exclusive ownership of the box, so it cannot be in a
+            // workqueue.
+            unsafe { ::core::hint::unreachable_unchecked() }
+        }
+    }
+}
+
 /// Returns the system work queue (`system_wq`).
 ///
 /// It is the one used by `schedule[_delayed]_work[_on]()`. Multi-CPU multi-threaded. There are
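
Similarly, a hypothetical sketch of the `Pin<Box<T>>` flavour; `OneShot` and `spawn_one_shot` are made-up names, and `Box::pin_init` is assumed from the kernel's pin-init API. The key difference from the `Arc` case is that enqueueing cannot fail, because exclusive ownership of the box guarantees the item is not already on a queue, which is why `EnqueueOutput` is `()`.

```rust
use kernel::prelude::*;
use kernel::workqueue::{self, Work, WorkItem};
use kernel::{impl_has_work, new_work};

struct OneShot {
    msg: &'static str,
    // The embedded `work_struct` for this work item.
    work: Work<OneShot>,
}

impl_has_work! {
    impl HasWork<Self> for OneShot { self.work }
}

impl WorkItem for OneShot {
    type Pointer = Pin<Box<OneShot>>;

    fn run(this: Pin<Box<OneShot>>) {
        pr_info!("{}\n", this.msg);
    }
}

fn spawn_one_shot() -> Result {
    // Pin-initialise the boxed work item in place.
    let item = Box::pin_init(pin_init!(OneShot {
        msg: "hello from the workqueue",
        work <- new_work!("OneShot::work"),
    }))?;
    // Unlike the `Arc` case, this cannot fail, so there is no result to check.
    workqueue::system().enqueue(item);
    Ok(())
}
```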
|
|