|
1 | 1 | //! Event handling types.
|
2 | 2 |
|
3 | 3 | use crate as bevy_ecs;
|
| 4 | +use crate::batching::BatchingStrategy; |
4 | 5 | use crate::change_detection::MutUntyped;
|
5 | 6 | use crate::{
|
6 | 7 | change_detection::{DetectChangesMut, Mut},
|
@@ -30,7 +31,7 @@ pub trait Event: Send + Sync + 'static {}
|
30 | 31 | /// An `EventId` uniquely identifies an event stored in a specific [`World`].
|
31 | 32 | ///
|
32 | 33 | /// An `EventId` can among other things be used to trace the flow of an event from the point it was
|
33 |
| -/// sent to the point it was processed. |
| 34 | +/// sent to the point it was processed. `EventId`s increase monotonically by send order. |
34 | 35 | ///
|
35 | 36 | /// [`World`]: crate::world::World
|
36 | 37 | pub struct EventId<E: Event> {
|
@@ -446,6 +447,46 @@ impl<'w, 's, E: Event> EventReader<'w, 's, E> {
|
446 | 447 | self.reader.read_with_id(&self.events)
|
447 | 448 | }
|
448 | 449 |
|
| 450 | + /// Returns a parallel iterator over the events this [`EventReader`] has not seen yet. |
| 451 | + /// See also [`for_each`](EventParIter::for_each). |
| 452 | + /// |
| 453 | + /// # Example |
| 454 | + /// ``` |
| 455 | + /// # use bevy_ecs::prelude::*; |
| 456 | + /// # use std::sync::atomic::{AtomicUsize, Ordering}; |
| 457 | + /// |
| 458 | + /// #[derive(Event)] |
| 459 | + /// struct MyEvent { |
| 460 | + /// value: usize, |
| 461 | + /// } |
| 462 | + /// |
| 463 | + /// #[derive(Resource, Default)] |
| 464 | + /// struct Counter(AtomicUsize); |
| 465 | + /// |
| 466 | + /// // setup |
| 467 | + /// let mut world = World::new(); |
| 468 | + /// world.init_resource::<Events<MyEvent>>(); |
| 469 | + /// world.insert_resource(Counter::default()); |
| 470 | + /// |
| 471 | + /// let mut schedule = Schedule::default(); |
| 472 | + /// schedule.add_systems(|mut events: EventReader<MyEvent>, counter: Res<Counter>| { |
| 473 | + /// events.par_read().for_each(|MyEvent { value }| { |
| 474 | + /// counter.0.fetch_add(*value, Ordering::Relaxed); |
| 475 | + /// }); |
| 476 | + /// }); |
| 477 | + /// for value in 0..100 { |
| 478 | + /// world.send_event(MyEvent { value }); |
| 479 | + /// } |
| 480 | + /// schedule.run(&mut world); |
| 481 | + /// let Counter(counter) = world.remove_resource::<Counter>().unwrap(); |
| 482 | + /// // all events were processed |
| 483 | + /// assert_eq!(counter.into_inner(), 4950); |
| 484 | + /// ``` |
| 485 | + /// |
| 486 | + pub fn par_read(&mut self) -> EventParIter<'_, E> { |
| 487 | + self.reader.par_read(&self.events) |
| 488 | + } |
| 489 | + |
449 | 490 | /// Determines the number of events available to be read from this [`EventReader`] without consuming any.
|
450 | 491 | pub fn len(&self) -> usize {
|
451 | 492 | self.reader.len(&self.events)
|
@@ -647,6 +688,11 @@ impl<E: Event> ManualEventReader<E> {
|
647 | 688 | EventIteratorWithId::new(self, events)
|
648 | 689 | }
|
649 | 690 |
|
| 691 | + /// See [`EventReader::par_read`] |
| 692 | + pub fn par_read<'a>(&'a mut self, events: &'a Events<E>) -> EventParIter<'a, E> { |
| 693 | + EventParIter::new(self, events) |
| 694 | + } |
| 695 | + |
650 | 696 | /// See [`EventReader::len`]
|
651 | 697 | pub fn len(&self, events: &Events<E>) -> usize {
|
652 | 698 | // The number of events in this reader is the difference between the most recent event
|
@@ -810,6 +856,135 @@ impl<'a, E: Event> ExactSizeIterator for EventIteratorWithId<'a, E> {
|
810 | 856 | }
|
811 | 857 | }
|
812 | 858 |
|
| 859 | +/// A parallel iterator over `Event`s. |
| 860 | +#[derive(Debug)] |
| 861 | +pub struct EventParIter<'a, E: Event> { |
| 862 | + reader: &'a mut ManualEventReader<E>, |
| 863 | + slices: [&'a [EventInstance<E>]; 2], |
| 864 | + batching_strategy: BatchingStrategy, |
| 865 | +} |
| 866 | + |
| 867 | +impl<'a, E: Event> EventParIter<'a, E> { |
| 868 | + /// Creates a new parallel iterator over `events` that have not yet been seen by `reader`. |
| 869 | + pub fn new(reader: &'a mut ManualEventReader<E>, events: &'a Events<E>) -> Self { |
| 870 | + let a_index = reader |
| 871 | + .last_event_count |
| 872 | + .saturating_sub(events.events_a.start_event_count); |
| 873 | + let b_index = reader |
| 874 | + .last_event_count |
| 875 | + .saturating_sub(events.events_b.start_event_count); |
| 876 | + let a = events.events_a.get(a_index..).unwrap_or_default(); |
| 877 | + let b = events.events_b.get(b_index..).unwrap_or_default(); |
| 878 | + |
| 879 | + let unread_count = a.len() + b.len(); |
| 880 | + // Ensure `len` is implemented correctly |
| 881 | + debug_assert_eq!(unread_count, reader.len(events)); |
| 882 | + reader.last_event_count = events.event_count - unread_count; |
| 883 | + |
| 884 | + Self { |
| 885 | + reader, |
| 886 | + slices: [a, b], |
| 887 | + batching_strategy: BatchingStrategy::default(), |
| 888 | + } |
| 889 | + } |
| 890 | + |
| 891 | + /// Changes the batching strategy used when iterating. |
| 892 | + /// |
| 893 | + /// For more information on how this affects the resultant iteration, see |
| 894 | + /// [`BatchingStrategy`]. |
| 895 | + pub fn batching_strategy(mut self, strategy: BatchingStrategy) -> Self { |
| 896 | + self.batching_strategy = strategy; |
| 897 | + self |
| 898 | + } |
| 899 | + |
| 900 | + /// Runs the provided closure for each unread event in parallel. |
| 901 | + /// |
| 902 | + /// Unlike normal iteration, the event order is not guaranteed in any form. |
| 903 | + /// |
| 904 | + /// # Panics |
| 905 | + /// If the [`ComputeTaskPool`] is not initialized. If using this from an event reader that is being |
| 906 | + /// initialized and run from the ECS scheduler, this should never panic. |
| 907 | + /// |
| 908 | + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool |
| 909 | + pub fn for_each<FN: Fn(&'a E) + Send + Sync + Clone>(self, func: FN) { |
| 910 | + self.for_each_with_id(move |e, _| func(e)); |
| 911 | + } |
| 912 | + |
| 913 | + /// Runs the provided closure for each unread event in parallel, like [`for_each`](Self::for_each), |
| 914 | + /// but additionally provides the `EventId` to the closure. |
| 915 | + /// |
| 916 | + /// Note that the order of iteration is not guaranteed, but `EventId`s are ordered by send order. |
| 917 | + /// |
| 918 | + /// # Panics |
| 919 | + /// If the [`ComputeTaskPool`] is not initialized. If using this from an event reader that is being |
| 920 | + /// initialized and run from the ECS scheduler, this should never panic. |
| 921 | + /// |
| 922 | + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool |
| 923 | + pub fn for_each_with_id<FN: Fn(&'a E, EventId<E>) + Send + Sync + Clone>(self, func: FN) { |
| 924 | + #[cfg(any(target_arch = "wasm32", not(feature = "multi-threaded")))] |
| 925 | + { |
| 926 | + self.into_iter().for_each(|(e, i)| func(e, i)); |
| 927 | + } |
| 928 | + |
| 929 | + #[cfg(all(not(target_arch = "wasm32"), feature = "multi-threaded"))] |
| 930 | + { |
| 931 | + let pool = bevy_tasks::ComputeTaskPool::get(); |
| 932 | + let thread_count = pool.thread_num(); |
| 933 | + if thread_count <= 1 { |
| 934 | + return self.into_iter().for_each(|(e, i)| func(e, i)); |
| 935 | + } |
| 936 | + |
| 937 | + let batch_size = self |
| 938 | + .batching_strategy |
| 939 | + .calc_batch_size(|| self.len(), thread_count); |
| 940 | + let chunks = self.slices.map(|s| s.chunks_exact(batch_size)); |
| 941 | + let remainders = chunks.each_ref().map(|c| c.remainder()); |
| 942 | + |
| 943 | + pool.scope(|scope| { |
| 944 | + for batch in chunks.into_iter().flatten().chain(remainders) { |
| 945 | + let func = func.clone(); |
| 946 | + scope.spawn(async move { |
| 947 | + for event in batch { |
| 948 | + func(&event.event, event.event_id); |
| 949 | + } |
| 950 | + }); |
| 951 | + } |
| 952 | + }); |
| 953 | + } |
| 954 | + } |
| 955 | + |
| 956 | + /// Returns the number of [`Event`]s to be iterated. |
| 957 | + pub fn len(&self) -> usize { |
| 958 | + self.slices.iter().map(|s| s.len()).sum() |
| 959 | + } |
| 960 | + |
| 961 | + /// Returns [`true`] if there are no events remaining in this iterator. |
| 962 | + pub fn is_empty(&self) -> bool { |
| 963 | + self.slices.iter().all(|x| x.is_empty()) |
| 964 | + } |
| 965 | +} |
| 966 | + |
| 967 | +impl<'a, E: Event> IntoIterator for EventParIter<'a, E> { |
| 968 | + type IntoIter = EventIteratorWithId<'a, E>; |
| 969 | + type Item = <Self::IntoIter as Iterator>::Item; |
| 970 | + |
| 971 | + fn into_iter(self) -> Self::IntoIter { |
| 972 | + let EventParIter { |
| 973 | + reader, |
| 974 | + slices: [a, b], |
| 975 | + .. |
| 976 | + } = self; |
| 977 | + let unread = a.len() + b.len(); |
| 978 | + let chain = a.iter().chain(b); |
| 979 | + EventIteratorWithId { |
| 980 | + reader, |
| 981 | + chain, |
| 982 | + unread, |
| 983 | + } |
| 984 | + } |
| 985 | +} |
| 986 | + |
| 987 | +#[doc(hidden)] |
813 | 988 | struct RegisteredEvent {
|
814 | 989 | component_id: ComponentId,
|
815 | 990 | // Required to flush the secondary buffer and drop events even if left unchanged.
|
@@ -1326,4 +1501,32 @@ mod tests {
|
1326 | 1501 | "Only sent two events; got more than two IDs"
|
1327 | 1502 | );
|
1328 | 1503 | }
|
| 1504 | + |
#[cfg(feature = "multi-threaded")]
#[test]
fn test_events_par_iter() {
    use std::{collections::HashSet, sync::mpsc};

    use crate::prelude::*;

    let mut world = World::new();
    world.init_resource::<Events<TestEvent>>();
    for i in 0..100 {
        world.send_event(TestEvent { i });
    }

    let mut schedule = Schedule::default();

    schedule.add_systems(|mut events: EventReader<TestEvent>| {
        // Funnel every observed value through a channel so that the
        // (unordered) parallel results can be compared as a set.
        let (sender, receiver) = mpsc::channel();
        events.par_read().for_each(|event| {
            sender.send(event.i).unwrap();
        });
        drop(sender);

        let seen: HashSet<_> = receiver.into_iter().collect();
        assert_eq!(seen, HashSet::from_iter(0..100));
    });
    schedule.run(&mut world);
}
1329 | 1532 | }
|
0 commit comments