diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs
index 018e83e80a8..fd278cb73f4 100644
--- a/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs
+++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/integration_tests.rs
@@ -502,6 +502,59 @@ pub async fn test_load_last_chunk(store: IndexeddbEventCacheStore) {
     assert_eq!(chunk_identifier_generator.current(), 42);
 }
 
+pub async fn test_load_previous_chunk(store: IndexeddbEventCacheStore) {
+    let room_id = &DEFAULT_TEST_ROOM_ID;
+    let linked_chunk_id = LinkedChunkId::Room(room_id);
+    let event = |msg: &str| make_test_event(room_id, msg);
+
+    // Case #1: no chunk at all, equivalent to having a nonexistent
+    // `before_chunk_identifier`.
+    let previous_chunk =
+        store.load_previous_chunk(linked_chunk_id, ChunkIdentifier::new(153)).await.unwrap();
+    assert!(previous_chunk.is_none());
+
+    // Case #2: there is one chunk only: we request the previous on this
+    // one, it doesn't exist.
+    let updates =
+        vec![Update::NewItemsChunk { previous: None, new: ChunkIdentifier::new(42), next: None }];
+    store.handle_linked_chunk_updates(linked_chunk_id, updates).await.unwrap();
+
+    let previous_chunk =
+        store.load_previous_chunk(linked_chunk_id, ChunkIdentifier::new(42)).await.unwrap();
+    assert!(previous_chunk.is_none());
+
+    // Case #3: there are two chunks.
+    let updates = vec![
+        // new chunk before the one that exists.
+        Update::NewItemsChunk {
+            previous: None,
+            new: ChunkIdentifier::new(7),
+            next: Some(ChunkIdentifier::new(42)),
+        },
+        Update::PushItems {
+            at: Position::new(ChunkIdentifier::new(7), 0),
+            items: vec![event("brigand du jorat"), event("morbier")],
+        },
+    ];
+    store.handle_linked_chunk_updates(linked_chunk_id, updates).await.unwrap();
+
+    let previous_chunk =
+        store.load_previous_chunk(linked_chunk_id, ChunkIdentifier::new(42)).await.unwrap();
+
+    assert_matches!(previous_chunk, Some(previous_chunk) => {
+        assert_eq!(previous_chunk.identifier, 7);
+        assert!(previous_chunk.previous.is_none());
+        assert_matches!(previous_chunk.next, Some(next) => {
+            assert_eq!(next, 42);
+        });
+        assert_matches!(previous_chunk.content, ChunkContent::Items(items) => {
+            assert_eq!(items.len(), 2);
+            check_test_event(&items[0], "brigand du jorat");
+            check_test_event(&items[1], "morbier");
+        });
+    });
+}
+
 /// Macro for generating tests for IndexedDB implementation of
 /// [`EventCacheStore`]
 ///
@@ -621,6 +674,13 @@ macro_rules! indexeddb_event_cache_store_integration_tests {
                 $crate::event_cache_store::integration_tests::test_load_last_chunk(store)
                     .await
             }
+
+            #[async_test]
+            async fn test_load_previous_chunk() {
+                let store = get_event_cache_store().await.expect("Failed to get event cache store");
+                $crate::event_cache_store::integration_tests::test_load_previous_chunk(store)
+                    .await
+            }
         }
     };
 }
@@ -654,6 +714,13 @@ macro_rules! event_cache_store_integration_tests {
                 event_cache_store.test_handle_updates_and_rebuild_linked_chunk().await;
             }
 
+            #[async_test]
+            async fn test_linked_chunk_incremental_loading() {
+                let event_cache_store =
+                    get_event_cache_store().await.unwrap().into_event_cache_store();
+                event_cache_store.test_linked_chunk_incremental_loading().await;
+            }
+
             #[async_test]
             async fn test_rebuild_empty_linked_chunk() {
                 let event_cache_store =
@@ -661,6 +728,20 @@ macro_rules! event_cache_store_integration_tests {
                 event_cache_store.test_rebuild_empty_linked_chunk().await;
             }
 
+            #[async_test]
+            async fn test_load_all_chunks_metadata() {
+                let event_cache_store =
+                    get_event_cache_store().await.unwrap().into_event_cache_store();
+                event_cache_store.test_load_all_chunks_metadata().await;
+            }
+
+            #[async_test]
+            async fn test_clear_all_linked_chunks() {
+                let event_cache_store =
+                    get_event_cache_store().await.unwrap().into_event_cache_store();
+                event_cache_store.test_clear_all_linked_chunks().await;
+            }
+
             #[async_test]
             async fn test_remove_room() {
                 let event_cache_store =
diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs
index 95490a36ee2..15139ff1a2e 100644
--- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs
+++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs
@@ -28,9 +28,10 @@ use matrix_sdk_base::{
         RawChunk, Update,
     },
     media::MediaRequestParameters,
+    timer,
 };
 use ruma::{events::relation::RelationType, EventId, MxcUri, OwnedEventId, RoomId};
-use tracing::trace;
+use tracing::{instrument, trace};
 use web_sys::IdbTransactionMode;
 
 use crate::event_cache_store::{
@@ -123,23 +124,28 @@ macro_rules! impl_event_cache_store {
     }
 }
 
 impl_event_cache_store! {
+    #[instrument(skip(self))]
     async fn try_take_leased_lock(
         &self,
         lease_duration_ms: u32,
         key: &str,
         holder: &str,
     ) -> Result<bool, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .try_take_leased_lock(lease_duration_ms, key, holder)
             .await
            .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip(self, updates))]
     async fn handle_linked_chunk_updates(
         &self,
         linked_chunk_id: LinkedChunkId<'_>,
         updates: Vec<Update<Event, Gap>>,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
+
         let linked_chunk_id = linked_chunk_id.to_owned();
         let room_id = linked_chunk_id.room_id();
 
@@ -258,10 +264,13 @@ impl_event_cache_store! {
         Ok(())
     }
 
+    #[instrument(skip(self))]
     async fn load_all_chunks(
         &self,
         linked_chunk_id: LinkedChunkId<'_>,
     ) -> Result<Vec<RawChunk<Event, Gap>>, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
+
         let linked_chunk_id = linked_chunk_id.to_owned();
         let room_id = linked_chunk_id.room_id();
 
@@ -283,16 +292,47 @@ impl_event_cache_store! {
         Ok(raw_chunks)
     }
 
+    #[instrument(skip(self))]
     async fn load_all_chunks_metadata(
        &self,
         linked_chunk_id: LinkedChunkId<'_>,
     ) -> Result<Vec<ChunkMetadata>, IndexeddbEventCacheStoreError> {
-        self.memory_store
-            .load_all_chunks_metadata(linked_chunk_id)
-            .await
-            .map_err(IndexeddbEventCacheStoreError::MemoryStore)
+        // TODO: This call could possibly take a very long time and the
+        // amount of time increases linearly with the number of chunks
+        // it needs to load from the database. This will likely require
+        // some refactoring to deal with performance issues.
+        //
+        // For details on the performance penalties associated with this
+        // call, see https://github.com/matrix-org/matrix-rust-sdk/pull/5407.
+        //
+        // For how this was improved in the SQLite implementation, see
+        // https://github.com/matrix-org/matrix-rust-sdk/pull/5382.
+        let _timer = timer!("method");
+
+        let linked_chunk_id = linked_chunk_id.to_owned();
+        let room_id = linked_chunk_id.room_id();
+
+        let transaction = self.transaction(
+            &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS],
+            IdbTransactionMode::Readonly,
+        )?;
+
+        let mut raw_chunks = Vec::new();
+        let chunks = transaction.get_chunks_in_room(room_id).await?;
+        for chunk in chunks {
+            let chunk_id = ChunkIdentifier::new(chunk.identifier);
+            let num_items = transaction.get_events_count_by_chunk(room_id, &chunk_id).await?;
+            raw_chunks.push(ChunkMetadata {
+                num_items,
+                previous: chunk.previous.map(ChunkIdentifier::new),
+                identifier: ChunkIdentifier::new(chunk.identifier),
+                next: chunk.next.map(ChunkIdentifier::new),
+            });
+        }
+        Ok(raw_chunks)
     }
 
+    #[instrument(skip(self))]
     async fn load_last_chunk(
         &self,
         linked_chunk_id: LinkedChunkId<'_>,
@@ -300,6 +340,8 @@ impl_event_cache_store! {
         (Option<RawChunk<Event, Gap>>, ChunkIdentifierGenerator),
         IndexeddbEventCacheStoreError,
     > {
+        let _timer = timer!("method");
+
         let linked_chunk_id = linked_chunk_id.to_owned();
         let room_id = linked_chunk_id.room_id();
         let transaction = self.transaction(
@@ -348,158 +390,206 @@ impl_event_cache_store! {
         }
     }
 
+    #[instrument(skip(self))]
     async fn load_previous_chunk(
         &self,
         linked_chunk_id: LinkedChunkId<'_>,
         before_chunk_identifier: ChunkIdentifier,
     ) -> Result<Option<RawChunk<Event, Gap>>, IndexeddbEventCacheStoreError> {
-        self.memory_store
-            .load_previous_chunk(linked_chunk_id, before_chunk_identifier)
-            .await
-            .map_err(IndexeddbEventCacheStoreError::MemoryStore)
+        let _timer = timer!("method");
+
+        let linked_chunk_id = linked_chunk_id.to_owned();
+        let room_id = linked_chunk_id.room_id();
+        let transaction = self.transaction(
+            &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS],
+            IdbTransactionMode::Readonly,
+        )?;
+        if let Some(chunk) = transaction.get_chunk_by_id(room_id, &before_chunk_identifier).await? {
+            if let Some(previous_identifier) = chunk.previous {
+                let previous_identifier = ChunkIdentifier::new(previous_identifier);
+                return Ok(transaction.load_chunk_by_id(room_id, &previous_identifier).await?);
+            }
+        }
+        Ok(None)
     }
 
+    #[instrument(skip(self))]
     async fn clear_all_linked_chunks(&self) -> Result<(), IndexeddbEventCacheStoreError> {
-        self.memory_store
-            .clear_all_linked_chunks()
-            .await
-            .map_err(IndexeddbEventCacheStoreError::MemoryStore)
+        let _timer = timer!("method");
+
+        let transaction = self.transaction(
+            &[keys::LINKED_CHUNKS, keys::EVENTS, keys::GAPS],
+            IdbTransactionMode::Readwrite,
+        )?;
+        transaction.clear::<Chunk>().await?;
+        transaction.clear::<Event>().await?;
+        transaction.clear::<Gap>().await?;
+        transaction.commit().await?;
+        Ok(())
     }
 
+    #[instrument(skip(self, events))]
     async fn filter_duplicated_events(
         &self,
         linked_chunk_id: LinkedChunkId<'_>,
         events: Vec<OwnedEventId>,
     ) -> Result<Vec<(OwnedEventId, Position)>, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .filter_duplicated_events(linked_chunk_id, events)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip(self, event_id))]
     async fn find_event(
         &self,
         room_id: &RoomId,
         event_id: &EventId,
     ) -> Result<Option<Event>, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .find_event(room_id, event_id)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip(self, event_id, filters))]
     async fn find_event_relations(
         &self,
         room_id: &RoomId,
         event_id: &EventId,
         filters: Option<&[RelationType]>,
     ) -> Result<Vec<(Event, Option<Position>)>, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .find_event_relations(room_id, event_id, filters)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip(self, event))]
     async fn save_event(
         &self,
         room_id: &RoomId,
         event: Event,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .save_event(room_id, event)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     async fn add_media_content(
         &self,
         request: &MediaRequestParameters,
         content: Vec<u8>,
         ignore_policy: IgnoreMediaRetentionPolicy,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .add_media_content(request, content, ignore_policy)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     async fn replace_media_key(
         &self,
         from: &MediaRequestParameters,
         to: &MediaRequestParameters,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .replace_media_key(from, to)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     async fn get_media_content(
         &self,
         request: &MediaRequestParameters,
     ) -> Result<Option<Vec<u8>>, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .get_media_content(request)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     async fn remove_media_content(
         &self,
         request: &MediaRequestParameters,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .remove_media_content(request)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip(self))]
     async fn get_media_content_for_uri(
         &self,
         uri: &MxcUri,
     ) -> Result<Option<Vec<u8>>, IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .get_media_content_for_uri(uri)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip(self))]
     async fn remove_media_content_for_uri(
         &self,
         uri: &MxcUri,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .remove_media_content_for_uri(uri)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     async fn set_media_retention_policy(
         &self,
         policy: MediaRetentionPolicy,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .set_media_retention_policy(policy)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     fn media_retention_policy(&self) -> MediaRetentionPolicy {
+        let _timer = timer!("method");
         self.memory_store.media_retention_policy()
     }
 
+    #[instrument(skip_all)]
     async fn set_ignore_media_retention_policy(
         &self,
         request: &MediaRequestParameters,
         ignore_policy: IgnoreMediaRetentionPolicy,
     ) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .set_ignore_media_retention_policy(request, ignore_policy)
             .await
             .map_err(IndexeddbEventCacheStoreError::MemoryStore)
     }
 
+    #[instrument(skip_all)]
     async fn clean_up_media_cache(&self) -> Result<(), IndexeddbEventCacheStoreError> {
+        let _timer = timer!("method");
         self.memory_store
             .clean_up_media_cache()
             .await
diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs
index 45615d0ae0b..15435d4a5d6 100644
--- a/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs
+++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs
@@ -580,6 +580,17 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> {
         self.get_items_by_key_components::<Event, IndexedEventPositionKey>(room_id, range).await
     }
 
+    /// Query IndexedDB for number of events in the given position range in the
+    /// given room.
+    pub async fn get_events_count_by_position(
+        &self,
+        room_id: &RoomId,
+        range: impl Into<IndexedKeyRange<&IndexedEventPositionKey>>,
+    ) -> Result<usize, IndexeddbEventCacheStoreTransactionError> {
+        self.get_items_count_by_key_components::<Event, IndexedEventPositionKey>(room_id, range)
+            .await
+    }
+
     /// Query IndexedDB for events in the given chunk in the given room.
     pub async fn get_events_by_chunk(
         &self,
@@ -594,6 +605,21 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> {
         self.get_events_by_position(room_id, range).await
     }
 
+    /// Query IndexedDB for number of events in the given chunk in the given
+    /// room.
+    pub async fn get_events_count_by_chunk(
+        &self,
+        room_id: &RoomId,
+        chunk_id: &ChunkIdentifier,
+    ) -> Result<usize, IndexeddbEventCacheStoreTransactionError> {
+        let mut lower = IndexedEventPositionKey::lower_key_components();
+        lower.chunk_identifier = chunk_id.index();
+        let mut upper = IndexedEventPositionKey::upper_key_components();
+        upper.chunk_identifier = chunk_id.index();
+        let range = IndexedKeyRange::Bound(&lower, &upper);
+        self.get_events_count_by_position(room_id, range).await
+    }
+
     /// Puts an event in the given room. If an event with the same key already
     /// exists, it will be overwritten.
     pub async fn put_event(