
Commit be3af5e

refactor(event cache): push down absence of gap handling in back-paginations
This will be useful to do for threaded paginations too.
1 parent 38bbdf0 commit be3af5e
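
Below, the caller in pagination.rs stops resolving the gap chunk itself, and the state in room/mod.rs takes over that lookup. As a reading aid, here is a minimal, self-contained sketch of the contract being pushed down, using toy types (`Chunk`, `State`, `Outcome`) rather than the SDK's real ones: the state-level handler resolves `prev_token` to a gap on its own and returns `None` when that token no longer exists, instead of the caller doing the lookup beforehand.

// Toy sketch of the pushed-down contract; not the SDK's actual types or API.

enum Chunk {
    Items(Vec<String>),
    Gap { prev_token: String },
}

struct Outcome {
    reached_start: bool,
}

struct State {
    chunks: Vec<Chunk>,
}

impl State {
    // Toy counterpart of `handle_backpagination`: `None` means the
    // previous-batch token vanished (e.g. the timeline was cleared), and the
    // caller may react to that, for instance by restarting the pagination.
    fn handle_backpagination(
        &mut self,
        events: Vec<String>,
        new_token: Option<String>,
        prev_token: Option<String>,
    ) -> Option<Outcome> {
        if let Some(token) = prev_token {
            // The gap lookup now happens here, not at the call site.
            let gap_pos = self
                .chunks
                .iter()
                .position(|chunk| matches!(chunk, Chunk::Gap { prev_token } if *prev_token == token))?;

            // Replace the consumed gap with the paginated events, keeping a
            // new gap if the server returned another previous-batch token.
            self.chunks[gap_pos] = Chunk::Items(events);
            if let Some(prev_token) = new_token.clone() {
                self.chunks.insert(gap_pos, Chunk::Gap { prev_token });
            }
        } else {
            // First pagination (no token yet); the real prepend logic is elided.
            self.chunks.insert(0, Chunk::Items(events));
        }

        Some(Outcome { reached_start: new_token.is_none() })
    }
}

fn main() {
    let mut state = State { chunks: vec![Chunk::Gap { prev_token: "t0".into() }] };

    // The gap for "t0" exists: the result is applied and an outcome returned.
    let outcome = state
        .handle_backpagination(vec!["$a".into()], None, Some("t0".into()))
        .expect("gap is present");
    println!("reached start: {}", outcome.reached_start);

    // The gap was consumed above, so the same token now yields `None`.
    assert!(state.handle_backpagination(vec!["$b".into()], None, Some("t0".into())).is_none());
}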

File tree

crates/matrix-sdk/src/event_cache/pagination.rs
crates/matrix-sdk/src/event_cache/room/mod.rs

2 files changed: +60 −47 lines changed


crates/matrix-sdk/src/event_cache/pagination.rs

Lines changed: 20 additions & 39 deletions
@@ -18,12 +18,11 @@ use std::{sync::Arc, time::Duration};
 
 use eyeball::{SharedObservable, Subscriber};
 use matrix_sdk_base::timeout::timeout;
-use matrix_sdk_common::linked_chunk::ChunkContent;
 use ruma::api::Direction;
 use tracing::{debug, instrument, trace};
 
 use super::{
-    room::{events::Gap, LoadMoreEventsBackwardsOutcome, RoomEventCacheInner},
+    room::{LoadMoreEventsBackwardsOutcome, RoomEventCacheInner},
     BackPaginationOutcome, EventsOrigin, Result, RoomEventCacheUpdate,
 };
 use crate::{
@@ -264,7 +263,7 @@ impl RoomPagination {
         batch_size: u16,
         prev_token: Option<String>,
     ) -> Result<Option<BackPaginationOutcome>> {
-        let (events, new_gap) = {
+        let (events, new_token) = {
             let Some(room) = self.inner.weak_room.get() else {
                 // The client is shutting down, return an empty default response.
                 return Ok(Some(BackPaginationOutcome {
@@ -281,48 +280,30 @@ impl RoomPagination {
                 .await
                 .map_err(|err| EventCacheError::BackpaginationError(Box::new(err)))?;
 
-            let new_gap = response.end.map(|prev_token| Gap { prev_token });
-
-            (response.chunk, new_gap)
+            (response.chunk, response.end)
         };
 
-        // Make sure the `EventLinkedChunk` isn't updated while we are saving events
-        // from backpagination.
-        let mut state = self.inner.state.write().await;
-
-        // Check that the previous token still exists; otherwise it's a sign that the
-        // room's timeline has been cleared.
-        let prev_gap_chunk_id = if let Some(token) = prev_token {
-            let gap_chunk_id = state.room_linked_chunk().chunk_identifier(|chunk| {
-                matches!(chunk.content(), ChunkContent::Gap(Gap { ref prev_token }) if *prev_token == token)
-            });
-
-            if gap_chunk_id.is_none() {
-                // We got a previous-batch token from the linked chunk *before* running the
-                // request, but it is missing *after* completing the
-                // request.
-                //
-                // It may be a sign the linked chunk has been reset, but it's fine, per this
-                // function's contract.
-                return Ok(None);
+        if let Some((outcome, timeline_event_diffs)) = self
+            .inner
+            .state
+            .write()
+            .await
+            .handle_backpagination(events, new_token, prev_token)
+            .await?
+        {
+            if !timeline_event_diffs.is_empty() {
+                let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents {
+                    diffs: timeline_event_diffs,
+                    origin: EventsOrigin::Pagination,
+                });
             }
 
-            gap_chunk_id
+            Ok(Some(outcome))
         } else {
-            None
-        };
-
-        let (outcome, timeline_event_diffs) =
-            state.handle_backpagination(events, new_gap, prev_gap_chunk_id).await?;
-
-        if !timeline_event_diffs.is_empty() {
-            let _ = self.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents {
-                diffs: timeline_event_diffs,
-                origin: EventsOrigin::Pagination,
-            });
+            // The previous token has gone missing, so the timeline has been reset in the
+            // meanwhile, but it's fine per this function's contract.
+            Ok(None)
         }
-
-        Ok(Some(outcome))
     }
 
     /// Returns a subscriber to the pagination status used for the
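
On this caller side, a single back-pagination attempt now surfaces a vanished previous-batch token as `Ok(None)` rather than as an error. A hedged sketch of how calling code can treat that contract follows; `run_once` and `next_token` are hypothetical stand-ins with simplified types, not the SDK's actual API.

// `run_once` stands in for one back-pagination attempt; the point is only the
// contract: `Ok(None)` means "token vanished, retry with a fresh token".

struct BackPaginationOutcome {
    reached_start: bool,
}

type PaginationResult = Result<Option<BackPaginationOutcome>, Box<dyn std::error::Error>>;

fn paginate_until_applied(
    mut next_token: impl FnMut() -> Option<String>,
    mut run_once: impl FnMut(Option<String>) -> PaginationResult,
) -> Result<BackPaginationOutcome, Box<dyn std::error::Error>> {
    loop {
        match run_once(next_token())? {
            // The events were applied to the linked chunk: done.
            Some(outcome) => return Ok(outcome),
            // The token went missing (e.g. the timeline was reset in the
            // meanwhile); loop and retry with a fresh token.
            None => continue,
        }
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut attempts = 0;
    let outcome = paginate_until_applied(
        || Some("fresh-token".to_owned()),
        |_token: Option<String>| -> PaginationResult {
            attempts += 1;
            // Pretend the first attempt raced with a timeline reset.
            if attempts == 1 {
                Ok(None)
            } else {
                Ok(Some(BackPaginationOutcome { reached_start: true }))
            }
        },
    )?;
    println!("reached start after {attempts} attempts: {}", outcome.reached_start);
    Ok(())
}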

crates/matrix-sdk/src/event_cache/room/mod.rs

Lines changed: 40 additions & 8 deletions
@@ -503,8 +503,7 @@ mod private {
         },
         linked_chunk::{
             lazy_loader::{self},
-            ChunkContent, ChunkIdentifier, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId,
-            Position, Update,
+            ChunkContent, ChunkIdentifierGenerator, ChunkMetadata, LinkedChunkId, Position, Update,
         },
         serde_helpers::extract_thread_root,
         sync::Timeline,
@@ -1262,6 +1261,8 @@ mod private {
 
         /// Post-process new events, after they have been added to the in-memory
         /// linked chunk.
+        ///
+        /// Flushes updates to disk first.
         async fn post_process_new_events(
             &mut self,
             events: Vec<Event>,
@@ -1568,13 +1569,43 @@ mod private {
             Ok((has_new_gap, timeline_event_diffs))
         }
 
+        /// Handle the result of a single back-pagination request.
+        ///
+        /// If the `prev_token` is set, then this function will check that the
+        /// corresponding gap is present in the in-memory linked chunk.
+        /// If it's not the case, `Ok(None)` will be returned, and the
+        /// caller may decide to do something based on that (e.g. restart a
+        /// pagination).
         #[must_use = "Propagate `VectorDiff` updates via `RoomEventCacheUpdate`"]
         pub async fn handle_backpagination(
             &mut self,
             events: Vec<Event>,
-            mut new_gap: Option<Gap>,
-            prev_gap_id: Option<ChunkIdentifier>,
-        ) -> Result<(BackPaginationOutcome, Vec<VectorDiff<Event>>), EventCacheError> {
+            mut new_token: Option<String>,
+            prev_token: Option<String>,
+        ) -> Result<Option<(BackPaginationOutcome, Vec<VectorDiff<Event>>)>, EventCacheError>
+        {
+            // Check that the previous token still exists; otherwise it's a sign that the
+            // room's timeline has been cleared.
+            let prev_gap_id = if let Some(token) = prev_token {
+                // Find the corresponding gap in the in-memory linked chunk.
+                let gap_chunk_id = self.room_linked_chunk.chunk_identifier(|chunk| {
+                    matches!(chunk.content(), ChunkContent::Gap(Gap { ref prev_token }) if *prev_token == token)
+                });
+
+                if gap_chunk_id.is_none() {
+                    // We got a previous-batch token from the linked chunk *before* running the
+                    // request, but it is missing *after* completing the request.
+                    //
+                    // It may be a sign the linked chunk has been reset, but it's fine, per this
+                    // function's contract.
+                    return Ok(None);
+                }
+
+                gap_chunk_id
+            } else {
+                None
+            };
+
             let DeduplicationOutcome {
                 all_events: mut events,
                 in_memory_duplicated_event_ids,
@@ -1610,25 +1641,26 @@ mod private {
                 events.clear();
                 // The gap can be ditched too, as it won't be useful to backpaginate any
                 // further.
-                new_gap = None;
+                new_token = None;
             };
 
             // `/messages` has been called with `dir=b` (backwards), so the events are in
             // the inverted order; reorder them.
             let topo_ordered_events = events.iter().rev().cloned().collect::<Vec<_>>();
 
+            let new_gap = new_token.map(|prev_token| Gap { prev_token });
             let reached_start = self.room_linked_chunk.finish_back_pagination(
                 prev_gap_id,
                 new_gap,
                 &topo_ordered_events,
             );
 
+            // Note: this flushes updates to the store.
             self.post_process_new_events(topo_ordered_events, false).await?;
 
             let event_diffs = self.room_linked_chunk.updates_as_vector_diffs();
-            let backpagination_outcome = BackPaginationOutcome { events, reached_start };
 
-            Ok((backpagination_outcome, event_diffs))
+            Ok(Some((BackPaginationOutcome { events, reached_start }, event_diffs)))
         }
     }
 }
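
The gap lookup that moved into `handle_backpagination` identifies the right chunk by comparing its stored `prev_token` with the token the request used, via `matches!` with a struct pattern and an `if` guard. A tiny standalone illustration of that idiom follows, with toy `ChunkContent` and `Gap` types that only mirror the shape of the SDK's generic ones.

// Toy types; not the SDK's definitions.
struct Gap {
    prev_token: String,
}

enum ChunkContent {
    Items(Vec<String>),
    Gap(Gap),
}

fn main() {
    let chunks = vec![
        ChunkContent::Items(vec!["$ev0".to_owned()]),
        ChunkContent::Gap(Gap { prev_token: "t42".to_owned() }),
    ];
    let token = "t42".to_owned();

    // Same shape as the lookup above: a chunk matches only if it is a gap
    // carrying exactly this previous-batch token.
    let gap_position = chunks
        .iter()
        .position(|chunk| matches!(chunk, ChunkContent::Gap(Gap { prev_token }) if *prev_token == token));

    // A `None` here is what makes `handle_backpagination` return `Ok(None)`.
    assert_eq!(gap_position, Some(1));
    println!("gap found at index {gap_position:?}");
}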
