Commit 129e9e1

refactor(event cache): move finishing a network pagination into an EventLinkedChunk method
1 parent f7df0eb commit 129e9e1

2 files changed: 103 additions, 87 deletions
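At a glance, the caller-side back-pagination bookkeeping moves into the following method on EventLinkedChunk (signature copied from the first diff below; it returns whether the start of the timeline has been reached):

    pub fn finish_back_pagination(
        &mut self,
        prev_gap_id: Option<ChunkIdentifier>,
        new_gap: Option<Gap>,
        events: &[Event],
    ) -> bool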

crates/matrix-sdk/src/event_cache/room/events.rs

Lines changed: 94 additions & 0 deletions
@@ -26,6 +26,7 @@ use matrix_sdk_common::linked_chunk::{
     AsVector, Chunk, ChunkIdentifier, Error, Iter, IterBackward, LinkedChunk, ObservableUpdates,
     Position,
 };
+use tracing::trace;
 
 /// This type represents a linked chunk of events for a single room or thread.
 #[derive(Debug)]
@@ -311,6 +312,99 @@ impl EventLinkedChunk
         self.rchunks()
             .find_map(|chunk| as_variant!(chunk.content(), ChunkContent::Gap(gap) => gap.clone()))
     }
+
+    /// Finish a network back-pagination for this linked chunk by updating the
+    /// in-memory linked chunk with the results.
+    ///
+    /// ## Arguments
+    ///
+    /// - `prev_gap_id`: the identifier of the previous gap, if any.
+    /// - `new_gap`: the new gap to insert, if any. If missing, we've likely
+    ///   reached the start of the timeline.
+    /// - `events`: new events to insert, in the topological ordering (i.e. from
+    ///   oldest to most recent).
+    ///
+    /// ## Returns
+    ///
+    /// Returns a boolean indicating whether we've hit the start of the
+    /// timeline/linked chunk.
+    pub fn finish_back_pagination(
+        &mut self,
+        prev_gap_id: Option<ChunkIdentifier>,
+        new_gap: Option<Gap>,
+        events: &[Event],
+    ) -> bool {
+        let first_event_pos = self.events().next().map(|(item_pos, _)| item_pos);
+
+        // First, insert events.
+        let insert_new_gap_pos = if let Some(gap_id) = prev_gap_id {
+            // There is a prior gap, let's replace it with the new events!
+            trace!("replacing previous gap with the back-paginated events");
+
+            // Replace the gap with the events we just deduplicated. This might get rid of
+            // the underlying gap, if the conditions are favorable to
+            // us.
+            self.replace_gap_at(gap_id, events.to_vec())
+                .expect("gap_identifier is a valid chunk id we read previously")
+        } else if let Some(pos) = first_event_pos {
+            // No prior gap, but we had some events: assume we need to prepend events
+            // before those.
+            trace!("inserted events before the first known event");
+
+            self.insert_events_at(events.to_vec(), pos)
+                .expect("pos is a valid position we just read above");
+
+            Some(pos)
+        } else {
+            // No prior gap, and no prior events: push the events.
+            trace!("pushing events received from back-pagination");
+
+            self.push_events(events.to_vec());
+
+            // A new gap may be inserted before the new events, if there are any.
+            self.events().next().map(|(item_pos, _)| item_pos)
+        };
+
+        // And insert the new gap if needs be.
+        //
+        // We only do this when at least one new, non-duplicated event, has been added
+        // to the chunk. Otherwise it means we've back-paginated all the
+        // known events.
+        let has_new_gap = new_gap.is_some();
+        if let Some(new_gap) = new_gap {
+            if let Some(new_pos) = insert_new_gap_pos {
+                self.insert_gap_at(new_gap, new_pos)
+                    .expect("events_chunk_pos represents a valid chunk position");
+            } else {
+                self.push_gap(new_gap);
+            }
+        }
+
+        // There could be an inconsistency between the network (which thinks we hit the
+        // start of the timeline) and the disk (which has the initial empty
+        // chunks), so tweak the `reached_start` value so that it reflects the
+        // disk state in priority instead.
+
+        let has_gaps = self.chunks().any(|chunk| chunk.is_gap());
+
+        // Whether the first chunk has no predecessors or not.
+        let first_chunk_is_definitive_head =
+            self.chunks().next().map(|chunk| chunk.is_definitive_head());
+
+        let network_reached_start = !has_new_gap;
+        let reached_start =
+            !has_gaps && first_chunk_is_definitive_head.unwrap_or(network_reached_start);
+
+        trace!(
+            ?network_reached_start,
+            ?has_gaps,
+            ?first_chunk_is_definitive_head,
+            ?reached_start,
+            "finished handling network back-pagination"
+        );
+
+        reached_start
+    }
 }
 
 // Methods related to lazy-loading.

crates/matrix-sdk/src/event_cache/room/mod.rs

Lines changed: 9 additions & 87 deletions
@@ -1591,10 +1591,6 @@ mod private {
             mut new_gap: Option<Gap>,
             prev_gap_id: Option<ChunkIdentifier>,
         ) -> Result<(BackPaginationOutcome, Vec<VectorDiff<Event>>), EventCacheError> {
-            // If there's no new gap (previous batch token), then we've reached the start of
-            // the timeline.
-            let network_reached_start = new_gap.is_none();
-
             let DeduplicationOutcome {
                 all_events: mut events,
                 in_memory_duplicated_event_ids,
@@ -1633,91 +1629,17 @@
                 new_gap = None;
             };
 
-            // Reverse the order of the events as `/messages` has been called with `dir=b`
-            // (backwards). The `EventLinkedChunk` API expects the first event to be the
-            // oldest. Let's re-order them for this block.
-            let reversed_events = events.iter().rev().cloned().collect::<Vec<_>>();
-
-            let first_event_pos =
-                self.room_linked_chunk.events().next().map(|(item_pos, _)| item_pos);
-
-            // First, insert events.
-            let insert_new_gap_pos = if let Some(gap_id) = prev_gap_id {
-                // There is a prior gap, let's replace it by new events!
-                if all_duplicates {
-                    assert!(reversed_events.is_empty());
-                }
-
-                trace!("replacing previous gap with the back-paginated events");
-
-                // Replace the gap with the events we just deduplicated. This might get rid of
-                // the underlying gap, if the conditions are favorable to
-                // us.
-                self.room_linked_chunk
-                    .replace_gap_at(gap_id, reversed_events.clone())
-                    .expect("gap_identifier is a valid chunk id we read previously")
-            } else if let Some(pos) = first_event_pos {
-                // No prior gap, but we had some events: assume we need to prepend events
-                // before those.
-                trace!("inserted events before the first known event");
-
-                self.room_linked_chunk
-                    .insert_events_at(reversed_events.clone(), pos)
-                    .expect("pos is a valid position we just read above");
-
-                Some(pos)
-            } else {
-                // No prior gap, and no prior events: push the events.
-                trace!("pushing events received from back-pagination");
-
-                self.room_linked_chunk.push_events(reversed_events.clone());
-
-                // A new gap may be inserted before the new events, if there are any.
-                self.room_linked_chunk.events().next().map(|(item_pos, _)| item_pos)
-            };
+            // `/messages` has been called with `dir=b` (backwards), so the events are in
+            // the inverted order; reorder them.
+            let topo_ordered_events = events.iter().rev().cloned().collect::<Vec<_>>();
 
-            // And insert the new gap if needs be.
-            //
-            // We only do this when at least one new, non-duplicated event, has been added
-            // to the chunk. Otherwise it means we've back-paginated all the
-            // known events.
-            if let Some(new_gap) = new_gap {
-                if let Some(new_pos) = insert_new_gap_pos {
-                    self.room_linked_chunk
-                        .insert_gap_at(new_gap, new_pos)
-                        .expect("events_chunk_pos represents a valid chunk position");
-                } else {
-                    self.room_linked_chunk.push_gap(new_gap);
-                }
-            }
-
-            self.post_process_new_events(reversed_events, false).await?;
-
-            // There could be an inconsistency between the network (which thinks we hit the
-            // start of the timeline) and the disk (which has the initial empty
-            // chunks), so tweak the `reached_start` value so that it reflects the disk
-            // state in priority instead.
-            let reached_start = {
-                // There are no gaps.
-                let has_gaps = self.room_linked_chunk.chunks().any(|chunk| chunk.is_gap());
-
-                // The first chunk has no predecessors.
-                let first_chunk_is_definitive_head =
-                    self.room_linked_chunk.chunks().next().map(|chunk| chunk.is_definitive_head());
-
-                let reached_start =
-                    !has_gaps && first_chunk_is_definitive_head.unwrap_or(network_reached_start);
-
-                trace!(
-                    ?network_reached_start,
-                    ?has_gaps,
-                    ?first_chunk_is_definitive_head,
-                    ?reached_start,
-                    "finished handling network back-pagination"
-                );
+            let reached_start = self.room_linked_chunk.finish_back_pagination(
+                prev_gap_id,
+                new_gap,
+                &topo_ordered_events,
+            );
 
-                reached_start
-            };
+            self.post_process_new_events(topo_ordered_events, false).await?;
 
             let event_diffs = self.room_linked_chunk.updates_as_vector_diffs();
             let backpagination_outcome = BackPaginationOutcome { events, reached_start };
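Taken together, the caller in event_cache/room/mod.rs now reduces to the flow below. This is a condensed sketch assembled from the hunks above (deduplication and the rest of the surrounding function are omitted), not a verbatim copy of the file:

    // `/messages` was called with `dir=b` (backwards): re-order the events so the
    // oldest comes first, as expected by the `EventLinkedChunk` API.
    let topo_ordered_events = events.iter().rev().cloned().collect::<Vec<_>>();

    // Let the linked chunk insert the events (and a new gap, if any) and report
    // whether the start of the timeline has been reached.
    let reached_start = self.room_linked_chunk.finish_back_pagination(
        prev_gap_id,
        new_gap,
        &topo_ordered_events,
    );

    self.post_process_new_events(topo_ordered_events, false).await?;

    let event_diffs = self.room_linked_chunk.updates_as_vector_diffs();
    let backpagination_outcome = BackPaginationOutcome { events, reached_start };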
