Commit e8c2d27

refactor(event cache): slightly tweak logic around prev-batch token suppression
1 parent bff600a commit e8c2d27

File tree

1 file changed (+34, -32 lines)

  • crates/matrix-sdk/src/event_cache/room/mod.rs


crates/matrix-sdk/src/event_cache/room/mod.rs

Lines changed: 34 additions & 32 deletions
@@ -390,16 +390,6 @@ impl RoomEventCacheInner {
 
         let mut state = self.state.write().await;
 
-        // Ditch the previous-batch token if the sync isn't limited and we've seen at
-        // least one event in the past.
-        //
-        // In this case (and only this one), we should definitely know what the head of
-        // the timeline is (either we know about all the events, or we have a
-        // gap somewhere), since storage is enabled by default.
-        if !timeline.limited && state.events().events().next().is_some() {
-            prev_batch = None;
-        }
-
         let (
             DeduplicationOutcome {
                 all_events: events,
@@ -409,6 +399,22 @@ impl RoomEventCacheInner {
             all_duplicates,
         ) = state.collect_valid_and_duplicated_events(timeline.events).await?;
 
+        // If the timeline isn't limited, and we already knew about some past events,
+        // then we definitely know what the timeline head is (either we know
+        // about all the events persisted in storage, or we have a gap
+        // somewhere). In this case, we can ditch the previous-batch
+        // token, which is an optimization to avoid unnecessary future back-pagination
+        // requests.
+        //
+        // We can also ditch it, if we knew about all the events that came from sync,
+        // viz. they were all deduplicated. In this case, using the
+        // previous-batch token would only result in fetching other events we
+        // knew about. This is slightly incorrect in the presence of
+        // network splits, but this has shown to be Good Enough™.
+        if !timeline.limited && state.events().events().next().is_some() || all_duplicates {
+            prev_batch = None;
+        }
+
         // During a sync, when a duplicated event is found, the old event is removed and
         // the new event is added.
         //
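Note on the new guard: in Rust, `&&` binds more tightly than `||`, so the condition reads as "(the sync isn't limited AND at least one past event is already known) OR every event in this sync was a duplicate", matching the two cases spelled out in the comment. A minimal standalone sketch, using a hypothetical `should_drop_prev_batch` helper and plain booleans rather than the SDK's actual types:

    fn should_drop_prev_batch(limited: bool, knows_past_events: bool, all_duplicates: bool) -> bool {
        // `&&` binds tighter than `||`, so this parses as
        // `(!limited && knows_past_events) || all_duplicates`.
        !limited && knows_past_events || all_duplicates
    }

    fn main() {
        // Case 1: the sync isn't limited and at least one past event is already known.
        assert!(should_drop_prev_batch(false, true, false));
        // Case 2: every event received in this sync was a duplicate.
        assert!(should_drop_prev_batch(true, false, true));
        // Otherwise, keep the previous-batch token for later back-pagination.
        assert!(!should_drop_prev_batch(true, false, false));
    }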
@@ -434,23 +440,20 @@ impl RoomEventCacheInner {
                 // there was a gap, we'd have received an unknown event at the tail of
                 // the room's timeline (unless the server reordered sync events since the last
                 // time we sync'd).
-                if !all_duplicates {
-                    if let Some(prev_token) = &prev_batch {
-                        // As a tiny optimization: remove the last chunk if it's an empty event
-                        // one, as it's not useful to keep it before a gap.
-                        let prev_chunk_to_remove =
-                            room_events.rchunks().next().and_then(|chunk| {
-                                (chunk.is_items() && chunk.num_items() == 0)
-                                    .then_some(chunk.identifier())
-                            });
-
-                        room_events.push_gap(Gap { prev_token: prev_token.clone() });
-
-                        if let Some(prev_chunk_to_remove) = prev_chunk_to_remove {
-                            room_events.remove_empty_chunk_at(prev_chunk_to_remove).expect(
-                                "we just checked the chunk is there, and it's an empty item chunk",
-                            );
-                        }
+                if let Some(prev_token) = &prev_batch {
+                    // As a tiny optimization: remove the last chunk if it's an empty event
+                    // one, as it's not useful to keep it before a gap.
+                    let prev_chunk_to_remove = room_events.rchunks().next().and_then(|chunk| {
+                        (chunk.is_items() && chunk.num_items() == 0)
+                            .then_some(chunk.identifier())
+                    });
+
+                    room_events.push_gap(Gap { prev_token: prev_token.clone() });
+
+                    if let Some(prev_chunk_to_remove) = prev_chunk_to_remove {
+                        room_events.remove_empty_chunk_at(prev_chunk_to_remove).expect(
+                            "we just checked the chunk is there, and it's an empty item chunk",
+                        );
                     }
                 }
 
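Note: the hunk above only drops the outer `if !all_duplicates` guard, which is now redundant because the earlier change already sets `prev_batch = None` when everything was deduplicated; the gap-insertion optimization itself is unchanged. For illustration only, here is a toy sketch of that optimization, using a simplified `Chunk` enum and a plain `Vec` rather than the SDK's linked-chunk types (the real code records the empty chunk's identifier, pushes the gap, then removes the chunk, but the end state is the same):

    #[derive(Debug, PartialEq)]
    enum Chunk {
        Items(Vec<String>),
        Gap { prev_token: String },
    }

    /// Push a gap at the tail, dropping a trailing *empty* items chunk first:
    /// keeping an empty chunk right before a gap serves no purpose.
    fn push_gap_trimming_empty_tail(chunks: &mut Vec<Chunk>, prev_token: String) {
        let tail_is_empty_items =
            matches!(chunks.last(), Some(Chunk::Items(items)) if items.is_empty());
        if tail_is_empty_items {
            chunks.pop();
        }
        chunks.push(Chunk::Gap { prev_token });
    }

    fn main() {
        let mut chunks = vec![
            Chunk::Items(vec!["$ev0".to_owned()]),
            Chunk::Items(vec![]), // empty tail, will be trimmed
        ];
        push_gap_trimming_empty_tail(&mut chunks, "prev-batch".to_owned());
        assert_eq!(
            chunks,
            vec![
                Chunk::Items(vec!["$ev0".to_owned()]),
                Chunk::Gap { prev_token: "prev-batch".to_owned() },
            ]
        );
    }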
@@ -462,11 +465,10 @@ impl RoomEventCacheInner {
 
         timeline_event_diffs.extend(new_timeline_event_diffs);
 
-        if timeline.limited && prev_batch.is_some() && !all_duplicates {
-            // If there was a previous batch token for a limited timeline, and there's at
-            // least one non-duplicated new event, unload the chunks so it
-            // only contains the last one; otherwise, there might be a valid
-            // gap in between, and observers may not render it (yet).
+        if timeline.limited && prev_batch.is_some() {
+            // If there was a previous batch token for a limited timeline, unload the chunks
+            // so it only contains the last one; otherwise, there might be a
+            // valid gap in between, and observers may not render it (yet).
             //
             // We must do this *after* the above call to `.with_events_mut`, so the new
             // events and gaps are properly persisted to storage.
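Note: the dropped `!all_duplicates` clause is likewise subsumed by `prev_batch.is_some()`, since `prev_batch` is now cleared earlier when all events were duplicates. The "unload the chunks so it only contains the last one" step, in toy form (a plain `Vec` with no storage layer, and a hypothetical `unload_all_but_last` helper; the real event cache keeps unloaded chunks persisted and only drops them from memory):

    /// Toy model: keep only the most recent chunk in memory, so observers don't
    /// see a possibly-stale gap between older chunks and the freshly synced events.
    fn unload_all_but_last<T>(chunks: &mut Vec<T>) {
        if chunks.len() > 1 {
            // Split off the tail element and keep only that one.
            *chunks = chunks.split_off(chunks.len() - 1);
        }
    }

    fn main() {
        let mut chunks = vec!["chunk-0", "chunk-1", "chunk-2"];
        unload_all_but_last(&mut chunks);
        assert_eq!(chunks, vec!["chunk-2"]);
    }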
