@@ -35,6 +35,7 @@ use std::{
 
 use eyeball::{SharedObservable, Subscriber};
 use eyeball_im::VectorDiff;
+use futures_util::future::{join_all, try_join_all};
 use matrix_sdk_base::{
     deserialized_responses::{AmbiguityChange, TimelineEvent},
     event_cache::store::{EventCacheStoreError, EventCacheStoreLock},
@@ -459,18 +460,73 @@ impl EventCacheInner {
 
     /// Clears all the room's data.
    async fn clear_all_rooms(&self) -> Result<()> {
-        // Note: one must NOT clear the `by_room` map, because if something subscribed
-        // to a room update, they would never get any new update for that room, since
-        // re-creating the `RoomEventCache` would create a new unrelated sender.
-
-        // Note 2: we don't need to clear the [`Self::events`] map, because events are
-        // immutable in the Matrix protocol.
+        // Okay, here's where things get complicated.
+        //
+        // On the one hand, `by_room` may include storage for *some* rooms that we know
+        // about, but not *all* of them. Any room that hasn't been loaded in the
+        // client, or touched by a sync, will remain unloaded in memory, so it
+        // will be missing from `self.by_room`. As a result, we need to make
+        // sure that we're hitting the storage backend to *really* clear all the
+        // rooms, including those that haven't been loaded yet.
+        //
+        // On the other hand, one must NOT clear the `by_room` map, because if someone
+        // subscribed to a room update, they would never get any new update for
+        // that room, since re-creating the `RoomEventCache` would create a new,
+        // unrelated sender.
+        //
+        // So we need to *keep* the rooms in `by_room` alive, while clearing them in the
+        // store backend.
+        //
+        // As a result, for a short while, the in-memory linked chunks
+        // will be desynchronized from the storage. We need to be careful then. During
+        // that short while, we don't want *anyone* to touch the linked chunk
+        // (be it in memory or in the storage).
+        //
+        // And since that requirement applies to *any* room in `by_room` at the same
+        // time, we'll have to take the locks for *all* the live rooms, so as to
+        // properly clear the underlying storage.
+        //
+        // At this point, you might be scared about the potential for deadlocking. I am
+        // as well, but I'm convinced we're fine:
+        // 1. the lock for `by_room` is usually held only for a short while, and
+        //    independently of the other two kinds.
+        // 2. the state may acquire the store cross-process lock internally, but only
+        //    while the state's methods are called (so it's always transient). As a
+        //    result, as soon as we've acquired the state locks, the store lock ought to
+        //    be free.
+        // 3. The store lock is held explicitly only in a small scoped area below.
+        // 4. Then the store lock will be held internally when calling `reset()`, but at
+        //    this point it's only held for a short while each time, so rooms will take
+        //    turns to acquire it.
 
         let rooms = self.by_room.write().await;
-        for room in rooms.values() {
-            room.clear().await?;
+
+        // Collect all the rooms' state locks, first: we can clear the storage only when
+        // nobody will touch it at the same time.
+        let room_locks = join_all(
+            rooms.values().map(|room| async move { (room, room.inner.state.write().await) }),
+        )
+        .await;
+
+        // Clear the storage for all the rooms, using the storage facility.
+        if let Some(store) = self.store.get() {
+            let store_guard = store.lock().await?;
+            store_guard.clear_all_rooms_chunks().await?;
         }
 
+        // At this point, all the in-memory linked chunks are desynchronized from the
+        // storage. Resynchronize them manually by calling reset(), and
+        // propagate updates to observers.
+        try_join_all(room_locks.into_iter().map(|(room, mut state_guard)| async move {
+            let updates_as_vector_diffs = state_guard.reset().await?;
+            let _ = room.inner.sender.send(RoomEventCacheUpdate::UpdateTimelineEvents {
+                diffs: updates_as_vector_diffs,
+                origin: EventsOrigin::Cache,
+            });
+            Ok::<_, EventCacheError>(())
+        }))
+        .await?;
+
         Ok(())
     }
 
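To make the locking choreography above easier to follow outside the diff, here is a minimal, self-contained sketch of the same pattern: acquire every room's state lock with `join_all`, clear the shared store while everything is locked, then resynchronize each room with `try_join_all` and notify observers through senders that were kept alive. All the types here (`FakeStore`, `RoomState`, `Room`) and the toy "diff" payload are hypothetical stand-ins, not the SDK's real API; only the `futures_util` combinators and the tokio primitives correspond to what the change uses.

```rust
// Sketch only: hypothetical stand-ins for the real event-cache types.
use std::collections::HashMap;

use futures_util::future::{join_all, try_join_all};
use tokio::sync::{broadcast, Mutex, RwLock};

/// Stand-in for the shared storage backend.
#[derive(Default)]
struct FakeStore {
    chunks: Mutex<HashMap<String, Vec<String>>>,
}

impl FakeStore {
    /// Clears every room's chunks in one call, like `clear_all_rooms_chunks()` above.
    async fn clear_all_rooms_chunks(&self) {
        self.chunks.lock().await.clear();
    }
}

/// Stand-in for a room's in-memory state (the linked chunk).
#[derive(Default)]
struct RoomState {
    events: Vec<String>,
}

impl RoomState {
    /// Re-syncs the in-memory view with the (now empty) store and returns the
    /// "diffs" to propagate, mimicking `reset()`.
    async fn reset(&mut self) -> Result<Vec<String>, String> {
        self.events.clear();
        Ok(vec!["clear".to_owned()])
    }
}

struct Room {
    state: RwLock<RoomState>,
    sender: broadcast::Sender<Vec<String>>,
}

async fn clear_all_rooms(
    by_room: &RwLock<HashMap<String, Room>>,
    store: &FakeStore,
) -> Result<(), String> {
    // Keep the map (and thus the per-room senders) alive; only lock it.
    let rooms = by_room.write().await;

    // 1. Take every room's state lock, so nobody can observe the transient
    //    window where memory and storage disagree.
    let room_locks =
        join_all(rooms.values().map(|room| async move { (room, room.state.write().await) })).await;

    // 2. Clear the shared storage while all the rooms are locked.
    store.clear_all_rooms_chunks().await;

    // 3. Resynchronize each room and notify its observers.
    try_join_all(room_locks.into_iter().map(|(room, mut state_guard)| async move {
        let diffs = state_guard.reset().await?;
        // An error here only means the room has no subscribers right now.
        let _ = room.sender.send(diffs);
        Ok::<_, String>(())
    }))
    .await?;

    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let store = FakeStore::default();
    let (sender, mut receiver) = broadcast::channel(8);

    let mut map = HashMap::new();
    map.insert(
        "!room:example.org".to_owned(),
        Room { state: RwLock::new(RoomState::default()), sender },
    );
    let by_room = RwLock::new(map);

    clear_all_rooms(&by_room, &store).await?;

    // A subscriber created *before* the clear still receives the update,
    // because the sender was kept alive rather than re-created.
    assert_eq!(receiver.recv().await.unwrap(), vec!["clear".to_owned()]);
    Ok(())
}
```

Holding all the per-room write guards across the store clear is what keeps the "desynchronized" window invisible to readers; async locks make it safe to hold those guards across `.await` points, and keeping the senders alive is exactly why `by_room` must not be emptied.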