@@ -8,7 +8,7 @@ use hir::db::DefDatabase;
use ide_db::{
    base_db::{
        salsa::{Database, ParallelDatabase, Snapshot},
-        CrateGraph, CrateId, SourceDatabase, SourceDatabaseExt,
+        Cancelled, CrateGraph, CrateId, SourceDatabase, SourceDatabaseExt,
    },
    FxIndexMap,
};
@@ -54,84 +54,81 @@ pub(crate) fn parallel_prime_caches(
        builder.build()
    };

-    crossbeam_utils::thread::scope(move |s| {
-        let (work_sender, work_receiver) = crossbeam_channel::unbounded();
-        let (progress_sender, progress_receiver) = crossbeam_channel::unbounded();
+    let (work_sender, work_receiver) = crossbeam_channel::unbounded();
+    let (progress_sender, progress_receiver) = crossbeam_channel::unbounded();

-        enum ParallelPrimeCacheWorkerProgress {
-            BeginCrate { crate_id: CrateId, crate_name: String },
-            EndCrate { crate_id: CrateId },
-        }
-
-        let prime_caches_worker = move |db: Snapshot<RootDatabase>| {
-            while let Ok((crate_id, crate_name)) = work_receiver.recv() {
-                progress_sender
-                    .send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;
+    enum ParallelPrimeCacheWorkerProgress {
+        BeginCrate { crate_id: CrateId, crate_name: String },
+        EndCrate { crate_id: CrateId },
+    }

-                // This also computes the DefMap
-                db.import_map(crate_id);
-
-                progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
-            }
+    let prime_caches_worker = move |db: Snapshot<RootDatabase>| {
+        while let Ok((crate_id, crate_name)) = work_receiver.recv() {
+            progress_sender
+                .send(ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name })?;

-            Ok::<_, crossbeam_channel::SendError<_>>(())
-        };
+            // This also computes the DefMap
+            db.import_map(crate_id);

-        for _ in 0..num_worker_threads {
-            let worker = prime_caches_worker.clone();
-            let db = db.snapshot();
-            s.spawn(move |_| worker(db));
+            progress_sender.send(ParallelPrimeCacheWorkerProgress::EndCrate { crate_id })?;
        }

-        let crates_total = crates_to_prime.len();
-        let mut crates_done = 0;
-
-        // an index map is used to preserve ordering so we can sort the progress report in order of
-        // "longest crate to index" first
-        let mut crates_currently_indexing =
-            FxIndexMap::with_capacity_and_hasher(num_worker_threads as _, Default::default());
+        Ok::<_, crossbeam_channel::SendError<_>>(())
+    };

-        while !crates_to_prime.is_empty() {
-            db.unwind_if_cancelled();
+    for _ in 0..num_worker_threads {
+        let worker = prime_caches_worker.clone();
+        let db = db.snapshot();
+        std::thread::spawn(move || Cancelled::catch(|| worker(db)));
+    }
+
+    let crates_total = crates_to_prime.pending();
+    let mut crates_done = 0;
+
+    // an index map is used to preserve ordering so we can sort the progress report in order of
+    // "longest crate to index" first
+    let mut crates_currently_indexing =
+        FxIndexMap::with_capacity_and_hasher(num_worker_threads as _, Default::default());
+
+    while !crates_to_prime.is_empty() {
+        db.unwind_if_cancelled();
+
+        for crate_id in &mut crates_to_prime {
+            work_sender
+                .send((
+                    crate_id,
+                    graph[crate_id].display_name.as_deref().unwrap_or_default().to_string(),
+                ))
+                .ok();
+        }

-            for crate_id in &mut crates_to_prime {
-                work_sender
-                    .send((
-                        crate_id,
-                        graph[crate_id].display_name.as_deref().unwrap_or_default().to_string(),
-                    ))
-                    .ok();
+        let worker_progress = match progress_receiver.recv() {
+            Ok(p) => p,
+            Err(_) => {
+                // our workers may have died from a cancelled task, so we'll check and re-raise here.
+                db.unwind_if_cancelled();
+                break;
            }
+        };
+        match worker_progress {
+            ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name } => {
+                crates_currently_indexing.insert(crate_id, crate_name);
+            }
+            ParallelPrimeCacheWorkerProgress::EndCrate { crate_id } => {
+                crates_currently_indexing.remove(&crate_id);
+                crates_to_prime.mark_done(crate_id);
+                crates_done += 1;
+            }
+        };

-            let worker_progress = match progress_receiver.recv() {
-                Ok(p) => p,
-                Err(_) => {
-                    // our workers may have died from a cancelled task, so we'll check and re-raise here.
-                    db.unwind_if_cancelled();
-                    break;
-                }
-            };
-            match worker_progress {
-                ParallelPrimeCacheWorkerProgress::BeginCrate { crate_id, crate_name } => {
-                    crates_currently_indexing.insert(crate_id, crate_name);
-                }
-                ParallelPrimeCacheWorkerProgress::EndCrate { crate_id } => {
-                    crates_currently_indexing.remove(&crate_id);
-                    crates_to_prime.mark_done(crate_id);
-                    crates_done += 1;
-                }
-            };
-
-            let progress = ParallelPrimeCachesProgress {
-                crates_currently_indexing: crates_currently_indexing.values().cloned().collect(),
-                crates_done,
-                crates_total,
-            };
-
-            cb(progress);
-        }
-    })
-    .unwrap();
+        let progress = ParallelPrimeCachesProgress {
+            crates_currently_indexing: crates_currently_indexing.values().cloned().collect(),
+            crates_done,
+            crates_total,
+        };
+
+        cb(progress);
+    }
}

fn compute_crates_to_prime(db: &RootDatabase, graph: &CrateGraph) -> FxHashSet<CrateId> {
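Below is a minimal, self-contained sketch (not rust-analyzer's actual code) of the worker pattern the new version of `parallel_prime_caches` relies on: detached `std::thread` workers pull items from a shared `crossbeam_channel` work queue and report progress over a second channel, exiting once the work sender is dropped. The `Progress` enum, the `u32` item type, and the thread/item counts are placeholders for illustration; the real workers receive `CrateId`s, prime caches via `db.import_map`, and wrap their loop in `Cancelled::catch`, presumably so a cancelled salsa snapshot unwinds inside the detached thread without surfacing as a panic.

```rust
use crossbeam_channel::unbounded;
use std::thread;

// Placeholder progress messages; the real code uses ParallelPrimeCacheWorkerProgress.
enum Progress {
    Begin(u32),
    End(u32),
}

fn main() {
    let (work_sender, work_receiver) = unbounded::<u32>();
    let (progress_sender, progress_receiver) = unbounded::<Progress>();

    // Detached workers: no scoped-thread join is needed, because each worker
    // owns clones of both channels and returns once the work channel closes.
    for _ in 0..4 {
        let work_receiver = work_receiver.clone();
        let progress_sender = progress_sender.clone();
        thread::spawn(move || {
            while let Ok(item) = work_receiver.recv() {
                let _ = progress_sender.send(Progress::Begin(item));
                // ... per-item work goes here; the real worker calls db.import_map(crate_id) ...
                let _ = progress_sender.send(Progress::End(item));
            }
        });
    }
    // Keep only the workers' clones of the progress sender alive, so the
    // drain loop below terminates once every worker has exited.
    drop(progress_sender);

    for item in 0..8 {
        work_sender.send(item).unwrap();
    }
    // Closing the work channel lets the workers finish their loops and exit.
    drop(work_sender);

    let mut done = 0;
    while let Ok(progress) = progress_receiver.recv() {
        if let Progress::End(item) = progress {
            done += 1;
            println!("finished item {item} ({done}/8)");
        }
    }
}
```

Because the workers are no longer joined by `crossbeam_utils::thread::scope`, the coordinating loop only learns about them through the progress channel: when every worker has exited, the channel disconnects and `recv` returns `Err`, which is the case the diff handles by re-checking `unwind_if_cancelled` and breaking.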