@@ -247,6 +247,8 @@ impl<T: Idx> BitRelations<BitSet<T>> for BitSet<T> {
     }
 }
 
+// Applies a function to mutate a bitset, and returns true if any
+// of the applications return true.
 fn sequential_update<T: Idx>(
     mut self_update: impl FnMut(T) -> bool,
     it: impl Iterator<Item = T>,
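
For readers without the surrounding file open, here is a minimal standalone sketch of the pattern the new comment describes: apply an update closure to each element of an iterator and report whether any single application changed anything. A plain `HashSet<u32>` stands in for the rustc `BitSet` and `Idx` machinery; this is an illustration of the idea, not the rustc implementation.

```rust
use std::collections::HashSet;

// Applies `update` to every element yielded by `it` and reports whether
// any single application returned true (i.e. whether the set changed).
fn sequential_update<T>(mut update: impl FnMut(T) -> bool, it: impl Iterator<Item = T>) -> bool {
    let mut changed = false;
    for elem in it {
        changed |= update(elem);
    }
    changed
}

fn main() {
    let mut set: HashSet<u32> = [1, 2].into_iter().collect();
    // `HashSet::insert` returns true only when the element was newly added,
    // so `changed` is true iff at least one insertion took effect.
    let changed = sequential_update(|x| set.insert(x), [2, 3, 4].into_iter());
    assert!(changed);
    assert_eq!(set.len(), 4);
}
```
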
@@ -258,6 +260,8 @@ fn sequential_update<T: Idx>(
     changed
 }
 
+// Optimization of intersection for SparseBitSet that's generic
+// over the RHS.
 fn sparse_intersect<T: Idx>(
     set: &mut SparseBitSet<T>,
     other_contains: impl Fn(&T) -> bool,
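
Similarly, a rough sketch of the retain-and-compare-length idea behind `sparse_intersect`, with a plain `Vec<u32>` standing in for `SparseBitSet`'s element storage (an assumed simplification, not the real type):

```rust
// Keeps only the elements for which `other_contains` returns true and
// reports whether the length changed, i.e. whether the intersection
// actually removed anything.
fn sparse_intersect(elems: &mut Vec<u32>, other_contains: impl Fn(&u32) -> bool) -> bool {
    let size = elems.len();
    elems.retain(|elem| other_contains(elem));
    elems.len() != size
}

fn main() {
    let mut sparse = vec![1, 4, 9, 16];
    let changed = sparse_intersect(&mut sparse, |&x| x < 10);
    assert!(changed);
    assert_eq!(sparse, vec![1, 4, 9]);
}
```
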
@@ -267,6 +271,10 @@ fn sparse_intersect<T: Idx>(
     set.elems.len() != size
 }
 
+// Optimization of dense/sparse intersection. The resulting set is
+// guaranteed to be at most the size of the sparse set, and hence can be
+// represented as a sparse set. Therefore the sparse set is copied and filtered,
+// then returned as the new set.
 fn dense_sparse_intersect<T: Idx>(
     dense: &BitSet<T>,
     sparse: &SparseBitSet<T>,
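
The dense/sparse case can be sketched the same way: copy the sparse elements, keep only those the dense side also contains, and report whether anything was dropped. The `HashSet`/`Vec` stand-ins below are illustrative only and are not the rustc types:

```rust
use std::collections::HashSet;

// Returns a filtered copy of `sparse` containing only elements that the
// dense set also contains, plus a flag saying whether anything was dropped.
fn dense_sparse_intersect(dense: &HashSet<u32>, sparse: &[u32]) -> (Vec<u32>, bool) {
    let updated: Vec<u32> = sparse.iter().copied().filter(|e| dense.contains(e)).collect();
    let changed = updated.len() != sparse.len();
    (updated, changed)
}

fn main() {
    let dense: HashSet<u32> = (0..100).collect();
    let (updated, changed) = dense_sparse_intersect(&dense, &[3, 50, 200]);
    assert_eq!(updated, vec![3, 50]);
    assert!(changed);
}
```
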
@@ -303,6 +311,10 @@ impl<T: Idx> BitRelations<HybridBitSet<T>> for BitSet<T> {
         match other {
             HybridBitSet::Sparse(sparse) => {
                 let (updated, changed) = dense_sparse_intersect(self, sparse);
+
+                // We can't directly assign the SparseBitSet to this BitSet, and
+                // doing `*self = updated.to_dense()` would cause a drop/reallocation.
+                // Instead, the BitSet is cleared and `updated` is copied into `self`.
                 self.clear();
                 for elem in updated.iter() {
                     self.insert(*elem);
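
To illustrate the allocation point in the last comment, the same clear-and-refill pattern is shown below on a plain `Vec` (an analogy, not the `BitSet` code): clearing keeps the existing buffer, so refilling it in place avoids the drop and reallocation that building a fresh container would incur.

```rust
// Refills `dest` from `src` while keeping `dest`'s existing allocation,
// in the same spirit as clearing the BitSet and re-inserting `updated`.
fn refill_in_place(dest: &mut Vec<u32>, src: &[u32]) {
    dest.clear(); // length goes to 0, capacity is kept
    dest.extend_from_slice(src); // no reallocation if the capacity suffices
}

fn main() {
    let mut dest = Vec::with_capacity(16);
    dest.extend([1u32, 2, 3]);
    let cap_before = dest.capacity();

    refill_in_place(&mut dest, &[4, 5]);

    assert_eq!(dest, vec![4, 5]);
    assert_eq!(dest.capacity(), cap_before); // the buffer was reused
}
```
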