@@ -91,6 +91,8 @@ pub trait DB {
#[derive(Clone)]
pub struct WickDB<S: Storage + Clone + 'static> {
    inner: Arc<DBImpl<S>>,
+    shutdown_batch_processing_thread: (Sender<()>, Receiver<()>),
+    shutdown_compaction_thread: (Sender<()>, Receiver<()>),
}

pub type WickDBIterator<S> = DBIterator<
@@ -152,15 +154,19 @@ impl<S: Storage + Clone> DB for WickDB<S> {

    fn close(&mut self) -> Result<()> {
        self.inner.is_shutting_down.store(true, Ordering::Release);
-        match &self.inner.db_lock {
-            Some(lock) => lock.unlock(),
-            None => Ok(()),
-        }
+        self.inner.schedule_close_batch();
+        let _ = self.shutdown_batch_processing_thread.1.recv();
+        // Send one extra signal so the compaction thread is not left blocked in `recv` forever
+        let _ = self.inner.do_compaction.0.send(());
+        let _ = self.shutdown_compaction_thread.1.recv();
+        self.inner.close()?;
+        debug!("DB {} closed", &self.inner.db_name);
+        Ok(())
    }

    fn destroy(&mut self) -> Result<()> {
        let db = self.inner.clone();
-        db.is_shutting_down.store(true, Ordering::Release);
+        self.close()?;
        db.env.remove_dir(&db.db_name, true)
    }

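The `close` path above is a handshake: set `is_shutting_down`, enqueue a sentinel batch, then block on each bounded(1) channel until the corresponding background thread confirms it has exited. The sketch below reproduces just that handshake with a single worker; the struct and field names are illustrative stand-ins, not wickdb's real types.

```rust
use crossbeam_channel::{bounded, Receiver, Sender};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

struct Db {
    is_shutting_down: Arc<AtomicBool>,
    // (Sender, Receiver): the worker keeps a Sender clone, `close` waits on the Receiver
    shutdown_ack: (Sender<()>, Receiver<()>),
}

impl Db {
    fn new() -> Self {
        Db {
            is_shutting_down: Arc::new(AtomicBool::new(false)),
            shutdown_ack: bounded(1),
        }
    }

    fn start_worker(&self) {
        let flag = self.is_shutting_down.clone();
        let ack = self.shutdown_ack.0.clone();
        thread::spawn(move || {
            while !flag.load(Ordering::Acquire) {
                // ... do background work ...
                thread::yield_now();
            }
            // Confirm that the shutdown flag was observed and the thread is exiting
            ack.send(()).unwrap();
        });
    }

    fn close(&self) {
        self.is_shutting_down.store(true, Ordering::Release);
        // Block until the worker acknowledges
        let _ = self.shutdown_ack.1.recv();
    }
}

fn main() {
    let db = Db::new();
    db.start_worker();
    db.close();
}
```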
@@ -173,6 +179,7 @@ impl<S: Storage + Clone> WickDB<S> {
    /// Create a new WickDB
    pub fn open_db(mut options: Options, db_name: &'static str, storage: S) -> Result<Self> {
        options.initialize(db_name.to_owned(), &storage);
+        debug!("Open db: '{}'", db_name);
        let mut db = DBImpl::new(options, db_name, storage);
        let (mut edit, should_save_manifest) = db.recover()?;
        let mut versions = db.versions.lock().unwrap();
@@ -188,16 +195,20 @@ impl<S: Storage + Clone> WickDB<S> {
        if should_save_manifest {
            edit.set_prev_log_number(0);
            edit.set_log_number(versions.log_number());
+            debug!("log_and_apply in open_db");
            versions.log_and_apply(&mut edit)?;
        }

        let current = versions.current();
        db.delete_obsolete_files(versions);
        let wick_db = WickDB {
            inner: Arc::new(db),
+            shutdown_batch_processing_thread: crossbeam_channel::bounded(1),
+            shutdown_compaction_thread: crossbeam_channel::bounded(1),
        };
        wick_db.process_compaction();
        wick_db.process_batch();
+        // Schedule a compaction to current version for potential unfinished work
        wick_db.inner.maybe_schedule_compaction(current);
        Ok(wick_db)
    }
@@ -223,9 +234,17 @@ impl<S: Storage + Clone> WickDB<S> {
    // 5. Update sequence of version set
    fn process_batch(&self) {
        let db = self.inner.clone();
+        let shutdown = self.shutdown_batch_processing_thread.0.clone();
        thread::spawn(move || {
            loop {
                if db.is_shutting_down.load(Ordering::Acquire) {
+                    // Clean up all the batches left in the queue
+                    let mut queue = db.batch_queue.lock().unwrap();
+                    while let Some(batch) = queue.pop_front() {
+                        let _ = batch.signal.send(Err(Error::DBClosed(
+                            "DB is closing. Clean up all the batches in the queue".to_owned(),
+                        )));
+                    }
                    break;
                }
                let first = {
@@ -236,6 +255,9 @@ impl<S: Storage + Clone> WickDB<S> {
                    }
                    queue.pop_front().unwrap()
                };
+                if first.stop_process {
+                    break;
+                }
                let force = first.force_mem_compaction;
                // TODO: The VersionSet is locked when processing `make_room_for_write`
                match db.make_room_for_write(force) {
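The batch thread now has two exit paths: when it observes `is_shutting_down` it drains the queue, replying `DBClosed` to every waiting writer, and when it pops the sentinel task pushed by `schedule_close_batch` (the one with `stop_process` set) it simply breaks. A self-contained sketch of this queue-plus-sentinel pattern, where `Task`, `stop`, and `reply` stand in for `BatchTask`, `stop_process`, and `signal`:

```rust
use crossbeam_channel::{bounded, Sender};
use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct Task {
    stop: bool,                        // sentinel flag, like `stop_process`
    reply: Sender<Result<(), String>>, // waiting writer, like `signal`
}

fn main() {
    let state = Arc::new((Mutex::new(VecDeque::<Task>::new()), Condvar::new()));

    let worker_state = state.clone();
    let worker = thread::spawn(move || loop {
        let task = {
            let (lock, cvar) = &*worker_state;
            let mut queue = lock.lock().unwrap();
            while queue.is_empty() {
                queue = cvar.wait(queue).unwrap();
            }
            queue.pop_front().unwrap()
        };
        if task.stop {
            // Drain whatever is still queued so no writer is left waiting forever
            let (lock, _) = &*worker_state;
            let mut queue = lock.lock().unwrap();
            while let Some(pending) = queue.pop_front() {
                let _ = pending.reply.send(Err("DB is closing".to_owned()));
            }
            break;
        }
        // ... apply the write here ...
        let _ = task.reply.send(Ok(()));
    });

    // Shut down: push the stop sentinel, much like `schedule_close_batch`
    let (send, _recv) = bounded(0);
    let (lock, cvar) = &*state;
    lock.lock().unwrap().push_back(Task { stop: true, reply: send });
    cvar.notify_all();
    worker.join().unwrap();
}
```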
@@ -313,13 +335,16 @@ impl<S: Storage + Clone> WickDB<S> {
                    }
                }
            }
+            shutdown.send(()).unwrap();
+            debug!("batch processing thread shut down");
        });
    }

    // Process a compaction work when receiving the signal.
    // The compaction might run recursively since we produce new table files.
    fn process_compaction(&self) {
        let db = self.inner.clone();
+        let shutdown = self.shutdown_compaction_thread.0.clone();
        thread::spawn(move || {
            while let Ok(()) = db.do_compaction.1.recv() {
                if db.is_shutting_down.load(Ordering::Acquire) {
@@ -338,6 +363,8 @@ impl<S: Storage + Clone> WickDB<S> {
                let current = db.versions.lock().unwrap().current();
                db.maybe_schedule_compaction(current);
            }
+            shutdown.send(()).unwrap();
+            debug!("compaction thread shut down");
        });
    }
}
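Both worker threads report back on their shutdown channel right before exiting, but the compaction thread has an extra wrinkle: it normally sits blocked in `do_compaction.1.recv()`, so `close` must push one last `()` through `do_compaction.0` after setting the flag, or the thread would never wake up to see it. A minimal sketch of that wake-then-ack sequence, with illustrative channel names:

```rust
use crossbeam_channel::{bounded, unbounded};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let shutting_down = Arc::new(AtomicBool::new(false));
    let (work_tx, work_rx) = unbounded::<()>();
    let (ack_tx, ack_rx) = bounded::<()>(1);

    let flag = shutting_down.clone();
    let worker = thread::spawn(move || {
        // Blocks between jobs, like the compaction thread on `do_compaction.1.recv()`
        while let Ok(()) = work_rx.recv() {
            if flag.load(Ordering::Acquire) {
                break;
            }
            // ... run one round of compaction ...
        }
        // Acknowledge shutdown, like `shutdown.send(()).unwrap()` above
        let _ = ack_tx.send(());
    });

    // close(): set the flag first, then send one dummy job so the worker
    // is not stuck in `recv` forever, then wait for the acknowledgement.
    shutting_down.store(true, Ordering::Release);
    let _ = work_tx.send(());
    let _ = ack_rx.recv();
    worker.join().unwrap();
}
```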
@@ -389,9 +416,18 @@ unsafe impl<S: Storage + Clone> Send for DBImpl<S> {}
impl<S: Storage + Clone> Drop for DBImpl<S> {
    #[allow(unused_must_use)]
    fn drop(&mut self) {
+        if !self.is_shutting_down.load(Ordering::Acquire) {
+            let _ = self.close();
+        }
+    }
+}
+
+impl<S: Storage + Clone> DBImpl<S> {
+    fn close(&self) -> Result<()> {
        self.is_shutting_down.store(true, Ordering::Release);
-        if let Some(lock) = self.db_lock.as_ref() {
-            lock.unlock();
+        match &self.db_lock {
+            Some(lock) => lock.unlock(),
+            None => Ok(()),
        }
    }
}
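Moving the lock release into `DBImpl::close` and guarding the call in `Drop` means a `DBImpl` that was never explicitly closed still releases its resources when dropped, while an explicit `close` avoids doing the work twice. A stripped-down sketch of that drop-guard pattern (not wickdb's actual types):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

struct Inner {
    is_shutting_down: AtomicBool,
}

impl Inner {
    fn close(&self) -> Result<(), String> {
        self.is_shutting_down.store(true, Ordering::Release);
        // ... release the file lock held for the DB directory here ...
        println!("resources released");
        Ok(())
    }
}

impl Drop for Inner {
    fn drop(&mut self) {
        // Fall back to `close` only if the owner never called it explicitly
        if !self.is_shutting_down.load(Ordering::Acquire) {
            let _ = self.close();
        }
    }
}

fn main() {
    let explicit = Inner { is_shutting_down: AtomicBool::new(false) };
    explicit.close().unwrap(); // Drop sees the flag and skips the fallback

    let implicit = Inner { is_shutting_down: AtomicBool::new(false) };
    drop(implicit); // no explicit close: Drop performs the cleanup
}
```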
@@ -492,13 +528,16 @@ impl<S: Storage + Clone + 'static> DBImpl<S> {
        new_db.set_log_number(0);
        new_db.set_next_file(2);
        new_db.set_last_sequence(0);
+        // Create manifest
        let manifest_filenum = 1;
        let manifest_filename =
            generate_filename(self.db_name, FileType::Manifest, manifest_filenum);
+        debug!("Create manifest file: {}", &manifest_filename);
        let manifest = self.env.create(manifest_filename.as_str())?;
        let mut manifest_writer = Writer::new(manifest);
        let mut record = vec![];
        new_db.encode_to(&mut record);
+        debug!("Append manifest record: {:?}", &new_db);
        match manifest_writer.add_record(&record) {
            Ok(()) => update_current(&self.env, self.db_name, manifest_filenum)?,
            Err(e) => {
@@ -530,7 +569,7 @@ impl<S: Storage + Clone + 'static> DBImpl<S> {
        let prev_log = versions.prev_log_number();
        let all_files = self.env.list(self.db_name)?;
        let mut logs_to_recover = vec![];
-        for filename in all_files.iter() {
+        for filename in all_files {
            if let Some((file_type, file_number)) = parse_filename(filename) {
                if file_type == FileType::Log && (file_number >= min_log || file_number == prev_log)
                {
@@ -701,6 +740,20 @@ impl<S: Storage + Clone + 'static> DBImpl<S> {
        }
    }

+    // Schedule a WriteBatch that closes the batch processing thread, for gracefully shutting down the db
+    fn schedule_close_batch(&self) {
+        let (send, _) = crossbeam_channel::bounded(0);
+        let task = BatchTask {
+            stop_process: true,
+            force_mem_compaction: false,
+            batch: WriteBatch::default(),
+            signal: send,
+            options: WriteOptions::default(),
+        };
+        self.batch_queue.lock().unwrap().push_back(task);
+        self.process_batch_sem.notify_all();
+    }
+
    // Schedule the WriteBatch and wait for the result from the receiver.
    // This function wakes up the thread in `process_batch`.
    // An empty `WriteBatch` will trigger a force memtable compaction.
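`schedule_close_batch` builds its sentinel with `let (send, _) = crossbeam_channel::bounded(0)`, dropping the `Receiver` immediately: nobody waits for the sentinel's result, and any attempt to reply to it fails fast instead of blocking the batch thread. The snippet below just demonstrates that property of crossbeam channels:

```rust
use crossbeam_channel::bounded;

fn main() {
    // Zero-capacity channel with the receiver dropped right away,
    // mirroring `let (send, _) = crossbeam_channel::bounded(0)`.
    let (send, recv) = bounded::<()>(0);
    drop(recv);

    // With no receiver alive, `send` returns an error immediately rather
    // than waiting for a rendezvous, so a `let _ = batch.signal.send(...)`
    // aimed at the sentinel can never hang.
    assert!(send.send(()).is_err());
    println!("send on a disconnected channel fails fast");
}
```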
@@ -718,6 +771,7 @@ impl<S: Storage + Clone + 'static> DBImpl<S> {
        }
        let (send, recv) = crossbeam_channel::bounded(0);
        let task = BatchTask {
+            stop_process: false,
            force_mem_compaction,
            batch,
            signal: send,
@@ -747,7 +801,8 @@ impl<S: Storage + Clone + 'static> DBImpl<S> {
        // Group several batches from queue
        while !queue.is_empty() {
            let current = queue.pop_front().unwrap();
-            if current.options.sync && !grouped.options.sync {
+            if current.stop_process || (current.options.sync && !grouped.options.sync) {
+                // Do not include a stop-process batch.
                // Do not include a sync write into a batch handled by a non-sync write.
                queue.push_front(current);
                break;
@@ -1291,6 +1346,7 @@ impl<S: Storage + Clone + 'static> DBImpl<S> {

// A wrapper struct for scheduling `WriteBatch`
struct BatchTask {
+    stop_process: bool, // flag for shutting down the batch processing thread gracefully
    force_mem_compaction: bool,
    batch: WriteBatch,
    signal: Sender<Result<()>>,
@@ -1418,7 +1474,7 @@ mod tests {
    }

    fn new_test_options(o: TestOption) -> Options {
-        match o {
+        let opt = match o {
            TestOption::Default => Options::default(),
            TestOption::Reuse => {
                let mut o = Options::default();
@@ -1436,10 +1492,12 @@ mod tests {
                o.compression = CompressionType::NoCompression;
                o
            }
-        }
+        };
+        opt
    }
    struct DBTest {
-        store: MemStorage, // With the same db's inner storage
+        store: MemStorage, // Used as the db's inner storage
+        opt: Options,      // Used as the db's options
        db: WickDB<MemStorage>,
    }

@@ -1477,8 +1535,16 @@ mod tests {
        fn new(opt: Options) -> Self {
            let store = MemStorage::default();
            let name = "db_test";
-            let db = WickDB::open_db(opt, name, store.clone()).unwrap();
-            DBTest { store, db }
+            let db = WickDB::open_db(opt.clone(), name, store.clone()).unwrap();
+            DBTest { store, opt, db }
+        }
+
+        // Close the inner db without destroying its contents, then open a new WickDB on the same path with the same options
+        fn reopen(&mut self) -> Result<()> {
+            self.db.close()?;
+            let db = WickDB::open_db(self.opt.clone(), self.db.inner.db_name, self.store.clone())?;
+            self.db = db;
+            Ok(())
        }

        // Put entries with default `WriteOptions`
@@ -1641,8 +1707,8 @@ mod tests {
            let store = MemStorage::default();
            let name = "db_test";
            let opt = new_test_options(TestOption::Default);
-            let db = WickDB::open_db(opt, name, store.clone()).unwrap();
-            DBTest { store, db }
+            let db = WickDB::open_db(opt.clone(), name, store.clone()).unwrap();
+            DBTest { store, opt, db }
        }
    }

@@ -2064,4 +2130,30 @@ mod tests {
            assert_iter_entry(&iter, "a", "va");
        }
    }
+
+    #[test]
+    fn test_reopen_with_empty_db() {
+        for mut t in default_cases() {
+            t.reopen().unwrap();
+            t.reopen().unwrap();
+        }
+    }
+
+    #[test]
+    fn test_recover_with_entries() {
+        for mut t in default_cases() {
+            t.put_entries(vec![("foo", "v1"), ("baz", "v5")]);
+            t.reopen().unwrap();
+            assert_eq!(t.get("foo", None).unwrap(), "v1");
+            assert_eq!(t.get("baz", None).unwrap(), "v5");
+
+            t.put_entries(vec![("bar", "v2"), ("foo", "v3")]);
+            t.reopen().unwrap();
+            assert_eq!(t.get("foo", None).unwrap(), "v3");
+            t.put("foo", "v4").unwrap();
+            assert_eq!(t.get("bar", None).unwrap(), "v2");
+            assert_eq!(t.get("foo", None).unwrap(), "v4");
+            assert_eq!(t.get("baz", None).unwrap(), "v5");
+        }
+    }
}