@@ -17,6 +17,7 @@ use crate::{
     io::{self, page_pool::FatPage, IoCommand, IoHandle, IoKind, PagePool, PAGE_SIZE},
     page_cache::{Page, PageCache},
     store::{BucketInfo, DirtyPage},
+    task::{join_task, spawn_task, TaskResult},
 };

 use self::{ht_file::HTOffsets, meta_map::MetaMap};
@@ -29,26 +30,18 @@ mod meta_map;
 mod wal;
 pub(crate) mod writeout;

-/// An error that can happen during bitbox's sync.
-#[derive(Debug)]
-pub enum SyncError {
-    /// During assigning a bucket to a page, the allocator gave up, meaning that the occupancy rate
-    /// is too high.
-    BucketExhaustion,
-    /// An error occurred while writing to the WAL file.
-    WalWrite(std::io::Error),
-}
+/// During assigning a bucket to a page, the allocator gave up, meaning that the occupancy rate
+/// is too high.
+#[derive(fmt::Debug)]
+pub struct BucketExhaustion;

-impl fmt::Display for SyncError {
+impl fmt::Display for BucketExhaustion {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            SyncError::BucketExhaustion => write!(f, "bucket exhaustion"),
-            SyncError::WalWrite(e) => write!(f, "wal write error: {}", e),
-        }
+        write!(f, "bucket exhaustion")
     }
 }

-impl std::error::Error for SyncError {}
+impl std::error::Error for BucketExhaustion {}

 /// The index of a bucket within the map.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
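
With the `SyncError` enum gone, the only error `prepare_sync` can surface is bucket exhaustion; WAL write failures are now reported separately as plain `std::io::Error` (see the `spawn_wal_writeout` change below). A minimal, self-contained sketch of the resulting error shape follows; it is not the bitbox allocator itself, and `try_allocate`, `assign`, and `free_slots` are hypothetical stand-ins:

```rust
// Sketch only: `try_allocate` and `assign` stand in for `allocate_bucket` and
// the allocation step inside `prepare_sync`.
use std::fmt;

#[derive(Debug)]
pub struct BucketExhaustion;

impl fmt::Display for BucketExhaustion {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "bucket exhaustion")
    }
}

impl std::error::Error for BucketExhaustion {}

/// Hypothetical stand-in for `allocate_bucket`: `None` means the allocator gave up.
fn try_allocate(free_slots: u64) -> Option<u64> {
    (free_slots > 0).then_some(free_slots - 1)
}

fn assign(free_slots: u64) -> Result<u64, BucketExhaustion> {
    // The same `.ok_or(BucketExhaustion)?` pattern used in `prepare_sync`.
    Ok(try_allocate(free_slots).ok_or(BucketExhaustion)?)
}

fn main() -> anyhow::Result<()> {
    // `?` lifts `BucketExhaustion` into `anyhow::Error` via `std::error::Error`.
    let bucket = assign(16)?;
    println!("assigned bucket {bucket}");
    Ok(())
}
```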
@@ -189,7 +182,7 @@ impl DB {
             Vec<(u64, Arc<FatPage>)>,
             Vec<(PageId, Option<(Page, BucketIndex)>)>,
         ),
-        SyncError,
+        BucketExhaustion,
     > {
         wal_blob_builder.reset(sync_seqn);
@@ -224,15 +217,15 @@ impl DB {
                 BucketInfo::Known(bucket) => (false, bucket),
                 BucketInfo::FreshWithNoDependents => {
                     let bucket = allocate_bucket(&page_id, &mut meta_map, &self.shared.seed)
-                        .ok_or(SyncError::BucketExhaustion)?;
+                        .ok_or(BucketExhaustion)?;
                     (true, bucket)
                 }
                 BucketInfo::FreshOrDependent(maybe_bucket) => match maybe_bucket.get() {
                     Some(bucket) => (false, bucket),
                     None => {
                         let bucket =
                             allocate_bucket(&page_id, &mut meta_map, &self.shared.seed)
-                                .ok_or(SyncError::BucketExhaustion)?;
+                                .ok_or(BucketExhaustion)?;
                         // Propagate changes to dependents.
                         maybe_bucket.set(bucket);
                         (true, bucket)
@@ -302,20 +295,27 @@ impl DB {
 pub struct SyncController {
     db: DB,
     /// The channel to send the result of the pre-meta sync errors. Option is to allow `take`.
-    pre_meta_result_tx: Option<Sender<Result<(), SyncError>>>,
+    pre_meta_result_tx: Option<Sender<TaskResult<std::io::Result<()>>>>,
     /// The channel to receive the result of the pre-meta sync errors.
-    pre_meta_result_rx: Receiver<Result<(), SyncError>>,
+    pre_meta_result_rx: Receiver<TaskResult<std::io::Result<()>>>,
+    /// The channel to send the result of the begin_sync task. Option is to allow `take`.
+    begin_sync_result_tx: Option<Sender<TaskResult<Result<(), BucketExhaustion>>>>,
+    /// The channel to receive the result of the begin_sync task.
+    begin_sync_result_rx: Receiver<TaskResult<Result<(), BucketExhaustion>>>,
     /// The pages along with their page numbers to write out to the HT file.
     ht_to_write: Arc<Mutex<Option<Vec<(u64, Arc<FatPage>)>>>>,
 }

 impl SyncController {
     fn new(db: DB) -> Self {
         let (pre_meta_result_tx, pre_meta_result_rx) = crossbeam_channel::bounded(1);
+        let (begin_sync_result_tx, begin_sync_result_rx) = crossbeam_channel::bounded(1);
         Self {
             db,
             pre_meta_result_tx: Some(pre_meta_result_tx),
             pre_meta_result_rx,
+            begin_sync_result_tx: Some(begin_sync_result_tx),
+            begin_sync_result_rx,
             ht_to_write: Arc::new(Mutex::new(None)),
         }
     }
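
The new fields carry `TaskResult` values produced by `spawn_task` and consumed by `join_task`, imported from `crate::task` in the first hunk; that module is not part of this diff. Below is a rough sketch of what such helpers might look like, assuming `TaskResult<T>` wraps either the closure's return value or its panic payload, and substituting `std::thread::spawn` for the sync threadpool the real code hands the closure to:

```rust
// Hypothetical sketch of the `task` helpers; the real `crate::task` module is
// not shown in this diff.
use std::panic::{catch_unwind, AssertUnwindSafe};

use anyhow::anyhow;
use crossbeam_channel::{Receiver, Sender};

/// `Ok(value)` if the task ran to completion, `Err(payload)` if it panicked.
pub type TaskResult<T> = std::thread::Result<T>;

pub fn spawn_task<T, F>(task: F, result_tx: Sender<TaskResult<T>>)
where
    T: Send + 'static,
    F: FnOnce() -> T + Send + 'static,
{
    std::thread::spawn(move || {
        // Catch panics so the joining side can re-raise them instead of hanging.
        let result = catch_unwind(AssertUnwindSafe(task));
        let _ = result_tx.send(result);
    });
}

/// Blocks until the task finishes, re-raises its panic if it panicked, and
/// lifts the task's own error type into `anyhow::Error`.
pub fn join_task<T, E>(rx: &Receiver<TaskResult<Result<T, E>>>) -> anyhow::Result<T>
where
    E: std::error::Error + Send + Sync + 'static,
{
    match rx.recv() {
        Ok(Ok(Ok(value))) => Ok(value),
        Ok(Ok(Err(err))) => Err(err.into()),
        Ok(Err(panic_payload)) => std::panic::resume_unwind(panic_payload),
        Err(_) => Err(anyhow!("task channel hung up")),
    }
}
```

Under these assumptions, both `TaskResult<std::io::Result<()>>` (WAL writeout) and `TaskResult<Result<(), BucketExhaustion>>` (begin_sync) can be joined with a single `?` in a function returning `anyhow::Result<()>`, which is exactly how `wait_pre_meta` uses them in the hunks below.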
@@ -335,25 +335,17 @@ impl SyncController {
         let wal_blob_builder = self.db.shared.wal_blob_builder.clone();
         // UNWRAP: safe because begin_sync is called only once.
         let pre_meta_result_tx = self.pre_meta_result_tx.take().unwrap();
-        self.db.shared.sync_tp.execute(move || {
+        let begin_sync_task = move || {
             let mut wal_blob_builder = wal_blob_builder.lock();
-            let (ht_pages, cache_updates) = match bitbox.prepare_sync(
+
+            // If this fails, the sync coordinator will poison the database and all further
+            // commits will be rejected. Therefore, there is no need to perform cleanup.
+            let (ht_pages, cache_updates) = bitbox.prepare_sync(
                 sync_seqn,
                 &page_pool,
                 updated_pages,
                 &mut *wal_blob_builder,
-            ) {
-                Ok(v) => v,
-                Err(SyncError::BucketExhaustion) => {
-                    // Bail the commit.
-                    //
-                    // The sync coordinator will poison the database and all further commits will
-                    // be rejected. Therefore, there is no need to perform cleanup.
-                    let _ = pre_meta_result_tx.send(Err(SyncError::BucketExhaustion));
-                    return;
-                }
-                Err(SyncError::WalWrite(_)) => unreachable!(),
-            };
+            )?;
             drop(wal_blob_builder);

             // Set the hash-table pages before spawning WAL writeout so they don't race with it.
@@ -364,31 +356,38 @@ impl SyncController {
             // evict and drop old pages outside of the critical path.
             page_cache.batch_update(cache_updates);
             page_cache.evict();
-        });
+            Ok(())
+        };
+        // UNWRAP: safe because begin_sync is called only once.
+        let begin_sync_result_tx = self.begin_sync_result_tx.take().unwrap();
+        spawn_task(
+            &self.db.shared.sync_tp,
+            begin_sync_task,
+            begin_sync_result_tx,
+        );
     }

-    fn spawn_wal_writeout(pre_meta_result_tx: Sender<Result<(), SyncError>>, bitbox: DB) {
+    fn spawn_wal_writeout(pre_meta_result_tx: Sender<TaskResult<std::io::Result<()>>>, bitbox: DB) {
         let bitbox = bitbox.clone();
         let tp = bitbox.shared.sync_tp.clone();
-        tp.execute(move || {
+        let wal_writeout_task = move || {
             let wal_blob_builder = bitbox.shared.wal_blob_builder.lock();
             let wal_slice = wal_blob_builder.as_slice();
-            let wal_result =
-                writeout::write_wal(&bitbox.shared.wal_fd, wal_slice).map_err(SyncError::WalWrite);
-            let _ = pre_meta_result_tx.send(wal_result);
-        });
+            writeout::write_wal(&bitbox.shared.wal_fd, wal_slice)
+        };
+
+        spawn_task(&tp, wal_writeout_task, pre_meta_result_tx);
     }

     /// Wait for the pre-meta operations to complete.
     ///
     /// This includes waiting for the WAL file to be written out.
     ///
     /// Must be invoked by the sync thread. Blocking.
-    pub fn wait_pre_meta(&self) -> Result<(), SyncError> {
-        match self.pre_meta_result_rx.recv() {
-            Ok(wal_result) => wal_result,
-            Err(_) => panic!("unexpected hungup"),
-        }
+    pub fn wait_pre_meta(&self) -> anyhow::Result<()> {
+        join_task(&self.begin_sync_result_rx)?;
+        join_task(&self.pre_meta_result_rx)?;
+        Ok(())
     }

     /// Write out the HT pages and truncate the WAL file.
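
Since `wait_pre_meta` now returns `anyhow::Result<()>`, a caller that still needs to tell bucket exhaustion (which poisons the database) apart from a WAL I/O failure can downcast the error. A hypothetical caller-side sketch; `handle_pre_meta` and its logging policy are illustrations, not code from this PR:

```rust
// Assumes `BucketExhaustion` from this module is in scope.
fn handle_pre_meta(result: anyhow::Result<()>) {
    if let Err(err) = result {
        if err.downcast_ref::<BucketExhaustion>().is_some() {
            // Occupancy too high: the commit is bailed and the database is poisoned.
            eprintln!("sync failed: {err}");
        } else {
            // An I/O error from the WAL writeout, surfaced through `join_task`.
            eprintln!("sync failed with I/O error: {err:#}");
        }
    }
}
```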