@@ -14,7 +14,6 @@ use starcoin_chain_api::{
     ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock, VerifyBlockField,
 };
 use starcoin_config::ChainNetworkID;
-use starcoin_consensus::dag::types::ghostdata::GhostdagData;
 use starcoin_consensus::{BlockDAG, Consensus, FlexiDagStorage};
 use starcoin_crypto::hash::PlainCryptoHash;
 use starcoin_crypto::HashValue;
@@ -48,6 +47,7 @@ use std::iter::Extend;
 use std::option::Option::{None, Some};
 use std::{collections::HashMap, sync::Arc};
 
+
 pub struct ChainStatusWithBlock {
     pub status: ChainStatus,
     pub head: Block,
@@ -79,6 +79,7 @@ pub struct BlockChain {
     vm_metrics: Option<VMMetrics>,
     dag_accumulator: Option<MerkleAccumulator>,
     net: ChainNetworkID,
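+    // Block DAG handle, built over FlexiDagStorage in `BlockChain::new`.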
+    dag: BlockDAG,
 }
 
 impl BlockChain {
@@ -88,11 +89,18 @@ impl BlockChain {
         storage: Arc<dyn Store>,
         net: ChainNetworkID,
         vm_metrics: Option<VMMetrics>,
+        dag_store: FlexiDagStorage,
+        dag_genesis: HashValue,
     ) -> Result<Self> {
         let head = storage
             .get_block_by_hash(head_block_hash)?
             .ok_or_else(|| format_err!("Can not find block by hash {:?}", head_block_hash))?;
-        Self::new_with_uncles(time_service, head, None, storage, net, vm_metrics)
+        let dag_genesis_header = storage
+            .get_block_header_by_hash(head_block_hash)?
+            .ok_or_else(|| format_err!("Can not find block by hash {:?}", head_block_hash))?;
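+        // Build the DAG over the provided FlexiDagStorage (the literal 8 is presumably the
+        // ghostdag K parameter) and initialize it with a genesis DagHeader built from the header above.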
+        let mut dag = BlockDAG::new(dag_genesis, 8, dag_store);
+        dag.init_with_genesis(DagHeader::new_genesis(dag_genesis_header))?;
+        Self::new_with_uncles(time_service, head, None, storage, net, vm_metrics, dag)
     }
 
     fn get_dag_data(
@@ -124,6 +132,7 @@ impl BlockChain {
         storage: Arc<dyn Store>,
         net: ChainNetworkID,
         vm_metrics: Option<VMMetrics>,
+        dag: BlockDAG,
     ) -> Result<Self> {
         let block_info = storage
             .get_block_info(head_block.id())?
@@ -178,6 +187,7 @@ impl BlockChain {
             vm_metrics,
             dag_accumulator,
             net,
+            dag,
         };
         watch(CHAIN_WATCH_NAME, "n1251");
         match uncles {
@@ -194,6 +204,8 @@ impl BlockChain {
         genesis_epoch: Epoch,
         genesis_block: Block,
         net: ChainNetworkID,
+        dag_store: FlexiDagStorage,
+        dag_genesis: HashValue,
     ) -> Result<Self> {
         debug_assert!(genesis_block.header().is_genesis());
         let txn_accumulator = MerkleAccumulator::new_empty(
@@ -227,7 +239,7 @@ impl BlockChain {
             new_tips,
             dag_accumulator.get_info(),
         )?;
-        Self::new(time_service, executed_block.block.id(), storage, net, None)
+        Self::new(time_service, executed_block.block.id(), storage, net, None, dag_store, dag_genesis)
     }
 
     pub fn current_epoch_uncles_size(&self) -> u64 {
@@ -405,15 +417,15 @@ impl BlockChain {
     }
 
     pub fn verify_with_verifier<V>(&mut self, block: Block) -> Result<VerifiedBlock>
-    where
-        V: BlockVerifier,
+    where
+        V: BlockVerifier,
     {
         V::verify_block(self, block)
     }
 
     pub fn apply_with_verifier<V>(&mut self, block: Block) -> Result<ExecutedBlock>
-    where
-        V: BlockVerifier,
+    where
+        V: BlockVerifier,
     {
         let verified_block = self.verify_with_verifier::<V>(block)?;
         watch(CHAIN_WATCH_NAME, "n1");
@@ -435,6 +447,166 @@ impl BlockChain {
         self.connect(ExecutedBlock { block, block_info })
     }
 
+    fn execute_dag_block(&self, verified_block: VerifiedBlock) -> Result<ExecutedBlock> {
+        let block = verified_block.0;
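+        // The `uncles` list of a dag block carries the blue blocks: the first entry is treated
+        // as the selected parent, the remaining entries are the merged blue blocks.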
+        let blues = block.uncles().expect("Blue blocks must exist");
+        let (selected_parent, blues) = blues.split_at(1);
+        let selected_parent = selected_parent[0].clone();
+        let block_info = self.storage.get_block_info(selected_parent.id())?.expect("selected parent must executed");
+        let header = block.header();
+
+        let block_id = header.id();
+
+        let block_metadata = block.to_metadata(selected_parent.gas_used());
+        let mut transaction = vec![Transaction::BlockMetadata(block_metadata)];
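+        // Collect the user transactions of every blue block first, then append this block's own
+        // transactions, so they are all executed in one batch below.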
+        for blue in blues {
+            let blue_block = self.storage.get_block_by_hash(blue.parent_hash())?.expect("block blue need exist");
+            transaction.extend(blue_block.transactions().iter().cloned().map(Transaction::UserTransaction))
+        }
+        transaction.extend(
+            block.transactions().iter().cloned().map(Transaction::UserTransaction),
+        );
+
+        watch(CHAIN_WATCH_NAME, "n21");
+        let executed_data = starcoin_executor::block_execute(
+            &self.statedb,
+            transaction.clone(),
+            self.epoch.block_gas_limit(), //TODO: Fix me
+            self.vm_metrics.clone(),
+        )?;
+        watch(CHAIN_WATCH_NAME, "n22");
+        let state_root = executed_data.state_root;
+        let vec_transaction_info = &executed_data.txn_infos;
+        verify_block!(
+            VerifyBlockField::State,
+            state_root == header.state_root(),
+            "verify block:{:?} state_root fail",
+            block_id,
+        );
+        let block_gas_used = vec_transaction_info
+            .iter()
+            .fold(0u64, |acc, i| acc.saturating_add(i.gas_used()));
+        verify_block!(
+            VerifyBlockField::State,
+            block_gas_used == header.gas_used(),
+            "invalid block: gas_used is not match"
+        );
+
+        verify_block!(
+            VerifyBlockField::State,
+            vec_transaction_info.len() == transaction.len(),
+            "invalid txn num in the block"
+        );
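+        // Rebuild the transaction and block accumulators from the selected parent's BlockInfo,
+        // so this block extends its selected parent's accumulators.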
+        let txn_accumulator = info_2_accumulator(
+            block_info.txn_accumulator_info,
+            AccumulatorStoreType::Transaction,
+            self.storage.as_ref(),
+        );
+        let block_accumulator = info_2_accumulator(
+            block_info.block_accumulator_info,
+            AccumulatorStoreType::Block,
+            self.storage.as_ref(),
+        );
+        let transaction_global_index = txn_accumulator.num_leaves();
+
+        // txn accumulator verify.
+        let executed_accumulator_root = {
+            let included_txn_info_hashes: Vec<_> =
+                vec_transaction_info.iter().map(|info| info.id()).collect();
+            txn_accumulator.append(&included_txn_info_hashes)?
+        };
+
+
+        verify_block!(
+            VerifyBlockField::State,
+            executed_accumulator_root == header.txn_accumulator_root(),
+            "verify block: txn accumulator root mismatch"
+        );
+
+        watch(CHAIN_WATCH_NAME, "n23");
+        self.statedb
+            .flush()
+            .map_err(BlockExecutorError::BlockChainStateErr)?;
+        // If chain state is matched, and accumulator is matched,
+        // then, we save flush states, and save block data.
+        watch(CHAIN_WATCH_NAME, "n24");
+        txn_accumulator
+            .flush()
+            .map_err(|_err| BlockExecutorError::BlockAccumulatorFlushErr)?;
+
+        let pre_total_difficulty = block_info.total_difficulty;
+        let total_difficulty = pre_total_difficulty + header.difficulty();
+
+        block_accumulator.append(&[block_id])?;
+        block_accumulator.flush()?;
+
+        let txn_accumulator_info: AccumulatorInfo = txn_accumulator.get_info();
+        let block_accumulator_info: AccumulatorInfo = block_accumulator.get_info();
+        let block_info = BlockInfo::new(
+            block_id,
+            total_difficulty,
+            txn_accumulator_info,
+            block_accumulator_info,
+        );
+
+        watch(CHAIN_WATCH_NAME, "n25");
+
+        // save block's transaction relationship and save transaction
+
+        let block_id = block.id();
+        let txn_infos = executed_data.txn_infos;
+        let txn_events = executed_data.txn_events;
+        let txn_table_infos = executed_data
+            .txn_table_infos
+            .into_iter()
+            .collect::<Vec<_>>();
+
+        debug_assert!(
+            txn_events.len() == txn_infos.len(),
+            "events' length should be equal to txn infos' length"
+        );
+        let txn_info_ids: Vec<_> = txn_infos.iter().map(|info| info.id()).collect();
+        for (info_id, events) in txn_info_ids.iter().zip(txn_events.into_iter()) {
+            self.storage.save_contract_events(*info_id, events)?;
+        }
+
+        self.storage.save_transaction_infos(
+            txn_infos
+                .into_iter()
+                .enumerate()
+                .map(|(transaction_index, info)| {
+                    RichTransactionInfo::new(
+                        block_id,
+                        block.header().number(),
+                        info,
+                        transaction_index as u32,
+                        transaction_global_index
+                            .checked_add(transaction_index as u64)
+                            .expect("transaction_global_index overflow."),
+                    )
+                })
+                .collect(),
+        )?;
+
+        let txn_id_vec = transaction
+            .iter()
+            .map(|user_txn| user_txn.id())
+            .collect::<Vec<HashValue>>();
+        // save transactions
+        self.storage.save_transaction_batch(transaction)?;
+
+        // save block's transactions
+        self.storage.save_block_transaction_ids(block_id, txn_id_vec)?;
+        self.storage.save_block_txn_info_ids(block_id, txn_info_ids)?;
+        self.storage.commit_block(block.clone())?;
+        self.storage.save_block_info(block_info.clone())?;
+
+        self.storage.save_table_infos(txn_table_infos)?;
+
+        watch(CHAIN_WATCH_NAME, "n26");
+        Ok(ExecutedBlock { block, block_info })
+    }
+
     //TODO consider move this logic to BlockExecutor
     fn execute_block_and_save(
         storage: &dyn Store,
@@ -682,9 +854,9 @@ impl BlockChain {
 
             return Ok(next_tips_info
                 == self
-                    .dag_accumulator
-                    .as_ref()
-                    .map(|accumulator| accumulator.get_info()));
+                    .dag_accumulator
+                    .as_ref()
+                    .map(|accumulator| accumulator.get_info()));
         }
     }
 
@@ -909,6 +1081,7 @@ impl ChainReader for BlockChain {
             self.storage.clone(),
             self.net.clone(),
             self.vm_metrics.clone(),
+            self.dag.clone(),
             //TODO: check missing blocks need to be clean
         )
     }
@@ -957,6 +1130,7 @@ impl ChainReader for BlockChain {
         )
     }
 
+
     fn get_transaction_infos(
         &self,
         start_index: u64,
@@ -1157,6 +1331,12 @@ impl BlockChain {
         }
         Ok(event_with_infos)
     }
+
+    fn connect_dag(&mut self, executed_block: ExecutedBlock) -> Result<ExecutedBlock> {
+        let (block, block_info) = (executed_block.block(), executed_block.block_info());
+
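+        // NOTE: currently a pass-through; the executed block is returned without further processing.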
+        Ok(executed_block)
+    }
 }
 
 impl ChainWriter for BlockChain {
@@ -1172,10 +1352,12 @@ impl ChainWriter for BlockChain {
                 .expect("dag blocks must have tips")
                 .clone(),
         )
-        .expect("failed to calculate the tips hash")
+        .expect("failed to calculate the tips hash")
             == executed_block.block().header().parent_hash();
     }
 }
+
+
     fn connect(&mut self, executed_block: ExecutedBlock) -> Result<ExecutedBlock> {
         let (block, block_info) = (executed_block.block(), executed_block.block_info());
         if self.status.status.tips_hash.is_some() {