	errInsertionInterrupted = errors.New("insertion is interrupted")
	errChainStopped         = errors.New("blockchain is stopped")
+	errInvalidOldChain      = errors.New("invalid old chain")
+	errInvalidNewChain      = errors.New("invalid new chain")

	CheckpointCh = make(chan int)
)
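Promoting these inline errors.New values to package-level sentinels lets callers classify a failed reorg with errors.Is instead of matching error strings. A minimal, self-contained sketch of that usage follows; classifyReorgErr is a hypothetical helper for illustration, not a function in this codebase.

package main

import (
	"errors"
	"fmt"
)

// Redeclared here only for illustration; in core/blockchain.go these are the
// package-level sentinels introduced by this diff.
var (
	errInvalidOldChain = errors.New("invalid old chain")
	errInvalidNewChain = errors.New("invalid new chain")
)

// classifyReorgErr is a hypothetical helper showing the benefit of sentinels:
// callers branch with errors.Is instead of comparing error strings.
func classifyReorgErr(err error) string {
	switch {
	case errors.Is(err, errInvalidOldChain):
		return "old chain is missing an ancestor (likely a corrupt database)"
	case errors.Is(err, errInvalidNewChain):
		return "new chain is missing an ancestor"
	default:
		return "unclassified reorg failure"
	}
}

func main() {
	err := fmt.Errorf("reorg aborted: %w", errInvalidOldChain)
	fmt.Println(classifyReorgErr(err)) // old chain is missing an ancestor (likely a corrupt database)
}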
@@ -1476,7 +1478,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
	if reorg {
		// Reorganise the chain if the parent is not the head block
		if block.ParentHash() != currentBlock.Hash() {
-			if err := bc.reorg(currentBlock, block); err != nil {
+			if err := bc.reorg(currentBlock.Header(), block.Header()); err != nil {
				return NonStatTy, err
			}
		}
@@ -1491,9 +1493,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
		bc.writeHeadBlock(block, false)
		// prepare set of masternodes for the next epoch
		if bc.chainConfig.XDPoS != nil && ((block.NumberU64() % bc.chainConfig.XDPoS.Epoch) == (bc.chainConfig.XDPoS.Epoch - bc.chainConfig.XDPoS.Gap)) {
-			err := bc.UpdateM1()
-			if err != nil {
-				log.Crit("Error when update masternodes set. Stopping node", "err", err, "blockNum", block.NumberU64())
+			if err := bc.UpdateM1(); err != nil {
+				log.Crit("Failed to update masternodes during writeBlockWithState", "number", block.Number(), "hash", block.Hash().Hex(), "err", err)
			}
		}
	}
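The masternode-update trigger above fires Gap blocks before each epoch checkpoint: UpdateM1 runs on blocks whose number modulo Epoch equals Epoch - Gap. A small, self-contained sketch of that arithmetic is below; the values Epoch = 900 and Gap = 450 are assumptions used only for illustration, not read from any real chain config.

package main

import "fmt"

// isMasternodeUpdateBlock mirrors the trigger used above: the masternode set
// for the next epoch is prepared Gap blocks before each epoch checkpoint.
func isMasternodeUpdateBlock(number, epoch, gap uint64) bool {
	return number%epoch == epoch-gap
}

func main() {
	const epoch, gap = 900, 450 // illustrative values only
	for _, n := range []uint64{449, 450, 900, 1350, 1800} {
		fmt.Printf("block %4d -> update masternodes: %v\n", n, isMasternodeUpdateBlock(n, epoch, gap))
	}
	// block  449 -> update masternodes: false
	// block  450 -> update masternodes: true
	// block  900 -> update masternodes: false
	// block 1350 -> update masternodes: true
	// block 1800 -> update masternodes: false
}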
@@ -2275,153 +2276,223 @@ func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
// blocks and inserts them to be part of the new canonical chain and accumulates
// potential missing transactions and post an event about them.
-func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
+func (bc *BlockChain) reorg(oldHead, newHead *types.Header) error {
+	log.Warn("Reorg", "OldHash", oldHead.Hash().Hex(), "OldNum", oldHead.Number, "NewHash", newHead.Hash().Hex(), "NewNum", newHead.Number)
+
	var (
-		newChain    types.Blocks
-		oldChain    types.Blocks
-		commonBlock *types.Block
-		deletedTxs  types.Transactions
-		addedTxs    types.Transactions
-		deletedLogs []*types.Log
+		newChain    []*types.Header
+		oldChain    []*types.Header
+		commonBlock *types.Header
	)
-	log.Warn("Reorg", "oldBlock hash", oldBlock.Hash().Hex(), "number", oldBlock.NumberU64(), "newBlock hash", newBlock.Hash().Hex(), "number", newBlock.NumberU64())
-
-	// first reduce whoever is higher bound
-	if oldBlock.NumberU64() > newBlock.NumberU64() {
-		// reduce old chain
-		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
-			oldChain = append(oldChain, oldBlock)
-			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-			if logs := bc.collectLogs(oldBlock, true); len(logs) > 0 {
-				deletedLogs = append(deletedLogs, logs...)
-			}
+
+	// Reduce the longer chain to the same number as the shorter one
+	if oldHead.Number.Uint64() > newHead.Number.Uint64() {
+		// Old chain is longer, gather all transactions and logs as deleted ones
+		for ; oldHead != nil && oldHead.Number.Uint64() != newHead.Number.Uint64(); oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1) {
+			oldChain = append(oldChain, oldHead)
		}
	} else {
-		// reduce new chain and append new chain blocks for inserting later on
-		for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
-			newChain = append(newChain, newBlock)
+		// New chain is longer, stash all blocks away for subsequent insertion
+		for ; newHead != nil && newHead.Number.Uint64() != oldHead.Number.Uint64(); newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1) {
+			newChain = append(newChain, newHead)
		}
	}
-	if oldBlock == nil {
-		return errors.New("invalid old chain")
+	if oldHead == nil {
+		return errInvalidOldChain
	}
-	if newBlock == nil {
-		return errors.New("invalid new chain")
+	if newHead == nil {
+		return errInvalidNewChain
	}

+	// Both sides of the reorg are at the same number, reduce both until the common
+	// ancestor is found
	for {
-		if oldBlock.Hash() == newBlock.Hash() {
-			commonBlock = oldBlock
+		// If the common ancestor was found, bail out
+		if oldHead.Hash() == newHead.Hash() {
+			commonBlock = oldHead
			break
		}
+		// Remove an old block as well as stash away a new block
+		oldChain = append(oldChain, oldHead)
+		newChain = append(newChain, newHead)

-		oldChain = append(oldChain, oldBlock)
-		newChain = append(newChain, newBlock)
-		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-		if logs := bc.collectLogs(oldBlock, true); len(logs) > 0 {
-			deletedLogs = append(deletedLogs, logs...)
+		// Step back with both chains
+		oldHead = bc.GetHeader(oldHead.ParentHash, oldHead.Number.Uint64()-1)
+		if oldHead == nil {
+			return errInvalidOldChain
		}
-
-		oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
-		if oldBlock == nil {
-			return errors.New("invalid old chain")
-		}
-		if newBlock == nil {
-			return errors.New("invalid new chain")
+		newHead = bc.GetHeader(newHead.ParentHash, newHead.Number.Uint64()-1)
+		if newHead == nil {
+			return errInvalidNewChain
		}
	}

	// Ensure XDPoS engine committed block will be not reverted
	if xdpos, ok := bc.Engine().(*XDPoS.XDPoS); ok {
		latestCommittedBlock := xdpos.EngineV2.GetLatestCommittedBlockInfo()
		if latestCommittedBlock != nil {
-			currentBlock := bc.CurrentBlock()
-			currentBlock.Number().Cmp(latestCommittedBlock.Number)
-			cmp := commonBlock.Number().Cmp(latestCommittedBlock.Number)
+			cmp := commonBlock.Number.Cmp(latestCommittedBlock.Number)
			if cmp < 0 {
				for _, oldBlock := range oldChain {
-					if oldBlock.Number().Cmp(latestCommittedBlock.Number) == 0 {
+					if oldBlock.Number.Cmp(latestCommittedBlock.Number) == 0 {
						if oldBlock.Hash() != latestCommittedBlock.Hash {
-							log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "committed hash", latestCommittedBlock.Hash)
+							log.Error("Impossible reorg, please file an issue", "OldNum", oldBlock.Number, "OldHash", oldBlock.Hash().Hex(), "LatestCommittedHash", latestCommittedBlock.Hash.Hex())
						} else {
-							log.Warn("Stop reorg, blockchain is under forking attack", "old committed num", oldBlock.Number(), "old committed hash", oldBlock.Hash())
-							return fmt.Errorf("stop reorg, blockchain is under forking attack. old committed num %d, hash %x", oldBlock.Number(), oldBlock.Hash())
+							log.Warn("Stop reorg, blockchain is under forking attack", "OldCommittedNum", oldBlock.Number, "OldCommittedHash", oldBlock.Hash().Hex())
+							return fmt.Errorf("stop reorg, blockchain is under forking attack. OldCommitted num %d, hash %s", oldBlock.Number, oldBlock.Hash().Hex())
						}
					}
				}
			} else if cmp == 0 {
				if commonBlock.Hash() != latestCommittedBlock.Hash {
-					log.Error("Impossible reorg, please file an issue", "oldnum", commonBlock.Number(), "oldhash", commonBlock.Hash(), "committed hash", latestCommittedBlock.Hash)
+					log.Error("Impossible reorg, please file an issue", "OldNum", commonBlock.Number.Uint64(), "OldHash", commonBlock.Hash().Hex(), "LatestCommittedHash", latestCommittedBlock.Hash.Hex())
				}
			}
		}
	}

	// Ensure the user sees large reorgs
	if len(oldChain) > 0 && len(newChain) > 0 {
-		logFn := log.Warn
+		logFn := log.Info
+		msg := "Chain reorg detected"
		if len(oldChain) > 63 {
+			msg = "Large chain reorg detected"
			logFn = log.Warn
		}
-		logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(),
-			"drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash())
+		logFn(msg, "number", commonBlock.Number, "hash", commonBlock.Hash().Hex(),
+			"drop", len(oldChain), "dropfrom", oldChain[0].Hash().Hex(), "add", len(newChain), "addfrom", newChain[0].Hash().Hex())
		blockReorgAddMeter.Mark(int64(len(newChain)))
		blockReorgDropMeter.Mark(int64(len(oldChain)))
		blockReorgMeter.Mark(1)
+	} else if len(newChain) > 0 {
+		// Special case happens in the post merge stage that current head is
+		// the ancestor of new head while these two blocks are not consecutive
+		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number, "hash", newChain[0].Hash())
+		blockReorgAddMeter.Mark(int64(len(newChain)))
	} else {
-		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
+		// len(newChain) == 0 && len(oldChain) > 0
+		// rewind the canonical chain to a lower point.
+		log.Error("Impossible reorg, please file an issue", "oldnum", oldHead.Number, "oldhash", oldHead.Hash(), "oldblocks", len(oldChain), "newnum", newHead.Number, "newhash", newHead.Hash(), "newblocks", len(newChain))
	}

-	// Insert the new chain(except the head block(reverse order)),
-	// taking care of the proper incremental order.
-	for i := len(newChain) - 1; i >= 0; i-- {
-		// insert the block in the canonical way, re-writing history
-		bc.writeHeadBlock(newChain[i], true)
+	// Acquire the tx-lookup lock before mutation. This step is essential
+	// as the txlookups should be changed atomically, and all subsequent
+	// reads should be blocked until the mutation is complete.
+	// bc.txLookupLock.Lock()
+
+	// Reorg can be executed, start reducing the chain's old blocks and appending
+	// the new blocks
+	var (
+		deletedTxs []common.Hash
+		rebirthTxs []common.Hash
+
+		deletedLogs []*types.Log
+		rebirthLogs []*types.Log
+	)
+
+	// Deleted log emission on the API uses forward order, which is borked, but
+	// we'll leave it in for legacy reasons.
+	//
+	// TODO(karalabe): This should be nuked out, no idea how, deprecate some APIs?
+	{
+		for i := len(oldChain) - 1; i >= 0; i-- {
+			block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
+			if block == nil {
+				return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
+			}
+			if logs := bc.collectLogs(block, true); len(logs) > 0 {
+				deletedLogs = append(deletedLogs, logs...)
+			}
+			if len(deletedLogs) > 512 {
+				go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+				deletedLogs = nil
+			}
+			// TODO(daniel): remove chainSideFeed, reference PR #30601
+			// Also send event for blocks removed from the canon chain.
+			// bc.chainSideFeed.Send(ChainSideEvent{Block: block})
+		}
+		if len(deletedLogs) > 0 {
+			go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+		}
+	}

-		// Collect the new added transactions.
-		addedTxs = append(addedTxs, newChain[i].Transactions()...)
+	// Undo old blocks in reverse order
+	for i := 0; i < len(oldChain); i++ {
+		// Collect all the deleted transactions
+		block := bc.GetBlock(oldChain[i].Hash(), oldChain[i].Number.Uint64())
+		if block == nil {
+			return errInvalidOldChain // Corrupt database, mostly here to avoid weird panics
+		}
+		for _, tx := range block.Transactions() {
+			deletedTxs = append(deletedTxs, tx.Hash())
+		}
+		// Collect deleted logs and emit them for new integrations
+		// if logs := bc.collectLogs(block, true); len(logs) > 0 {
+		// 	slices.Reverse(logs) // Emit revertals latest first, older then
+		// }
+	}

+	// Apply new blocks in forward order
+	for i := len(newChain) - 1; i >= 0; i-- {
+		// Collect all the included transactions
+		block := bc.GetBlock(newChain[i].Hash(), newChain[i].Number.Uint64())
+		if block == nil {
+			return errInvalidNewChain // Corrupt database, mostly here to avoid weird panics
+		}
+		for _, tx := range block.Transactions() {
+			rebirthTxs = append(rebirthTxs, tx.Hash())
+		}
+		// Collect inserted logs and emit them
+		if logs := bc.collectLogs(block, false); len(logs) > 0 {
+			rebirthLogs = append(rebirthLogs, logs...)
+		}
+		if len(rebirthLogs) > 512 {
+			bc.logsFeed.Send(rebirthLogs)
+			rebirthLogs = nil
+		}
+		// Update the head block
+		bc.writeHeadBlock(block, true)
		// prepare set of masternodes for the next epoch
-		if bc.chainConfig.XDPoS != nil && ((newChain[i].NumberU64() % bc.chainConfig.XDPoS.Epoch) == (bc.chainConfig.XDPoS.Epoch - bc.chainConfig.XDPoS.Gap)) {
-			err := bc.UpdateM1()
-			if err != nil {
-				log.Crit("Error when update masternodes set. Stopping node", "err", err, "blockNumber", newChain[i].NumberU64())
+		if bc.chainConfig.XDPoS != nil && ((block.NumberU64() % bc.chainConfig.XDPoS.Epoch) == (bc.chainConfig.XDPoS.Epoch - bc.chainConfig.XDPoS.Gap)) {
+			if err := bc.UpdateM1(); err != nil {
+				log.Crit("Failed to update masternodes during reorg", "number", block.Number(), "hash", block.Hash().Hex(), "err", err)
			}
		}
	}
+	if len(rebirthLogs) > 0 {
+		bc.logsFeed.Send(rebirthLogs)
+	}

	// Delete useless indexes right now which includes the non-canonical
	// transaction indexes, canonical chain indexes which above the head.
-	indexesBatch := bc.db.NewBatch()
-	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
-		rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
+	batch := bc.db.NewBatch()
+	for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) {
+		rawdb.DeleteTxLookupEntry(batch, tx)
+	}
+	// Delete all hash markers that are not part of the new canonical chain.
+	// Because the reorg function handles new chain head, all hash
+	// markers greater than new chain head should be deleted.
+	number := commonBlock.Number
+	if len(newChain) > 0 {
+		number = newChain[0].Number
	}
-	// Delete any canonical number assignments above the new head
-	number := bc.CurrentBlock().NumberU64()
-	for i := number + 1; ; i++ {
+	for i := number.Uint64() + 1; ; i++ {
		hash := rawdb.ReadCanonicalHash(bc.db, i)
		if hash == (common.Hash{}) {
			break
		}
-		rawdb.DeleteCanonicalHash(indexesBatch, i)
+		rawdb.DeleteCanonicalHash(batch, i)
	}
-	if err := indexesBatch.Write(); err != nil {
+	if err := batch.Write(); err != nil {
		log.Crit("Failed to delete useless indexes", "err", err)
	}
-	// If any logs need to be fired, do it now. In theory we could avoid creating
-	// this goroutine if there are no events to fire, but realistcally that only
-	// ever happens if we're reorging empty blocks, which will only happen on idle
-	// networks where performance is not an issue either way.
-	if len(deletedLogs) > 0 {
-		go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
-	}
-	if len(oldChain) > 0 {
-		go func() {
-			for i := len(oldChain) - 1; i >= 0; i-- {
-				bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
-			}
-		}()
-	}
+
+	// Reset the tx lookup cache to clear stale txlookup cache.
+	// bc.txLookupCache.Purge()
+
+	// Release the tx-lookup lock after mutation.
+	// bc.txLookupLock.Unlock()
+
	return nil
}
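The heart of the rewritten function is the common-ancestor search over headers: the longer side is first walked down to the shorter side's height, then both sides step back in lockstep until their hashes match. The toy sketch below is not XDPoSChain code; it uses a throwaway header type and an in-memory parent lookup purely to illustrate the same two-phase walk.

package main

import "fmt"

// toyHeader stands in for *types.Header: just a hash, a parent hash and a number.
type toyHeader struct {
	Hash, ParentHash string
	Number           uint64
}

// findCommonAncestor mirrors the walk in reorg: first equalize the heights of
// the two sides, then step both back together until the same header is reached.
func findCommonAncestor(byHash map[string]*toyHeader, oldHead, newHead *toyHeader) (ancestor *toyHeader, oldChain, newChain []*toyHeader) {
	if oldHead.Number > newHead.Number {
		for ; oldHead != nil && oldHead.Number != newHead.Number; oldHead = byHash[oldHead.ParentHash] {
			oldChain = append(oldChain, oldHead)
		}
	} else {
		for ; newHead != nil && newHead.Number != oldHead.Number; newHead = byHash[newHead.ParentHash] {
			newChain = append(newChain, newHead)
		}
	}
	if oldHead == nil || newHead == nil {
		return nil, oldChain, newChain // broken chain: reorg would return an error here
	}
	for oldHead.Hash != newHead.Hash {
		oldChain = append(oldChain, oldHead)
		newChain = append(newChain, newHead)
		if oldHead, newHead = byHash[oldHead.ParentHash], byHash[newHead.ParentHash]; oldHead == nil || newHead == nil {
			return nil, oldChain, newChain
		}
	}
	return oldHead, oldChain, newChain
}

func main() {
	// a <- b <- c       (old canonical head: c)
	//       \
	//        d <- e     (new head: e)
	headers := map[string]*toyHeader{
		"a": {Hash: "a", Number: 1},
		"b": {Hash: "b", ParentHash: "a", Number: 2},
		"c": {Hash: "c", ParentHash: "b", Number: 3},
		"d": {Hash: "d", ParentHash: "b", Number: 3},
		"e": {Hash: "e", ParentHash: "d", Number: 4},
	}
	ancestor, drop, add := findCommonAncestor(headers, headers["c"], headers["e"])
	fmt.Println("common ancestor:", ancestor.Hash, "blocks dropped:", len(drop), "blocks added:", len(add))
	// common ancestor: b blocks dropped: 1 blocks added: 2
}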
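The tx-lookup cleanup at the end keys off types.HashDifference(deletedTxs, rebirthTxs): only transactions that were dropped with the old chain and not re-included by the new one lose their lookup entry. The toy stand-in below is shown only to make that set-difference semantics concrete; it is not the real helper the diff calls.

package main

import "fmt"

// hashDifference returns the entries of a that do not appear in b, preserving
// order — a toy stand-in for the types.HashDifference call in the diff.
func hashDifference(a, b []string) []string {
	drop := make(map[string]struct{}, len(b))
	for _, h := range b {
		drop[h] = struct{}{}
	}
	keep := make([]string, 0, len(a))
	for _, h := range a {
		if _, ok := drop[h]; !ok {
			keep = append(keep, h)
		}
	}
	return keep
}

func main() {
	deleted := []string{"tx1", "tx2", "tx3"} // transactions in the dropped blocks
	rebirth := []string{"tx2", "tx4"}        // transactions re-included by the new chain
	fmt.Println(hashDifference(deleted, rebirth)) // [tx1 tx3] lose their tx-lookup entries
}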