Merge pull request zcash#6927 from daira/inv-buffer
Backport fix for INV buffer blowup
nuttycom authored Aug 19, 2024
2 parents 60b971f + 52cd44c commit 623bf72
Showing 2 changed files with 30 additions and 27 deletions.
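
The change addresses a remote memory-blowup vector: the old `ProcessMessage` answered every `MSG_BLOCK` entry of an `inv` message with its own `getheaders` request, each carrying a full block locator, so a peer spamming block invs could force the node to queue an arbitrarily large amount of outbound data per inv message. The fix remembers only the last block hash announced in the message and sends a single `getheaders` after the loop. A minimal Python sketch of the difference, with illustrative function names and an assumed per-message size (this is a model, not zcashd code):

    MSG_BLOCK = 2
    MAX_INV_SZ = 50000        # protocol cap on entries per inv message
    GETHEADERS_BYTES = 1000   # assumed rough size of one getheaders + locator

    def queued_bytes_old(inv_entries):
        # Old behaviour: one getheaders queued per announced block.
        return sum(GETHEADERS_BYTES for inv_type, _ in inv_entries
                   if inv_type == MSG_BLOCK)

    def queued_bytes_new(inv_entries):
        # New behaviour: remember only the last block announced in the
        # message and send a single getheaders after the loop.
        best_block = None
        for inv_type, inv_hash in inv_entries:
            if inv_type == MSG_BLOCK:
                best_block = inv_hash
        return GETHEADERS_BYTES if best_block is not None else 0

    spam = [(MSG_BLOCK, i) for i in range(MAX_INV_SZ)]
    print(queued_bytes_old(spam))   # 50000000: ~50 MB queued by one inv message
    print(queued_bytes_new(spam))   # 1000: bounded regardless of inv size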
qa/rpc-tests/test_framework/comptool.py (20 changes: 18 additions & 2 deletions)
@@ -12,6 +12,7 @@
CInv,
msg_block,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_ping,
@@ -143,6 +144,11 @@ def send_getheaders(self):
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)

def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.conn.send_message(m)

# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
@@ -374,15 +380,25 @@ def run(self):
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.connections ]
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.cb.send_header(block_header) for c in self.connections ]

else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
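
The test-framework half of the change adds a `send_header` helper and teaches `TestManager.run()` two things: to push a block directly (instead of inv'ing it) when the test expects rejection, and to handle bare `CBlockHeader` objects by storing and announcing them. A condensed sketch of the resulting dispatch, using stand-in classes rather than the real `test_framework` types (in the framework itself `CBlock` subclasses `CBlockHeader`, which is why the block branch must come first):

    class CBlockHeader: pass
    class CBlock(CBlockHeader): pass    # in the framework, a block is-a header
    class CTransaction: pass

    class StubConn:
        # Stand-in for the real connections: print instead of talking to a node.
        def __getattr__(self, name):
            return lambda *args: print(name)

    def dispatch(b_or_t, sync_every_block, outcome, conn):
        if isinstance(b_or_t, CBlock):          # checked before CBlockHeader
            if sync_every_block:
                if outcome:
                    conn.send_inv(b_or_t)       # expected success: announce, sync
                else:
                    conn.send_block(b_or_t)     # expected failure: push the block
                    conn.send_ping()            # ping/pong confirms it was processed
            else:
                conn.queue_inv(b_or_t)          # batched inv for later
        elif isinstance(b_or_t, CBlockHeader):  # new path: announce a bare header
            conn.store_header(b_or_t)
            conn.send_header(b_or_t)
        else:
            assert isinstance(b_or_t, CTransaction)
            conn.send_tx(b_or_t)

    dispatch(CBlockHeader(), True, True, StubConn())  # store_header, send_header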
src/main.cpp (37 changes: 12 additions & 25 deletions)
@@ -7145,7 +7145,7 @@ bool static ProcessMessage(const CChainParams& chainparams, CNode* pfrom, string

LOCK(cs_main);

std::vector<CInv> vToFetch;
const uint256* best_block{nullptr};

for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
@@ -7159,29 +7159,14 @@ bool static ProcessMessage(const CChainParams& chainparams, CNode* pfrom, string
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
// First request the headers preceding the announced block. In the normal fully-synced
// case where a new block is announced that succeeds the current tip (no reorganization),
// there are no such headers.
// Secondly, and only when we are close to being synced, we request the announced block directly,
// to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
// time the block arrives, the header chain leading up to it is already validated. Not
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
CNodeState *nodestate = State(pfrom->GetId());

if (chainActive.Tip()->GetBlockTime() > GetTime() - chainparams.GetConsensus().PoWTargetSpacing(pindexBestHeader->nHeight) * 20 &&
nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vToFetch.push_back(inv);
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
}
LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
// Headers-first is the primary method of announcement on
// the network. If a node fell back to sending blocks by inv,
// it's probably for a re-org. The final block hash
// provided should be the highest, so send a getheaders and
// then fetch the blocks we need to catch up.
best_block = &inv.hash;
}
}
else
{
} else {
pfrom->AddKnownWTxId(WTxId(inv.hash, inv.hashAux));
if (fBlocksOnly)
LogPrint("net", "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id);
@@ -7195,8 +7180,10 @@ bool static ProcessMessage(const CChainParams& chainparams, CNode* pfrom, string
}
}

if (!vToFetch.empty())
pfrom->PushMessage("getdata", vToFetch);
if (best_block != nullptr) {
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), *best_block);
LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom->id);
}
}


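
Two details of the rewrite are worth noting. `best_block` is simply overwritten on each qualifying `MSG_BLOCK` entry, so only the final block hash in the message (which, per the comment, should be the highest) drives the single `getheaders`; and because blocks are no longer fetched directly from the inv handler, the `MarkBlockAsInFlight` bookkeeping there is dropped and block download flows entirely through the headers pipeline. A small model of the last-one-wins selection (plain Python with hypothetical hashes, ignoring the already-have and in-flight checks that gate it in the real code):

    MSG_TX, MSG_BLOCK = 1, 2

    def pick_best_block(inv_entries):
        best_block = None             # plays the role of const uint256* best_block
        for inv_type, inv_hash in inv_entries:
            if inv_type == MSG_BLOCK:
                best_block = inv_hash # each later block inv overwrites the earlier
        return best_block

    # A reorg announced oldest-to-newest: only the tip hash seeds the one
    # getheaders request.
    announcement = [(MSG_BLOCK, "block_a"), (MSG_TX, "tx_1"), (MSG_BLOCK, "block_b")]
    assert pick_best_block(announcement) == "block_b"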
