diff --git a/src/bucket/BucketManager.cpp b/src/bucket/BucketManager.cpp
index 358745e50c..38f8c3d919 100644
--- a/src/bucket/BucketManager.cpp
+++ b/src/bucket/BucketManager.cpp
@@ -1349,7 +1349,8 @@ BucketManager::loadCompleteLedgerState(HistoryArchiveState const& has)
     std::vector<std::pair<Hash, std::string>> hashes;
     for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i)
     {
-        HistoryStateBucket const& hsb = has.currentBuckets.at(i - 1);
+        HistoryStateBucket<LiveBucket> const& hsb =
+            has.currentBuckets.at(i - 1);
         hashes.emplace_back(hexToBin256(hsb.snap),
                             fmt::format(FMT_STRING("snap {:d}"), i - 1));
         hashes.emplace_back(hexToBin256(hsb.curr),
@@ -1526,7 +1527,7 @@ BucketManager::visitLedgerEntries(
     std::vector<std::pair<Hash, std::string>> hashes;
     for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
    {
-        HistoryStateBucket const& hsb = has.currentBuckets.at(i);
+        HistoryStateBucket<LiveBucket> const& hsb = has.currentBuckets.at(i);
         hashes.emplace_back(hexToBin256(hsb.curr),
                             fmt::format(FMT_STRING("curr {:d}"), i));
         hashes.emplace_back(hexToBin256(hsb.snap),
diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp
index a5b345bf82..4afed7db0e 100644
--- a/src/history/HistoryArchive.cpp
+++ b/src/history/HistoryArchive.cpp
@@ -8,6 +8,7 @@
 #include "util/asio.h"
 #include "history/HistoryArchive.h"
 #include "bucket/BucketManager.h"
+#include "bucket/HotArchiveBucketList.h"
 #include "bucket/LiveBucket.h"
 #include "bucket/LiveBucketList.h"
 #include "crypto/Hex.h"
@@ -35,8 +36,6 @@
 namespace stellar
 {
 
-unsigned const HistoryArchiveState::HISTORY_ARCHIVE_STATE_VERSION = 1;
-
 template <typename... Tokens>
 std::string
 formatString(std::string const& templateString, Tokens const&... tokens)
@@ -65,15 +64,33 @@ HistoryArchiveState::futuresAllResolved() const
             return false;
         }
     }
+
+    for (auto const& level : hotArchiveBuckets)
+    {
+        if (level.next.isMerging())
+        {
+            return false;
+        }
+    }
     return true;
 }
 
 bool
 HistoryArchiveState::futuresAllClear() const
 {
-    return std::all_of(
-        currentBuckets.begin(), currentBuckets.end(),
-        [](HistoryStateBucket const& bl) { return bl.next.isClear(); });
+    if (!std::all_of(currentBuckets.begin(), currentBuckets.end(),
+                     [](auto const& bl) { return bl.next.isClear(); }))
+    {
+        return false;
+    }
+
+    if (hasHotArchiveBuckets())
+    {
+        return std::all_of(hotArchiveBuckets.begin(), hotArchiveBuckets.end(),
+                           [](auto const& bl) { return bl.next.isClear(); });
+    }
+
+    return true;
 }
 
 void
@@ -87,19 +104,32 @@ HistoryArchiveState::resolveAllFutures()
             level.next.resolve();
         }
     }
+
+    for (auto& level : hotArchiveBuckets)
+    {
+        if (level.next.isMerging())
+        {
+            level.next.resolve();
+        }
+    }
 }
 
 void
 HistoryArchiveState::resolveAnyReadyFutures()
 {
     ZoneScoped;
-    for (auto& level : currentBuckets)
-    {
-        if (level.next.isMerging() && level.next.mergeComplete())
+    auto resolveMerged = [](auto& buckets) {
+        for (auto& level : buckets)
         {
-            level.next.resolve();
+            if (level.next.isMerging() && level.next.mergeComplete())
+            {
+                level.next.resolve();
+            }
         }
-    }
+    };
+
+    resolveMerged(currentBuckets);
+    resolveMerged(hotArchiveBuckets);
 }
 
 void
@@ -141,7 +171,8 @@ HistoryArchiveState::load(std::string const& inFile)
     in.exceptions(std::ios::badbit);
     cereal::JSONInputArchive ar(in);
     serialize(ar);
-    if (version != HISTORY_ARCHIVE_STATE_VERSION)
+    if (version != HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22 &&
+        version != HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22)
     {
         CLOG_ERROR(History, "Unexpected history archive state version: {}",
                    version);
@@ -211,13 +242,19 @@ HistoryArchiveState::getBucketListHash() const
     // any difference in these algorithms anyways, so..
     SHA256 totalHash;
-    for (auto const& level : currentBuckets)
-    {
-        SHA256 levelHash;
-        levelHash.add(hexToBin(level.curr));
-        levelHash.add(hexToBin(level.snap));
-        totalHash.add(levelHash.finish());
-    }
+    auto hashBuckets = [&totalHash](auto const& buckets) {
+        for (auto const& level : buckets)
+        {
+            SHA256 levelHash;
+            levelHash.add(hexToBin(level.curr));
+            levelHash.add(hexToBin(level.snap));
+            totalHash.add(levelHash.finish());
+        }
+    };
+
+    hashBuckets(currentBuckets);
+    hashBuckets(hotArchiveBuckets);
+
     return totalHash.finish();
 }
 
@@ -229,39 +266,47 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const
     std::set<std::string> inhibit;
     uint256 zero;
     inhibit.insert(binToHex(zero));
-    for (auto b : other.currentBuckets)
-    {
-        inhibit.insert(b.curr);
-        if (b.next.isLive())
-        {
-            b.next.resolve();
-        }
-        if (b.next.hasOutputHash())
-        {
-            inhibit.insert(b.next.getOutputHash());
-        }
-        inhibit.insert(b.snap);
-    }
     std::vector<std::string> ret;
-    for (size_t i = LiveBucketList::kNumLevels; i != 0; --i)
-    {
-        auto s = currentBuckets[i - 1].snap;
-        auto n = s;
-        if (currentBuckets[i - 1].next.hasOutputHash())
+    auto processBuckets = [&inhibit, &ret](auto const& buckets,
+                                           auto const& otherBuckets) {
+        for (auto b : otherBuckets)
         {
-            n = currentBuckets[i - 1].next.getOutputHash();
+            inhibit.insert(b.curr);
+            if (b.next.isLive())
+            {
+                b.next.resolve();
+            }
+            if (b.next.hasOutputHash())
+            {
+                inhibit.insert(b.next.getOutputHash());
+            }
+            inhibit.insert(b.snap);
         }
-        auto c = currentBuckets[i - 1].curr;
-        auto bs = {s, n, c};
-        for (auto const& j : bs)
+
+        for (size_t i = buckets.size(); i != 0; --i)
         {
-            if (inhibit.find(j) == inhibit.end())
+            auto s = buckets[i - 1].snap;
+            auto n = s;
+            if (buckets[i - 1].next.hasOutputHash())
             {
-                ret.push_back(j);
-                inhibit.insert(j);
+                n = buckets[i - 1].next.getOutputHash();
+            }
+            auto c = buckets[i - 1].curr;
+            auto bs = {s, n, c};
+            for (auto const& j : bs)
+            {
+                if (inhibit.find(j) == inhibit.end())
+                {
+                    ret.push_back(j);
+                    inhibit.insert(j);
+                }
             }
         }
-    }
+    };
+
+    processBuckets(currentBuckets, other.currentBuckets);
+    processBuckets(hotArchiveBuckets, other.hotArchiveBuckets);
+
     return ret;
 }
 
@@ -270,13 +315,18 @@ HistoryArchiveState::allBuckets() const
 {
     ZoneScoped;
     std::set<std::string> buckets;
-    for (auto const& level : currentBuckets)
-    {
-        buckets.insert(level.curr);
-        buckets.insert(level.snap);
-        auto nh = level.next.getHashes();
-        buckets.insert(nh.begin(), nh.end());
-    }
+    auto processBuckets = [&buckets](auto const& bucketList) {
+        for (auto const& level : bucketList)
+        {
+            buckets.insert(level.curr);
+            buckets.insert(level.snap);
+            auto nh = level.next.getHashes();
+            buckets.insert(nh.begin(), nh.end());
+        }
+    };
+
+    processBuckets(currentBuckets);
+    processBuckets(hotArchiveBuckets);
     return std::vector<std::string>(buckets.begin(), buckets.end());
 }
 
@@ -302,11 +352,9 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
     };
 
     // Process bucket, return version
-    auto processBucket = [&](std::string const& bucketHash) {
-        auto bucket = app.getBucketManager().getBucketByHash<LiveBucket>(
-            hexToBin256(bucketHash));
-        releaseAssert(bucket);
+    auto processBucket = [&](auto const& bucket) {
         int32_t version = 0;
+        releaseAssert(bucket);
         if (!bucket->isEmpty())
         {
             version = bucket->getBucketVersion();
@@ -318,58 +366,89 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
         return version;
     };
 
-    // Iterate bottom-up, from oldest to newest buckets
-    for (uint32_t j = LiveBucketList::kNumLevels; j != 0; --j)
-    {
-        auto i = j - 1;
-        auto const& level = currentBuckets[i];
-
-        // Note: snap is always older than curr, and therefore must be processed
-        // first
-        if (!validateBucketVersion(processBucket(level.snap)) ||
-            !validateBucketVersion(processBucket(level.curr)))
+    auto validateBucketList = [&](auto const& buckets,
+                                  uint32_t expectedLevels) {
+        if (buckets.size() != expectedLevels)
         {
+            CLOG_ERROR(History, "Invalid HAS: bucket list size mismatch");
             return false;
         }
 
-        // Level 0 future buckets are always clear
-        if (i == 0)
+        using BucketT =
+            typename std::decay_t<decltype(buckets)>::value_type::bucket_type;
+        for (uint32_t j = expectedLevels; j != 0; --j)
         {
-            if (!level.next.isClear())
+
+            auto i = j - 1;
+            auto const& level = buckets[i];
+
+            // Note: snap is always older than curr, and therefore must be
+            // processed first
+            auto curr = app.getBucketManager().getBucketByHash<BucketT>(
+                hexToBin256(level.curr));
+            auto snap = app.getBucketManager().getBucketByHash<BucketT>(
+                hexToBin256(level.snap));
+            if (!validateBucketVersion(processBucket(snap)) ||
+                !validateBucketVersion(processBucket(curr)))
             {
-                CLOG_ERROR(History,
-                           "Invalid HAS: next must be clear at level 0");
                 return false;
             }
-            break;
-        }
 
-        // Validate "next" field
-        // Use previous level snap to determine "next" validity
-        auto const& prev = currentBuckets[i - 1];
-        uint32_t prevSnapVersion = processBucket(prev.snap);
+            // Level 0 future buckets are always clear
+            if (i == 0)
+            {
+                if (!level.next.isClear())
+                {
+                    CLOG_ERROR(History,
+                               "Invalid HAS: next must be clear at level 0");
+                    return false;
+                }
+                break;
+            }
 
-        if (!nonEmptySeen)
-        {
-            // No real buckets seen yet, move on
-            continue;
-        }
-        else if (protocolVersionStartsFrom(
-                     prevSnapVersion,
-                     LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
-        {
-            if (!level.next.isClear())
+            // Validate "next" field
+            // Use previous level snap to determine "next" validity
+            auto const& prev = buckets[i - 1];
+            auto prevSnap = app.getBucketManager().getBucketByHash<BucketT>(
+                hexToBin256(prev.snap));
+            uint32_t prevSnapVersion = processBucket(prevSnap);
+
+            if (!nonEmptySeen)
             {
-                CLOG_ERROR(History, "Invalid HAS: future must be cleared ");
+                // No real buckets seen yet, move on
+                continue;
+            }
+            else if (protocolVersionStartsFrom(
+                         prevSnapVersion,
+                         LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            {
+                if (!level.next.isClear())
+                {
+                    CLOG_ERROR(History, "Invalid HAS: future must be cleared ");
+                    return false;
+                }
+            }
+            else if (!level.next.hasOutputHash())
+            {
+                CLOG_ERROR(History,
+                           "Invalid HAS: future must have resolved output");
                 return false;
             }
         }
-        else if (!level.next.hasOutputHash())
-        {
-            CLOG_ERROR(History,
-                       "Invalid HAS: future must have resolved output");
-            return false;
-        }
+
+        return true;
+    };
+
+    if (!validateBucketList(currentBuckets, LiveBucketList::kNumLevels))
+    {
+        return false;
+    }
+
+    if (hasHotArchiveBuckets() &&
+        !validateBucketList(hotArchiveBuckets,
+                            HotArchiveBucketList::kNumLevels))
+    {
+        return false;
     }
 
     return true;
@@ -379,39 +458,50 @@ void
 HistoryArchiveState::prepareForPublish(Application& app)
 {
     ZoneScoped;
-    // Level 0 future buckets are always clear
-    releaseAssert(currentBuckets[0].next.isClear());
+    auto prepareBucketList = [&](auto& buckets, size_t numLevels) {
+        using BucketT =
+            typename std::decay_t<decltype(buckets)>::value_type::bucket_type;
 
-    for (uint32_t i = 1; i < LiveBucketList::kNumLevels; i++)
-    {
-        auto& level = currentBuckets[i];
-        auto& prev = currentBuckets[i - 1];
-
-        auto snap = app.getBucketManager().getBucketByHash<LiveBucket>(
-            hexToBin256(prev.snap));
-        if (!level.next.isClear() &&
-            protocolVersionStartsFrom(
-                snap->getBucketVersion(),
-                LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
-        {
-            level.next.clear();
-        }
-        else if (level.next.hasHashes() && !level.next.isLive())
+        // Level 0 future buckets are always clear
+        releaseAssert(buckets[0].next.isClear());
+        for (uint32_t i = 1; i < numLevels; i++)
         {
-            // Note: this `maxProtocolVersion` is over-approximate. The actual
-            // max for the ledger being published might be lower, but if the
-            // "true" (lower) max-value were actually in conflict with the state
-            // we're about to publish it should have caused an error earlier
-            // anyways, back when the bucket list and HAS for this state was
-            // initially formed. Since we're just reconstituting a HAS here, we
-            // assume it was legit when formed. Given that getting the true
-            // value here therefore doesn't seem to add much checking, and given
-            // that it'd be somewhat convoluted _to_ materialize the true value
-            // here, we're going to live with the approximate value for now.
-            uint32_t maxProtocolVersion =
-                app.getConfig().LEDGER_PROTOCOL_VERSION;
-            level.next.makeLive(app, maxProtocolVersion, i);
+            auto& level = buckets[i];
+            auto& prev = buckets[i - 1];
+
+            auto snap = app.getBucketManager().getBucketByHash<BucketT>(
+                hexToBin256(prev.snap));
+            if (!level.next.isClear() &&
+                protocolVersionStartsFrom(
+                    snap->getBucketVersion(),
+                    LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            {
+                level.next.clear();
+            }
+            else if (level.next.hasHashes() && !level.next.isLive())
+            {
+                // Note: this `maxProtocolVersion` is over-approximate. The
+                // actual max for the ledger being published might be lower, but
+                // if the "true" (lower) max-value were actually in conflict
+                // with the state we're about to publish it should have caused
+                // an error earlier anyways, back when the bucket list and HAS
+                // for this state was initially formed. Since we're just
+                // reconstituting a HAS here, we assume it was legit when
+                // formed. Given that getting the true value here therefore
+                // doesn't seem to add much checking, and given that it'd be
+                // somewhat convoluted _to_ materialize the true value here,
+                // we're going to live with the approximate value for now.
+                uint32_t maxProtocolVersion =
+                    app.getConfig().LEDGER_PROTOCOL_VERSION;
+                level.next.makeLive(app, maxProtocolVersion, i);
+            }
         }
+    };
+
+    prepareBucketList(currentBuckets, LiveBucketList::kNumLevels);
+    if (hasHotArchiveBuckets())
+    {
+        prepareBucketList(hotArchiveBuckets, HotArchiveBucketList::kNumLevels);
     }
 }
 
@@ -419,7 +509,7 @@ HistoryArchiveState::HistoryArchiveState() : server(STELLAR_CORE_VERSION)
 {
     uint256 u;
     std::string s = binToHex(u);
-    HistoryStateBucket b;
+    HistoryStateBucket<LiveBucket> b;
     b.curr = s;
     b.snap = s;
     while (currentBuckets.size() < LiveBucketList::kNumLevels)
@@ -437,7 +527,7 @@ HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq,
 {
     for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
-        HistoryStateBucket b;
+        HistoryStateBucket<LiveBucket> b;
         auto& level = buckets.getLevel(i);
         b.curr = binToHex(level.getCurr()->getHash());
         b.next = level.getNext();
@@ -446,6 +536,23 @@ HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq,
     }
 }
 
+HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq,
+                                         LiveBucketList const& liveBuckets,
+                                         HotArchiveBucketList const& hotBuckets,
+                                         std::string const& passphrase)
+    : HistoryArchiveState(ledgerSeq, liveBuckets, passphrase)
+{
+    version = HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22;
+    for (uint32_t i = 0; i < HotArchiveBucketList::kNumLevels; ++i)
+    {
+        HistoryStateBucket<HotArchiveBucket> b;
+        b.curr = binToHex(hotBuckets.getLevel(i).getCurr()->getHash());
+        b.next = hotBuckets.getLevel(i).getNext();
+        b.snap = binToHex(hotBuckets.getLevel(i).getSnap()->getHash());
+        hotArchiveBuckets.push_back(b);
+    }
+}
+
 HistoryArchive::HistoryArchive(HistoryArchiveConfiguration const& config)
     : mConfig(config)
 {
diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h
index d6ca646b42..10d68de337 100644
--- a/src/history/HistoryArchive.h
+++ b/src/history/HistoryArchive.h
@@ -4,8 +4,12 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/BucketUtils.h"
 #include "bucket/FutureBucket.h"
+#include "bucket/HotArchiveBucket.h"
+#include "bucket/HotArchiveBucketList.h"
 #include "main/Config.h"
+#include "util/GlobalChecks.h"
 #include "xdr/Stellar-types.h"
 
 #include <cereal/cereal.hpp>
@@ -29,13 +33,16 @@ namespace stellar
 class Application;
 class LiveBucketList;
 class Bucket;
+class LiveBucketList;
+class HotArchiveBucketList;
 
-struct HistoryStateBucket
+template <typename BucketT> struct HistoryStateBucket
 {
+    BUCKET_TYPE_ASSERT(BucketT);
+    using bucket_type = BucketT;
     std::string curr;
-    // TODO: Add archival buckets to history
-    FutureBucket next;
+    FutureBucket<BucketT> next;
     std::string snap;
 
     template <class Archive>
@@ -62,17 +69,27 @@
  */
 struct HistoryArchiveState
 {
-    static unsigned const HISTORY_ARCHIVE_STATE_VERSION;
+    static inline unsigned const HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22 =
+        1;
+    static inline unsigned const
+        HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22 = 2;
 
-    unsigned version{HISTORY_ARCHIVE_STATE_VERSION};
+    unsigned version{HISTORY_ARCHIVE_STATE_VERSION_PRE_PROTOCOL_22};
     std::string server;
     std::string networkPassphrase;
     uint32_t currentLedger{0};
-    std::vector<HistoryStateBucket> currentBuckets;
+    std::vector<HistoryStateBucket<LiveBucket>> currentBuckets;
+    std::vector<HistoryStateBucket<HotArchiveBucket>> hotArchiveBuckets;
 
     HistoryArchiveState();
 
-    HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& buckets,
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+    HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& liveBuckets,
+                        HotArchiveBucketList const& hotBuckets,
+                        std::string const& networkPassphrase);
+#endif
+
+    HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& liveBuckets,
                         std::string const& networkPassphrase);
 
     static std::string baseName();
@@ -105,12 +122,22 @@ struct HistoryArchiveState
         {
             ar(CEREAL_NVP(networkPassphrase));
         }
-        catch (cereal::Exception&)
+        catch (cereal::Exception& e)
        {
             // networkPassphrase wasn't parsed.
-            // This is expected when the input file does not contain it.
+            // This is expected when the input file does not contain it, but
+            // should only ever happen for older versions of History Archive
+            // State.
+            if (version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22)
+            {
+                throw e;
+            }
         }
         ar(CEREAL_NVP(currentBuckets));
+        if (version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22)
+        {
+            ar(CEREAL_NVP(hotArchiveBuckets));
+        }
     }
 
     template <class Archive>
@@ -122,7 +149,18 @@ struct HistoryArchiveState
         {
             ar(CEREAL_NVP(networkPassphrase));
         }
+        else
+        {
+            // New versions of HistoryArchiveState should always have a
+            // networkPassphrase.
+            releaseAssertOrThrow(
+                version < HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22);
+        }
         ar(CEREAL_NVP(currentBuckets));
+        if (version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22)
+        {
+            ar(CEREAL_NVP(hotArchiveBuckets));
+        }
     }
 
     // Return true if all futures are in FB_CLEAR state
@@ -149,6 +187,12 @@ struct HistoryArchiveState
     void prepareForPublish(Application& app);
     bool containsValidBuckets(Application& app) const;
+
+    bool
+    hasHotArchiveBuckets() const
+    {
+        return version >= HISTORY_ARCHIVE_STATE_VERSION_POST_PROTOCOL_22;
+    }
 };
 
 class HistoryArchive : public std::enable_shared_from_this<HistoryArchive>
diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp
index a8d9e23c5c..2eefc86772 100644
--- a/src/history/HistoryManagerImpl.cpp
+++ b/src/history/HistoryManagerImpl.cpp
@@ -406,7 +406,23 @@ HistoryManagerImpl::queueCurrentHistory(uint32_t ledger)
         bl = mApp.getBucketManager().getLiveBucketList();
     }
 
-    HistoryArchiveState has(ledger, bl, mApp.getConfig().NETWORK_PASSPHRASE);
+    HistoryArchiveState has;
+    auto ledgerVers = mApp.getLedgerManager()
+                          .getLastClosedLedgerHeader()
+                          .header.ledgerVersion;
+    if (protocolVersionStartsFrom(
+            ledgerVers,
+            BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+    {
+        auto hotBl = mApp.getBucketManager().getHotArchiveBucketList();
+        has = HistoryArchiveState(ledger, bl, hotBl,
+                                  mApp.getConfig().NETWORK_PASSPHRASE);
+    }
+    else
+    {
+        has = HistoryArchiveState(ledger, bl,
+                                  mApp.getConfig().NETWORK_PASSPHRASE);
+    }
 
     CLOG_DEBUG(History, "Queueing publish state for ledger {}", ledger);
     mEnqueueTimes.emplace(ledger, std::chrono::steady_clock::now());
diff --git a/src/history/test/SerializeTests.cpp b/src/history/test/SerializeTests.cpp
index cb03465b10..d9df52caf3 100644
--- a/src/history/test/SerializeTests.cpp
+++ b/src/history/test/SerializeTests.cpp
@@ -15,7 +15,8 @@ TEST_CASE("Serialization round trip", "[history]")
     std::vector<std::string> testFiles = {
         "stellar-history.testnet.6714239.json",
         "stellar-history.livenet.15686975.json",
-        "stellar-history.testnet.6714239.networkPassphrase.json"};
+        "stellar-history.testnet.6714239.networkPassphrase.json",
+        "stellar-history.testnet.6714239.networkPassphrase.v2.json"};
     for (size_t i = 0; i < testFiles.size(); i++)
     {
         std::string fnPath = "testdata/";
diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp
index affd40e066..b1aeda1160 100644
--- a/src/ledger/LedgerManagerImpl.cpp
+++ b/src/ledger/LedgerManagerImpl.cpp
@@ -1740,8 +1740,20 @@ LedgerManagerImpl::storeCurrentLedger(LedgerHeader const& header,
     // Store the current HAS in the database; this is really just to
     // checkpoint the bucketlist so we can survive a restart and re-attach
     // to the buckets.
-    HistoryArchiveState has(header.ledgerSeq, bl,
-                            mApp.getConfig().NETWORK_PASSPHRASE);
+    HistoryArchiveState has;
+    if (protocolVersionStartsFrom(
+            header.ledgerVersion,
+            BucketBase::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+    {
+        auto hotBl = mApp.getBucketManager().getHotArchiveBucketList();
+        has = HistoryArchiveState(header.ledgerSeq, bl, hotBl,
+                                  mApp.getConfig().NETWORK_PASSPHRASE);
+    }
+    else
+    {
+        has = HistoryArchiveState(header.ledgerSeq, bl,
+                                  mApp.getConfig().NETWORK_PASSPHRASE);
+    }
 
     mApp.getPersistentState().setState(PersistentState::kHistoryArchiveState,
                                        has.toString(), sess);
diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp
index 9d3b4565c0..2e959d636e 100644
--- a/src/main/ApplicationUtils.cpp
+++ b/src/main/ApplicationUtils.cpp
@@ -489,7 +489,7 @@ dumpStateArchivalStatistics(Config cfg)
     std::vector<Hash> hashes;
     for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
-        HistoryStateBucket const& hsb = has.currentBuckets.at(i);
+        HistoryStateBucket<LiveBucket> const& hsb = has.currentBuckets.at(i);
         hashes.emplace_back(hexToBin256(hsb.curr));
         hashes.emplace_back(hexToBin256(hsb.snap));
     }
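Reviewer note: the version-gated load/save in HistoryArchive.h is the crux of backward compatibility here. Version-1 archives keep parsing exactly as before, while version-2 archives carry the extra hotArchiveBuckets vector; the gate on `version` decides whether the extra key is read or written at all. The sketch below is a minimal, self-contained illustration of that pattern with cereal. The names (VersionedState, liveHashes, hotHashes) are hypothetical stand-ins, not stellar-core types, and it collapses the PR's separate load/save members into one serialize function.

```cpp
#include <cereal/archives/json.hpp>
#include <cereal/types/string.hpp>
#include <cereal/types/vector.hpp>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical stand-in for HistoryArchiveState: version 1 carries only
// live-bucket hashes, version 2 appends a hot-archive vector.
struct VersionedState
{
    unsigned version{1};
    std::vector<std::string> liveHashes;
    std::vector<std::string> hotHashes; // serialized only when version >= 2

    template <class Archive>
    void
    serialize(Archive& ar)
    {
        // Handle `version` first so the gate below sees the right value on
        // load as well as on save.
        ar(CEREAL_NVP(version), CEREAL_NVP(liveHashes));
        if (version >= 2)
        {
            // Version-1 states never mention this key, so old-format
            // readers and writers skip it entirely.
            ar(CEREAL_NVP(hotHashes));
        }
    }
};

int
main()
{
    std::stringstream ss;
    {
        VersionedState out;
        out.version = 2;
        out.liveHashes = {"aa", "bb"};
        out.hotHashes = {"cc"};
        cereal::JSONOutputArchive oar(ss);
        out.serialize(oar);
    } // archive flushes on destruction

    VersionedState in;
    {
        cereal::JSONInputArchive iar(ss);
        in.serialize(iar);
    }
    std::cout << "round-tripped " << in.hotHashes.size()
              << " hot-archive hash(es)\n";
    return 0;
}
```

One design consequence worth noting: because the gate keys off a field inside the document itself, a field may only be optional for states written before it was introduced. That is why the PR's load path tolerates a missing networkPassphrase only for pre-protocol-22 states and rethrows the cereal exception for version-2 states.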