From 626b39b7e812cfa15a179ff82d6b4028580e5df2 Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:34:42 -0400 Subject: [PATCH] Squashed commit of the following: commit 3dfd8fe475ff4f0eca0ff2f211c553e16c78129d Author: Stephen Buttolph Date: Fri Jun 28 12:07:40 2024 -0400 Remove status usage from consensus (#3140) commit c587d91960e6e6aebd2ee595d1b7aa0446e2ee25 Author: Stephen Buttolph Date: Thu Jun 27 17:27:45 2024 -0400 Remove parent lookup from issue (#3132) commit 267c02023a59cc9dfa24f232f76b859102c3b364 Author: Stephen Buttolph Date: Thu Jun 27 16:57:22 2024 -0400 Replace `wasIssued` with `shouldIssueBlock` (#3131) commit 24e9952d37f63114d0988ffd384485f7b4cf2c86 Author: Stephen Buttolph Date: Thu Jun 27 14:16:30 2024 -0400 Simplify dependency registration (#3139) commit 318da000780e3c165eb0c01b05da315c19309971 Author: marun Date: Tue Jun 25 17:35:11 2024 +0200 [tmpnet] Enable bootstrap of subnets with disjoint validator sets (#3138) Co-authored-by: Alberto Benegiamo commit 1a9bc457d8c7a2c9b1336fdac67e9103898a696a Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon Jun 24 05:21:18 2024 -0700 [vms/platformvm] Cleanup execution config tests (#3137) commit 6eef08fe19e657e930d3824bdce87e315f4a4f1a Author: Alberto Benegiamo Date: Fri Jun 21 20:27:30 2024 +0200 Repackaged NextBlockTime and GetNextStakerChangeTime (#3134) commit cafd71c8c3f433faf92c207164e4a1e8548ce1e9 Author: marun Date: Fri Jun 21 20:26:01 2024 +0200 Emit version in JSON format for --json-version (#3129) commit d0c209421a98f69d37fbef1659705f20bdbcf02c Author: Darioush Jalali Date: Thu Jun 20 18:22:06 2024 -0700 bump protobuf (fixes some build issues) (#3142) commit a0741de1c1d1fd89c959f4f8ed84fb34594e0cdf Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Jun 20 09:48:16 2024 -0700 [vms/platformvm] Minor grammer fixes in `state` struct code comments (#3136) commit 
f8fa3cfd59ed7f52f1dc8baddd366457db453eda Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Jun 20 09:27:43 2024 -0700 [chains/atomic] Remove a nested if statement (#3135) Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> commit 28b4790cbcc00584ec4081cd09208825ef46d3ae Author: Stephen Buttolph Date: Wed Jun 19 16:27:57 2024 -0400 Remove block lookup from `deliver` (#3130) commit e740b44c209f43cf9785848298cd9d3bcc83b973 Author: Stephen Buttolph Date: Wed Jun 19 14:49:01 2024 -0400 Refactor `event.Blocker` into `job.Scheduler` (#3125) commit 2e72c7c29c498e7ffa5bb31ff18495e14e2cdfb7 Author: Stephen Buttolph Date: Tue Jun 18 11:42:55 2024 -0400 Remove .Status() from .Accepted() (#3124) commit 5d5b9cfc472dac10eb46e54af6ca18566d0b3ca8 Author: Stephen Buttolph Date: Tue Jun 18 11:11:28 2024 -0400 Remove `Decided` from the `Consensus` interface (#3123) commit 576b3927c54b0e7ad91602572f4aa8f9fe24d768 Author: Stephen Buttolph Date: Mon Jun 17 13:46:05 2024 -0400 Use health labels (#3122) commit 7455c9971e3dd06d3248daf716ce678cfb2abd16 Author: marun Date: Fri Jun 14 22:33:07 2024 +0200 [antithesis] Remove assertions incompatible with fault injection (#3104) Co-authored-by: Stephen Buttolph commit e99d1ba7da3f8f9cf0f9ddd04d08f2ad801f17a9 Author: Stephen Buttolph Date: Fri Jun 14 16:02:54 2024 -0400 Standardize wallet tx acceptance polling (#3110) commit f99a64a4012ed07455e69e25d987d891e74505a6 Author: Stephen Buttolph Date: Fri Jun 14 12:10:23 2024 -0400 Update C-chain wallet context (#3118) commit 347a3f89b2d590c936c85b65b5e30ba92a2ec4f9 Author: aaronbuchwald Date: Thu Jun 13 15:31:16 2024 -0400 Add early termination metrics case by case (#3093) commit fa37f5ab530e04948fcd3d2b2ceed4caae2a9f53 Author: Stephen Buttolph Date: Thu Jun 13 15:09:51 2024 -0400 Remove .Status() from .IsPreferred() (#3111) commit 2b14a72e7a8bc58e95906d767c2357a4032c29ba Author: marun Date: Thu Jun 13 20:28:48 2024 +0200 [e2e] Fix excessively verbose 
output from virtuous test (#3116) commit c72c21e749f57c36ae611a80993d3400e2dea85d Author: marun Date: Thu Jun 13 16:58:21 2024 +0200 [antithesis] Fix image publication job by quoting default tag value (#3112) commit fa05d628b2700498ce96641bb754a9551b5f0a30 Author: felipemadero Date: Thu Jun 13 11:22:00 2024 -0300 bump ledger-avalanche dependency to current main branch (#3115) commit d3a37392cbc0c498f5c484af7db8ea8dad201c93 Author: marun Date: Wed Jun 12 23:23:52 2024 +0200 [antithesis] Add ci jobs to trigger test runs (#3076) commit 5002b8244de7fb6e325a6e322ece5c6d2a1f8149 Author: aaronbuchwald Date: Wed Jun 12 15:27:13 2024 -0400 Error driven snowflake multi counter (#3092) commit 41e46d1178116eb7421fe2adbf2eaa7ef97b0f8b Author: Stephen Buttolph Date: Wed Jun 12 13:05:20 2024 -0400 Update versions for v1.11.8 (#3103) commit cd0c6e152965f44a2794935c41881fb207c8f78c Author: Stephen Buttolph Date: Mon Jun 10 12:46:35 2024 -0400 Use netip.AddrPort rather than ips.IPPort (#3094) commit 504766e54aa0128da620679660ef35072cb165fa Author: Alberto Benegiamo Date: Mon Jun 10 17:02:38 2024 +0200 X-chain - consolidate tx creation in unit tests (#2736) Signed-off-by: Alberto Benegiamo Co-authored-by: Stephen Buttolph commit c28af7dfd1a165116fcd668b1b472c002afe2259 Author: marun Date: Thu Jun 6 23:15:08 2024 +0200 [ci] Ensure focal arm64 builds all have their required dependencies (#3091) commit 6caa655782a7c76ee1b106107529ac52a31a8c29 Author: marun Date: Thu Jun 6 20:28:59 2024 +0200 [ci] Switch to gh workers for arm64 (#3090) commit 59bc3cfd590a6dd65ba1fd3b245ad0bf9de0ac40 Author: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Date: Thu Jun 6 14:25:04 2024 -0400 Add proposervm slot metrics (#3048) Signed-off-by: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> commit 7dca39699bebcab5c7a6fec3c3ec4c01f075f31f Author: Stephen Buttolph Date: Thu Jun 6 13:33:13 2024 -0400 Replace all chain namespaces with labels (#3053) commit 
783fdfc9d52267865300acee56b10e918a6e80b6 Author: aaronbuchwald Date: Thu Jun 6 11:59:05 2024 -0400 Implement error driven snowflake hardcoded with a single beta (#2978) commit f1a9d2ab766ab7c88a11eb491988f7db04b7e310 Author: Stephen Buttolph Date: Thu Jun 6 11:28:09 2024 -0400 Fix race in test (#3089) commit 7d3415cd8febe448d4356098e1a1f810eecef92a Author: Stephen Buttolph Date: Thu Jun 6 06:43:37 2024 -0400 Small metrics cleanup (#3088) commit 1b82dce857823b3795205ffc2bac337cef0c4f25 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Jun 5 23:59:21 2024 -0400 [vms/platformvm] Rename `txstest.Builder` to `txstest.WalletFactory` (#2890) commit 657fe0bd2f8079042dae38f476141176bf9dcd21 Author: Stephen Buttolph Date: Wed Jun 5 22:14:59 2024 -0400 Remove rejection from `consensus.Add` (#3084) commit 08ed4ac62657b76555d839109d05140b16a966b7 Author: Stephen Buttolph Date: Wed Jun 5 20:51:30 2024 -0400 Remove avalanche metrics registerer from consensus context (#3087) commit e8ecbadec8eb1c336dd8927b0225499eceea6f34 Author: Stephen Buttolph Date: Wed Jun 5 17:57:41 2024 -0400 Update versions for v1.11.7 (#3083) Co-authored-by: Darioush Jalali commit 2cf7bd6a053029a808c6f0a6fd244dd50330f166 Author: Stephen Buttolph Date: Tue Jun 4 12:39:04 2024 -0400 Implement label gatherer (#3074) commit 0893516dd74e70b24de5b2030ad196e047ca139a Author: marun Date: Mon Jun 3 18:06:53 2024 +0200 [antithesis] Skip push for builder image (#3070) commit 9b30547a41b9ad424db5a5586f655086a6a19391 Author: marun Date: Mon Jun 3 18:06:17 2024 +0200 [tmpnet] Bootstrap subnets with a single node (#3005) Signed-off-by: marun Co-authored-by: Stephen Buttolph commit a982d257137c7181ea9cdf34821a253017312c13 Author: Stephen Buttolph Date: Mon Jun 3 10:30:50 2024 -0400 Select metric by label in e2e tests (#3073) commit b14689b4e902066c60d7d4ffee407dfcdd3b31c5 Author: stellrust Date: Sun Jun 2 00:59:43 2024 +0800 chore: fix function name (#3075) commit 
b419c2844982c891e8145c521e653c4010b325ff Author: Stephen Buttolph Date: Fri May 31 13:18:46 2024 -0400 Remove averager metrics namespace (#3072) commit 0e9ab7803c2d0f14701cacc25466a3809fb67fc2 Author: Stephen Buttolph Date: Fri May 31 12:01:46 2024 -0400 Remove db namespace (#3068) commit cb2a1772b7f3d68a8696037b24ae4acb1c80c97f Author: Stephen Buttolph Date: Fri May 31 11:25:50 2024 -0400 Remove network namespace (#3067) commit 1d7ba7a6b0993e763f335d85ec2da7eb2646c6bf Author: Stephen Buttolph Date: Fri May 31 11:10:52 2024 -0400 Only compact after executing a large number of blocks (#3065) commit e8c43f554e4e01354e728df8ae2a28b9916a6e3a Author: Stephen Buttolph Date: Fri May 31 10:10:03 2024 -0400 Remove unused metrics namespaces (#3062) commit b45e136382c29152d47ef08380a44a4e89b792a0 Author: Stephen Buttolph Date: Fri May 31 10:01:49 2024 -0400 Remove api namespace (#3066) commit 928d484f3bba95c959efeb81faff7884ab5713ad Author: marun Date: Fri May 31 15:57:45 2024 +0200 [ci] Remove perpetually failing govulncheck job (#3069) commit 349b25a549827feb3cd0cc18d0e8fa53ec3cc579 Author: Stephen Buttolph Date: Thu May 30 16:57:35 2024 -0400 Add pebbledb to docs (#3061) commit 8d18b618be290b0e0fedbbedc76398afba68e6ec Author: marun Date: Thu May 30 21:45:52 2024 +0200 [antithesis] Enable workload instrumentation (#3059) Signed-off-by: marun Co-authored-by: Stephen Buttolph commit ae4f88464556c31685d5a5886a69da02a72c0af6 Author: marun Date: Thu May 30 19:59:13 2024 +0200 [testing] Remove superfluous gomega dep (#3063) commit 3ccc4cb176658a0738e78c842db8c3368ec49725 Author: Stephen Buttolph Date: Thu May 30 10:33:23 2024 -0400 Implement `constants.VMName` (#3058) commit e3d889cbfc8a9614767ac90fd97b06bd37691723 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed May 29 19:45:11 2024 -0400 [vms/platformvm] Replace `GetSubnets` with `GetSubnetIDs` in `State` (#3055) commit 6f7e78aa10be2e8f88dc0e10af60d46d3c7926e2 Author: Stephen Buttolph Date: Wed May 29 
14:38:33 2024 -0400 Add metrics client (#3057) Signed-off-by: Stephen Buttolph Co-authored-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> commit 75b9564891113362a58ac13fbafcc8fde479fc59 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed May 29 13:20:24 2024 -0400 [vms/platformvm] Return the correct owner in `platform.GetSubnets` after transfer (#3054) commit cf7b7a2c8771b2a2082a96d2f8a68035bfc417cc Author: Stephen Buttolph Date: Mon May 27 13:42:20 2024 -0400 Remove optional gatherer (#3052) commit 15ac8cd3473f56d8e39b5b08564b63efe9b32c8a Author: Stephen Buttolph Date: Fri May 24 15:39:19 2024 -0400 Remove subnet filter from Peer.TrackedSubnets() (#2975) commit 54c4b5384d0c8287975e1ee9df3a7aa8d03a9b41 Author: Stephen Buttolph Date: Fri May 24 11:56:20 2024 -0400 Expose canonical warp formatting function (#3049) commit 944d3db88aabe132f0c2040e0214c8305c477e28 Author: Stephen Buttolph Date: Thu May 23 11:58:55 2024 -0400 Update versions for v1.11.6 (#3047) Co-authored-by: Darioush Jalali commit c08fdae7aacee7648be6cee1b694a152eab927ef Author: Stephen Buttolph Date: Thu May 23 10:51:09 2024 -0400 Verify signatures during Parse (#3046) commit dd7a18f268cfe5b0b7632d2b58fc0ad0c631742b Author: Stephen Buttolph Date: Wed May 22 17:08:06 2024 -0400 Grab iterator at previously executed height (#3045) commit 5fe91a4dc68627d2a654b26362d578ba5a7930ac Author: Stephen Buttolph Date: Wed May 22 12:49:23 2024 -0400 Fix typo fix (#3044) commit 04d883ba6d5e699555efad3326294ec99933caa0 Author: Stephen Buttolph Date: Wed May 22 11:59:33 2024 -0400 Cleanup fee config passing (#3043) commit fc2d8cbe4a1a50cadb7cab2e27a0befdd6c0b106 Author: marun Date: Wed May 22 08:28:49 2024 -0700 [antithesis] Ensure node image is pushed (#3042) commit 4159a59281c82c28b81cede5ceccc76f78ea1f86 Author: marun Date: Tue May 21 17:12:23 2024 -0700 [antithesis] Add test setup for xsvm (#2982) Signed-off-by: marun Co-authored-by: Stephen Buttolph commit 
6a894d0e2e762e39970d6094f8139bb25449ce93 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue May 21 14:43:41 2024 -0400 Prevent unnecessary bandwidth from activated ACPs (#3031) commit 301f14d2057b0efe2a59fc17d650b4e14310a6ca Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue May 21 14:13:36 2024 -0400 Fix ACP links (#3037) commit 85eac09bab1208ede8b8865dd185df6b2bcc5e44 Author: Stephen Buttolph Date: Tue May 21 13:19:11 2024 -0400 Change default staking key from RSA 4096 to secp256r1 (#3025) commit 066c3a6ec71bf741ac43ebdf54c876b27d4a6102 Author: Alberto Benegiamo Date: Tue May 21 19:17:55 2024 +0200 P-chain - introducing fees calculators (#2698) Signed-off-by: Alberto Benegiamo Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 34e7b2f680abd9d66678b6db18003fce42f68711 Author: marun Date: Tue May 21 08:54:45 2024 -0700 [tmpnet] Enable single node networks (#3003) Co-authored-by: Alberto Benegiamo commit 551f8d33cbed58a0159f29e200be76c3283b0347 Author: Stephen Buttolph Date: Mon May 20 22:03:28 2024 -0400 Fix negative ETA caused by rollback in vm.SetState (#3036) commit 7106666fd2e1bbe714c7e894da35a2a2cedd15e0 Author: marun Date: Mon May 20 16:38:13 2024 -0700 [tmpnet] Ensure tmpnet compatibility with windows (#3002) Signed-off-by: marun Co-authored-by: Stephen Buttolph commit 46bc6f5e86dd7ef0ecd8bddec7c6c2b84564019c Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon May 20 18:40:21 2024 -0400 `gossipping` -> `gossiping` (#3033) commit eb7ddd75e0bf13b4671b385a9bb7a364b2e8b553 Author: cocoyeal <150209682+cocoyeal@users.noreply.github.com> Date: Mon May 20 22:42:41 2024 +0800 Fix broken docs link (#3028) commit cab15c031364c4bd7b46056dc4ed3714a742d0d3 Author: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Date: Sun May 19 11:59:20 2024 -0400 [build] Update linter version 
(#3024) Signed-off-by: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit ddd6d25379e207c81832eae0778d1946a8a1d66c Author: Stephen Buttolph Date: Fri May 17 19:14:49 2024 -0400 Simplify sampler interface (#3026) commit 0928176698a743af837ec040b011231872821acc Author: Tsachi Herman <24438559+tsachiherman@users.noreply.github.com> Date: Thu May 16 15:12:24 2024 -0400 [vms/avm] fix linter error in benchmark : Use of weak random number generator (#3023) commit 138ce145dcd0144597dee5d7e22593d4f79cf1dc Author: Stephen Buttolph Date: Wed May 15 14:51:20 2024 -0400 Fix pebbledb memory corruption (#3020) commit 3dc4708ee7354030336abe4b7623c4e2b4cbd421 Author: Stephen Buttolph Date: Tue May 14 10:50:46 2024 -0400 Standardize peer logs (#3017) commit 8856ab494d1df88357b6db97b4577cc4f98c521d Author: Alberto Benegiamo Date: Tue May 14 16:19:17 2024 +0200 Repackaged upgrades times into upgrade package (#3019) commit e61e949b02e45724f6c0fa37a6cff5c6998c4aef Author: Stephen Buttolph Date: Mon May 13 18:44:59 2024 -0400 Remove pre-Durango networking checks (#3018) commit 4ac613835aa2cd64d6d6527fadf292133a09cb18 Author: Andy Sloane Date: Mon May 13 10:04:03 2024 -0700 boostrapper: compact blocks before iterating them (#2997) Signed-off-by: Andy Sloane commit c4c9800b58a8e523c2fc2bdfe30b88440d4fd049 Author: marun Date: Mon May 13 09:34:00 2024 -0700 [antithesis] Ensure images with a prefix are pushed (#3016) commit 85a7f2feb529dd570454ba2bdfa1cd36bc052a44 Author: Stephen Buttolph Date: Mon May 13 11:55:59 2024 -0400 Mark nodes as connected to the P-chain networking stack (#2981) commit 948cf7e57707059d4d515b8d64eece3f42970479 Author: cartnavoy <169027576+cartnavoy@users.noreply.github.com> Date: Sun May 12 21:53:27 2024 +0800 chore: fix function name comments (#3015) Signed-off-by: cartnavoy commit fb6b2d93838233ff009f033046fd9a34243b3f2f Author: marun Date: Fri May 10 11:03:56 2024 -0700 [antithesis] Refactor existing job to 
support xsvm test setup (#2976) commit ace88cef3921ef361cfe22283358a1e63b3b3795 Author: cocoyeal <150209682+cocoyeal@users.noreply.github.com> Date: Sat May 11 01:09:55 2024 +0800 Chore: fix typos (#3010) commit 22dc0d21c215947197622dca6dd5fffafc2e26e4 Author: Stephen Buttolph Date: Fri May 10 10:53:02 2024 -0400 Use gauges for time metrics (#3009) commit d8af66306a3ec63ddecf16bbb522f979688d414a Author: Andrei Lebedev Date: Fri May 10 00:49:04 2024 +1000 vms/txs/mempool: unify avm and platformvm mempool implementations (#2994) commit c10812b8292210df8a8f25b468c7dfec80cfea7a Author: marun Date: Wed May 8 11:41:19 2024 -0700 Update go version to 1.21.10 (#3004) commit 759df8e8ffd2b316f349d7ff77eafaae147f03d0 Author: Andy Sloane Date: Mon May 6 15:00:33 2024 -0700 prefixdb: fix Compact with nil limit (#3000) commit 64233361752cf6039ca966137c3b121cb49ce8d4 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon May 6 14:27:33 2024 -0400 Remove unused `Metadata` struct (#3001) commit e7163eb5a376dc755d1a2979a690799cad9a0fc7 Author: marun Date: Mon May 6 10:44:58 2024 -0700 Fix antithesis image publication (#2998) commit 104a00acdc8440aa67ba5ce0af18a784f728b36a Author: Stephen Buttolph Date: Mon May 6 10:51:04 2024 -0400 Cleanup compression metrics (#2992) commit e4ffba10a812f6763dcec73665ca00a417cbe9f9 Author: Stephen Buttolph Date: Mon May 6 10:51:00 2024 -0400 Cleanup meterdb metrics (#2991) commit 67a8b10abd9cf8644e819c0b28f7b25894cec7fc Author: hattizai Date: Mon May 6 02:30:36 2024 +0800 Chore: fix typos (#2993) commit e373a574f4291d788c121354f84d02d331933a10 Author: jujube <50985954+jujube@users.noreply.github.com> Date: Mon May 6 01:05:23 2024 +0800 fix: version application interface check (#2995) commit 91d9655356bdb1df8b66c0dd03c725d44c38e3bd Author: Stephen Buttolph Date: Fri May 3 17:09:59 2024 -0400 Use vectors for accepted tx and block metrics (#2990) commit fc6f4e73a5dd246ed6ffc8691c38eb6cd5fa6a0d Author: Stephen Buttolph Date: Fri May 
3 16:21:35 2024 -0400 Use vector for router latency metrics (#2989) commit 1b2f3d9044012677153f774bb7d9ef84e72bb730 Author: marun Date: Fri May 3 13:05:11 2024 -0700 Simplify go version maintenance (#2977) commit bc4d747ad90c0b48dba46769b84a7c834f0edb06 Author: Stephen Buttolph Date: Fri May 3 15:47:23 2024 -0400 Use vector in message sender (#2988) commit 7467a40a4ed404c965bac53d2eacb9f8d820f8ed Author: Stephen Buttolph Date: Fri May 3 15:47:12 2024 -0400 Use vectors for message handler metrics (#2987) commit 067de22e836288415467b56e3466246e149d01aa Author: Stephen Buttolph Date: Fri May 3 14:24:08 2024 -0400 Use vectors for p2p message metrics (#2983) commit b158abd9fab5fc1ed0be180aa022f8442a177901 Author: Stephen Buttolph Date: Fri May 3 14:23:41 2024 -0400 Simplify gossip metrics (#2984) commit 6b132796cb60e944a0697c58d50b7ac01356982c Author: Stephen Buttolph Date: Fri May 3 12:49:43 2024 -0400 Use vectors in message queue metrics (#2985) commit 2f0216b332cd7d2aac56922c2f3f58edbc1cc1ec Author: Stephen Buttolph Date: Thu May 2 16:08:15 2024 -0400 Reduce p2p sdk metrics (#2980) commit fd447ed748db91d614e9ec3b7713b3711ce23a18 Author: Stephen Buttolph Date: Thu May 2 16:08:03 2024 -0400 Update metercacher to use vectors (#2979) commit 9a89b5e802bd740fce00bfaa1de88875ba301440 Author: aaronbuchwald Date: Wed May 1 13:40:56 2024 -0400 Consolidate record poll (#2970) commit f8d7b29a6771899a5317d6eae6e296a0153963d2 Author: Stephen Buttolph Date: Tue Apr 30 13:02:39 2024 -0400 Update versions for v1.11.5 (#2971) Co-authored-by: Darioush Jalali commit bda0e6d1ef41310d19fe2826fb01f6de6789e36a Author: Stephen Buttolph Date: Mon Apr 29 11:51:02 2024 -0400 Cleanup test block creation (#2973) commit eb7230350f3c5150dede0e098a2937a8bb218f2f Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Apr 26 12:21:35 2024 -0400 Bump bufbuild/buf-setup-action from 1.30.0 to 1.31.0 (#2923) Co-authored-by: Stephen Buttolph commit 
1f5fba9b48f1b51ba49996effffc364b770637af Author: Stephen Buttolph Date: Wed Apr 24 15:24:55 2024 -0400 Abandon decided blocks (#2968) commit 4aec3dc350b1f0e8a37611c0a0b6a1dfc414be80 Author: aaronbuchwald Date: Wed Apr 24 14:50:41 2024 -0400 Consolidate beta (#2949) commit 1b90a27470eef8efc6d1e10dc3fb0a926ee92774 Author: Stephen Buttolph Date: Tue Apr 23 21:32:29 2024 -0400 Exit topological sort earlier (#2965) commit 162c916373a137d5f653222b8ecd1e9c7097a8c6 Author: Stephen Buttolph Date: Tue Apr 23 17:30:26 2024 -0400 Improve and test getProcessingAncestor (#2956) commit 2535387d3f579af22e1f52f93a8b7f4fc7060844 Author: Stephen Buttolph Date: Tue Apr 23 16:50:50 2024 -0400 Cleanup consensus engine tests (#2953) commit 043fd2d1a2602edba330d0509569b0adaaf8c214 Author: marun Date: Mon Apr 22 13:40:37 2024 -0700 [ci] Fix conditional guarding monitoring configuration (#2959) commit 337dfa5e00be2e63d7b15ce8f9db9e4076659bfc Author: Anna Smith <155628741+socialsister@users.noreply.github.com> Date: Tue Apr 23 03:49:36 2024 +0800 chore: fix function names in comments (#2957) Signed-off-by: socialsister commit 52dd10f64f760ada007ba5eb9e2788f70c239ad4 Author: Stephen Buttolph Date: Thu Apr 18 15:34:19 2024 -0400 Add manager validator set callbacks (#2950) commit eca19b7e5833ba5100bbd537d2e7bf2ccd74983b Author: Stephen Buttolph Date: Thu Apr 18 15:20:57 2024 -0400 Cleanup avalanche bootstrapping fetching (#2947) Signed-off-by: Stephen Buttolph Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 197179fdbac7095eb44856d8266f67c32fbe16d0 Author: Stephen Buttolph Date: Thu Apr 18 13:56:57 2024 -0400 Improve bootstrapping peer selection (#2946) Signed-off-by: Stephen Buttolph Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit d701e25609659282bcf9695fb2002f8d11e8f0e5 Author: marun Date: Thu Apr 18 07:37:01 2024 -0700 `e2e`: Add basic warp test with xsvm (#2043) Co-authored-by: Stephen Buttolph commit 
6d8706e3775043eea29bee685adff63ee02e02ae Author: marun Date: Wed Apr 17 13:34:07 2024 -0700 [tmpnet] Add network reuse to e2e fixture (#2935) Signed-off-by: marun Co-authored-by: Stephen Buttolph commit bd3eb68ce20adfdd0a74b3624d32e308297bf464 Author: marun Date: Wed Apr 17 12:06:55 2024 -0700 [ci] Add govulncheck job and update x/net as per its recommendation (#2948) Signed-off-by: marun Co-authored-by: Stephen Buttolph commit 389a4ee95c357d8d0613f50b4f6caffb2f62f206 Author: Stephen Buttolph Date: Tue Apr 16 21:05:05 2024 -0400 Sync primary network checkpoints during bootstrapping (#2752) Signed-off-by: Stephen Buttolph Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 7975cb723fa17d909017db6578252642ba796a62 Author: Stephen Buttolph Date: Mon Apr 15 23:31:49 2024 -0400 Split ManuallyTrack into ManuallyTrack and ManuallyGossip (#2940) commit 8d0de006d5cb218a92c31946bed9503eb5acb572 Author: Stephen Buttolph Date: Mon Apr 15 21:58:45 2024 -0400 Remove unused `validators.Manager` mock (#2944) commit a60c6b24062f4a238a67796c209f596fc5f1b0ad Author: marun Date: Tue Apr 16 02:19:53 2024 +0200 CI: ensure image build job is compatible with merge queue (#2941) commit 39b10ad5cf0ab6b45a0311bf6fd39506e5783479 Author: Stephen Buttolph Date: Mon Apr 15 16:31:58 2024 -0400 Include consensus decisions into logs (#2943) commit e2d4a569c60fa971bb57cb611136c0811dd1465c Author: Stephen Buttolph Date: Fri Apr 12 15:25:21 2024 -0400 Specify golang patch version in go.mod (#2938) commit acd76c794ee2e69f42f5064a7207dbc2630ac334 Author: Stephen Buttolph Date: Fri Apr 12 11:49:48 2024 -0400 Improve networking README (#2937) commit a804ae14b93dffcda9a846b86ddf7c6685a5c796 Author: marun Date: Fri Apr 12 04:59:25 2024 +0200 Enable creation of multi-arch docker images (#2914) commit d143b2b665c6d8c4b9498109b5437de945afbca5 Author: Stephen Buttolph Date: Thu Apr 11 19:03:35 2024 -0400 Remove uptimes from Pong messages (#2936) commit 
f461ec4db86236641a73b455bfd7c2f97f7671fa Author: Dan Laine Date: Wed Apr 10 16:23:56 2024 -0400 Assign instead of append to `keys` slice (#2932) Co-authored-by: Stephen Buttolph commit 5c070f8ccefcf8ef5b7c77d17da88bccd05298fc Author: Stephen Buttolph Date: Wed Apr 10 15:48:29 2024 -0400 Add hashing interface to merkledb (#2930) commit 4fecb498f6b8577edf85e6b67b29863a42460eb7 Author: Stephen Buttolph Date: Wed Apr 10 13:55:53 2024 -0400 Improve logging of startup and errors in bootstrapping (#2933) commit 1d20461152b95841b27177fef3eb083881937ab9 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Apr 10 11:13:30 2024 -0400 [vms/proposervm] Remove `getForkHeight()` (#2929) commit 1ed9280d576b951222edf4fe952a45c9bb470998 Author: Stephen Buttolph Date: Wed Apr 10 10:19:27 2024 -0400 Optimize intermediateNodeDB.constructDBKey (#2928) commit 416fbdf1f783c40f21e7009a9f06d192e69ba9b5 Author: Stephen Buttolph Date: Tue Apr 9 20:40:14 2024 -0400 Optimize merkledb metrics (#2927) commit 3c7832e2685abf9e64c8980600d4c33fdb86c10d Author: Stephen Buttolph Date: Tue Apr 9 17:14:31 2024 -0400 Remove duplicate metrics increment (#2926) commit e8904ae11aa144ae601bb9795682d11c3fb178ee Author: Stephen Buttolph Date: Tue Apr 9 16:32:28 2024 -0400 Update versions for v1.11.4 (#2916) commit 1040ceb981c4024a44852aa234269547efa8be3e Author: Stephen Buttolph Date: Mon Apr 8 18:21:51 2024 -0400 Remove memory allocations from merkledb iteration (#2925) commit 5d763515836b9da9a7da21481cfd39f418c48519 Author: Stephen Buttolph Date: Mon Apr 8 16:51:43 2024 -0400 Remove value_node_db batch (#2922) commit 0ba0ace96f81a79f4f0d99fc5b7464e23a273822 Author: Stephen Buttolph Date: Mon Apr 8 16:12:57 2024 -0400 Refactor `MerkleDB.commitChanges` (#2921) commit b9033b00b1070002a671bd4d0c71c8dd767a157d Author: Stephen Buttolph Date: Mon Apr 8 15:39:50 2024 -0400 Implement `utils.BytesPool` to replace `sync.Pool` for byte slices (#2920) commit 323d03b991df624564d833bfc67ce0f3e52ac21d 
Author: Stephen Buttolph Date: Fri Apr 5 16:30:01 2024 -0400 Fix comment and remove unneeded allocation (#2919) commit 4a1d0bbaf0449c6ebca2a6ea2d8543e245b3c846 Author: Stephen Buttolph Date: Fri Apr 5 13:38:59 2024 -0400 Allow pre-allocating `linked.Hashmap` (#2918) commit 88d304c31c55167beccf1b613e671634d89c106f Author: Stephen Buttolph Date: Fri Apr 5 13:00:11 2024 -0400 Add `.Clear()` to `linked.Hashmap` (#2917) commit 0eea0bd820d6c74830edf5cc69a1e4a74247ec28 Author: Stephen Buttolph Date: Thu Apr 4 18:24:18 2024 -0400 Remove cancellation for Send*AppRequest messages (#2915) commit cca7419b8745bbee1032aa137229c859bfbbcad6 Author: Stephen Buttolph Date: Thu Apr 4 11:14:15 2024 -0400 Fix MerkleDB crash recovery (#2913) commit b44feeb8d4a7f137f54dd252a8b001a53ef47d14 Author: Stephen Buttolph Date: Wed Apr 3 19:49:53 2024 -0400 Remove `linked.Hashmap` locking (#2911) commit 0fea82ed36376d32809e983be1b2573f71b7cd9d Author: Stephen Buttolph Date: Wed Apr 3 19:00:22 2024 -0400 Avoid allocating new list entries (#2910) commit d3a74eb2d491945f011afda7ce914cbd930262e1 Author: Stephen Buttolph Date: Wed Apr 3 17:57:33 2024 -0400 Use generic linked list (#2909) commit 93b90006917df93df6de10a819c4005d81777445 Author: Stephen Buttolph Date: Wed Apr 3 17:29:21 2024 -0400 Remove full message from error logs (#2912) commit e1954bbc4479de99e999b616e750c69bbb2393bc Author: Stephen Buttolph Date: Wed Apr 3 17:27:29 2024 -0400 Implement generic `linked.List` (#2908) Co-authored-by: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> commit f786a242f85bc73da5d38e586817544f046c9eea Author: marun Date: Wed Apr 3 19:18:29 2024 +0200 [tmpnet] Misc cleanup to support xsvm warp test PR (#2903) commit d10393133420af982d73d136c4376b99ef845d4a Author: Stephen Buttolph Date: Wed Apr 3 12:47:16 2024 -0400 Rename linkedhashmap package to `linked` (#2907) commit d4507bf8bb41472e3872b3c04415b39454ca5a09 Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue Apr 2 20:31:00 
2024 -0400 Remove AddEphemeralNode (#2887) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit d7452d38435b87ac25479537ddd303ee1d1e3286 Author: Stephen Buttolph Date: Tue Apr 2 20:21:29 2024 -0400 Reuse key buffers during hashing (#2902) commit 5be62de093bb6f344cc3fc9e66588eeb23530c00 Author: Stephen Buttolph Date: Tue Apr 2 17:16:42 2024 -0400 Conditionally allocate WaitGroup memory (#2901) commit e7b14e490cce43090468f46f442c4bef153d6164 Author: Stephen Buttolph Date: Tue Apr 2 16:50:28 2024 -0400 Move bootstrapping queue out of common (#2856) commit 6699924d5f6d5bc4c9bdee1e1097574e643d1a54 Author: Stephen Buttolph Date: Tue Apr 2 16:44:05 2024 -0400 Optimize key creation in hashing (#2899) commit e4b82cf74cd468da0a37da419ac63a2240cf33fe Author: Stephen Buttolph Date: Tue Apr 2 16:23:45 2024 -0400 Remove usage of bytes.Buffer and bytes.Reader (#2896) commit 434db9cdaa043cd9439a6795b3a11627b5bc12a6 Author: Stephen Buttolph Date: Tue Apr 2 14:41:41 2024 -0400 Improve tracing of merkledb trie updates (#2897) commit f7def4dde17d150c79e1c089bcf858d461830076 Author: Stephen Buttolph Date: Tue Apr 2 13:47:56 2024 -0400 Improve performance of marshalling small keys (#2895) commit 00d4e0a3838e1aa7e10cd26f16bf2310a5946ad8 Author: Stephen Buttolph Date: Tue Apr 2 11:21:26 2024 -0400 Optimize hashing of leaf nodes (#2894) commit 617a9e2c1be63916b306dcf0c453d369cd70cfbd Author: Stephen Buttolph Date: Tue Apr 2 11:01:46 2024 -0400 Interval tree syncing integration (#2855) commit 9a0c85218c10f9916ed623f4de2302e61c3de995 Author: Stephen Buttolph Date: Tue Apr 2 09:38:12 2024 -0400 Remove memory alloc from encodeDBNode (#2893) commit 4163dcecf09429aedc2fafd110ec3780660aa2f2 Author: Stephen Buttolph Date: Fri Mar 29 18:20:48 2024 -0400 Move functions so that encode and decode are adjacent (#2892) commit ce8253cb91ffcda6464ad5c5d7654bc4c35a9d3c Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 29 16:56:45 2024 -0400 
[vms/platformvm] Miscellaneous testing cleanups (#2891) commit 2dbc9ba9f37b3c9581739efa2752f61ca32ba80e Author: Stephen Buttolph Date: Fri Mar 29 16:20:53 2024 -0400 Optimize merkledb hashing (#2886) commit d046f29358b94765838669665dbf6b55c3419b2a Author: marun Date: Fri Mar 29 15:34:16 2024 +0100 `ci`: Skip monitoring if secrets are not present (#2880) commit 835d9ff49c8eb9c514fcbf71418db2a6de36cbe5 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 29 09:39:54 2024 -0400 [vms/platformvm] Minimize exported functions in `txstest` (#2888) commit b01d98d3441460e590916cbe6c512c9b0000ab5f Author: Stephen Buttolph Date: Fri Mar 29 00:43:33 2024 -0400 Remove merkledb codec struct (#2883) commit 10b881f24cfa4d889f44f298286bf21ffdb1998b Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Mar 28 21:54:25 2024 -0400 [components/avax] Remove `AtomicUTXOManager` interface (#2884) commit 2a0bd083259a71e505bced3a2d5a5b0743e30c2e Author: Stephen Buttolph Date: Thu Mar 28 19:53:12 2024 -0400 Optimize encodeUint (#2882) commit ec347cb8ea3bdf011fe399ef4dcb86180a7db0fd Author: Alberto Benegiamo Date: Fri Mar 29 00:19:14 2024 +0100 [vms/platformvm] Use `wallet` sdk in `txstest.Builder` (#2751) Co-authored-by: Stephen Buttolph Co-authored-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> commit 70cd8e1860db866b7e4abbbe094fe8e147665399 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Mar 28 17:27:12 2024 -0400 [vms/platformvm] Declare `maxPageSize` in `service.go` (#2881) commit 42aefda99557411b1389cbe8b1a7e11b9b92890e Author: Stephen Buttolph Date: Wed Mar 27 17:36:37 2024 -0400 Add tests for inefficient string formatting (#2878) commit cb9386ff2942616697e26212f2b39aa26a18f344 Author: marun Date: Wed Mar 27 21:52:17 2024 +0100 `tmpnet`: Improve subnet configuration (#2871) Signed-off-by: Stephen Buttolph Co-authored-by: Stephen Buttolph commit 2f1605785e24bdfd4953280f5e7a1d99d5f6d88f Author: 
Stephen Buttolph Date: Wed Mar 27 16:04:21 2024 -0400 Update health API readme (#2875) commit d60affa4f17e51233b4004a4f51a4dee75d4510a Author: Alberto Benegiamo Date: Wed Mar 27 17:09:52 2024 +0100 Cleanup codec constants (#2699) Co-authored-by: Stephen Buttolph commit 402f96f8b8e3d3882bc3d08d42e9173196dd4af9 Author: Stephen Buttolph Date: Tue Mar 26 17:06:43 2024 -0400 Implement interval tree to replace bootstrapping jobs queue (#2756) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 800020b3f1a1d1c1f7f155f75b80a7b3464b969a Author: Meaghan FitzGerald Date: Tue Mar 26 14:10:34 2024 -0600 docs migration (#2845) commit 0f6b123453f4d596fe7e7463d2f6a7ca8b141784 Author: marun Date: Tue Mar 26 19:12:18 2024 +0100 Add detail to tmpnet metrics documentation (#2854) Signed-off-by: Stephen Buttolph Co-authored-by: Stephen Buttolph commit f57f0f22e995b1dc7f431d9c4ae083a888fd912c Author: Stephen Buttolph Date: Tue Mar 26 13:59:34 2024 -0400 Reindex P-chain blocks (#2869) commit f945aa5dc477f6d5b33a8bdd79bbd53f1b137d73 Author: Stephen Buttolph Date: Tue Mar 26 13:41:51 2024 -0400 indicies -> indices (#2873) commit c896704c142bf84147204fac151a057a6e5f6576 Author: marun Date: Mon Mar 25 19:19:58 2024 +0100 `tmpnet`: Ensure nodes are properly detached from the parent process (#2859) Signed-off-by: marun Signed-off-by: Stephen Buttolph Co-authored-by: Stephen Buttolph Co-authored-by: Darioush Jalali commit 9833b45b3b766813d36df2811d35d2bd8942897e Author: Stephen Buttolph Date: Sat Mar 23 22:21:23 2024 -0400 Revert removal of legacy P-chain block parsing (#2866) commit 27bea09b16e86050acd9ca02da689522844311b9 Author: Stephen Buttolph Date: Fri Mar 22 18:01:50 2024 -0400 Push antithesis images (#2864) commit 95edd92983af356f5f277267099b83f4e25ba3d9 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Mar 22 10:46:12 2024 -0400 Bump 
github.com/consensys/gnark-crypto from 0.10.0 to 0.12.1 (#2862) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> commit 7a67e5a8933891a9ad45a252467d6cdae4c63779 Author: Stephen Buttolph Date: Thu Mar 21 18:15:24 2024 -0400 Update versions for v1.11.3 (#2852) Co-authored-by: Darioush Jalali commit ffdb67f315e5edbb40666909b308b0d5346e2ee8 Author: Stephen Buttolph Date: Thu Mar 21 15:39:56 2024 -0400 Remove duplicate log (#2860) commit 14cdc04ea44a6b01fe1a308714e0a2e4e24b0213 Author: Stephen Buttolph Date: Thu Mar 21 11:43:13 2024 -0400 Remove useless bootstrapping metric (#2858) commit 4d9bfdd533cea2b2a35bdfd7fb8df82a7c6d8b40 Author: marun Date: Thu Mar 21 16:42:00 2024 +0100 `tmpnet`: Reuse dynamically-allocated API port across restarts (#2857) commit 23e541781bb8d4ccdc03aefa78b1cd0e11dd14a3 Author: Stephen Buttolph Date: Wed Mar 20 01:48:04 2024 -0400 Remove fallback validator height indexing (#2801) commit 01186111eef43c613f3aaba2be66e4b56513e6e5 Author: Alberto Benegiamo Date: Tue Mar 19 18:58:15 2024 +0100 X-Chain - repackaged wallet backends (#2762) Co-authored-by: Stephen Buttolph commit 9cef7d30858f382f163b3d9c1de0728442a0c2a0 Author: Alberto Benegiamo Date: Tue Mar 19 02:51:27 2024 +0100 P-Chain - repackaged wallet backends (#2757) Co-authored-by: Stephen Buttolph commit e88e56549cf9703f809c58329d149ec04a3cc6e0 Author: marun Date: Mon Mar 18 19:18:53 2024 +0100 `tmpnet`: Enable collection of logs and metrics (#2820) commit 6249babd7fec9b247ebcc20ea2f60738013907ae Author: Alberto Benegiamo Date: Mon Mar 18 19:11:00 2024 +0100 Dynamic Fees - Add E Upgrade boilerplate (#2597) Co-authored-by: Stephen Buttolph commit 6a3661b2512af004e607cdce62efc350a60d310b Author: aaronbuchwald Date: Sun Mar 17 01:45:33 2024 -0400 Remove verify height index (#2634) Co-authored-by: Stephen Buttolph commit a18c4a34b85e2f26a86b4189540835cbcd255c76 Author: dependabot[bot] 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Sat Mar 16 16:38:40 2024 +0000 Bump bufbuild/buf-setup-action from 1.29.0 to 1.30.0 (#2842) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit b62846fa8f09e85e8e4cfe49825c301d20124f84 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Mar 15 15:28:36 2024 +0000 Bump google.golang.org/protobuf from 1.32.0 to 1.33.0 (#2849) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 5eaa9c22bb64aef948a5120f139f93219add16c1 Author: Derek Rada Date: Fri Mar 15 08:00:29 2024 -0700 packer build (#2806) Co-authored-by: yevhenvolchenko commit 598018b1ee42edaee18fbf9266a34fa7553ef7a9 Author: marun Date: Thu Mar 14 20:38:07 2024 -0700 `tmpnet`: Add a UUID to temporary networks to support metrics collection (#2763) commit 4e2d0057411452891c3d8e466c51b866dac7acbf Author: Dan Laine Date: Thu Mar 14 16:22:59 2024 -0400 update merkledb readme to specify key length is in bits (#2840) commit 12cd5ec53a6fc6226256565c8048b79d3aae76eb Author: Stephen Buttolph Date: Tue Mar 12 20:52:28 2024 -0400 Allow configuring push gossip to send txs to validators by stake (#2835) commit f0166fdb2ae8b949290b8f7c053e18647f768d7d Author: Dan Laine Date: Tue Mar 12 19:14:20 2024 -0400 merkledb metric naming nits (#2844) commit 31c0ce368305df449b0f9b26ac40d458c4faca06 Author: Stephen Buttolph Date: Tue Mar 12 19:13:48 2024 -0400 Add Antithesis docker compose file (#2838) Signed-off-by: Stephen Buttolph Co-authored-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> commit 6649530f39ba17db4965e9d088f8d9c8cd4fb932 Author: Stephen Buttolph Date: Tue Mar 12 18:43:56 2024 -0400 Add antithesis PoC workload (#2796) commit ddf66eaed1659c2caa8531bbda83b360ca85900e Author: Alberto Benegiamo Date: Tue Mar 12 00:31:13 
2024 +0100 P-chain: Improve GetValidatorsSet error expressivity (#2808) Co-authored-by: Stephen Buttolph commit f3abe3ca760e3579b863316431ec49706ca70ccd Author: Stephen Buttolph Date: Mon Mar 11 09:37:40 2024 -0400 Remove engine type handling for everything other than GetAncestors (#2800) commit 6ec6a62b7fd2adb6101b66d5aaa2352981fd6322 Author: Stephen Buttolph Date: Mon Mar 11 09:09:23 2024 -0400 Remove Durango codec check (#2818) commit 2196015bf199fb237a99eec11e8ac48a89aadc6c Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon Mar 11 09:09:02 2024 -0400 Remove Pre-Durango TLS certificate parsing logic (#2831) Co-authored-by: Stephen Buttolph commit d003d29d6670e9b3deb90d7ac85d66a402d20d89 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 8 23:12:16 2024 -0500 Remove legacy p2p message handling (#2833) Co-authored-by: Stephen Buttolph commit b15c7431c1ca820c6a56e7806510283af699f7d2 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 8 19:15:54 2024 -0500 [utils/compression] Remove gzip compressor (#2839) commit f02d4630ab98cc00839751c9814aed9bfa5f67e7 Author: Stephen Buttolph Date: Fri Mar 8 13:43:50 2024 -0500 Prevent zero length values in slices and maps in codec (#2819) commit d2d09c2bb15d3d44a4d6bf6ddaad8b14bd3e47de Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 8 13:35:29 2024 -0500 [vms/platformvm] Remove state pruning logic (#2825) commit 8fedfd99152d8af9dd186314f3ee057e484d989a Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 8 13:31:11 2024 -0500 [vms/avm] Remove `snow.Context` from `Network` (#2834) commit 73b6c60be489b2bc77a0b0fd5c9845ba1aa06010 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 8 12:14:50 2024 -0500 [vms/avm] Cleanup `GetTx` + remove state pruning logic (#2826) commit b8cd687c1e36cf3fa2722522ed55809668b6a780 Author: Dhruba Basu 
<7675102+dhrubabasu@users.noreply.github.com> Date: Fri Mar 8 11:03:26 2024 -0500 [network/peer] Disconnect from peers who only send legacy version field (#2830) Co-authored-by: Stephen Buttolph commit 4be015b9696ed6cbc4a0597c9f2ed705cc3f83d3 Author: Stephen Buttolph Date: Fri Mar 8 10:55:34 2024 -0500 Combine AppGossip and AppGossipSpecific (#2836) commit a593cc4147b305153dba99655773c3f934dfe703 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Mar 7 17:45:11 2024 -0500 [snow/networking] Enforce `PreferredIDAtHeight` in `Chits` messages (#2827) commit 50ca08e6f92bdb46256df6b39a17707125a8fddb Author: Stephen Buttolph Date: Thu Mar 7 15:30:34 2024 -0500 Remove pre-Durango checks in BLS key verification (#2824) commit 1340ccef2844b8739fea5652ce21ad57ac422834 Author: Stephen Buttolph Date: Thu Mar 7 15:16:23 2024 -0500 Remove pre-Durango block building logic and verification (#2823) commit 67b1aa0639e80a3f81dc0fdced713547afa50f32 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Mar 6 16:21:35 2024 -0500 [vms/platformvm] Remove `ErrFutureStakeTime` check in `VerifyTx` (#2797) commit 639b9ca37034762143c712f4f11f48ad788a5a6f Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Mar 6 13:08:15 2024 -0500 [vms/platformvm] Remove `GetPendingValidators` API (#2817) commit 90a13f361bda434cbedcb62f515f102594cb4c79 Author: Stephen Buttolph Date: Wed Mar 6 12:44:15 2024 -0500 Remove put gossip (#2790) commit dc0362266e72a9cb09a66f054a2a1676a008c615 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Mar 6 11:47:43 2024 -0500 Use `BaseTx` in P-chain wallet (#2731) Co-authored-by: Alberto Benegiamo Co-authored-by: Stephen Buttolph commit dc2c5d0ba789420f776f4f0e8b953359d03a0f3f Author: Stephen Buttolph Date: Wed Mar 6 11:17:20 2024 -0500 Remove bitmaskCodec (#2792) commit 5793120edb33190b997068d0a7f99d1dddc66e55 Author: Stephen Buttolph Date: Wed Mar 6 10:57:34 2024 -0500 Remove 
peerlist push gossip (#2791) commit 66ae8ef310411aaa67cf98ad96c0d4b085e1cce0 Author: Stephen Buttolph Date: Wed Mar 6 10:51:17 2024 -0500 Cleanup consensus metrics (#2815) commit 6c760983499b5f09228e107a74e729c0b746172f Author: Stephen Buttolph Date: Tue Mar 5 20:30:40 2024 -0500 Update minimum golang version to v1.21.8 (#2814) commit 4c4bfaa452017ef6d7331850300bd5b5fd86c305 Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue Mar 5 18:23:57 2024 -0500 Cleanup Duplicate Transitive Constructor (#2812) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 90a2b13c22116a87670bf05a006401000fe304a6 Author: Stephen Buttolph Date: Tue Mar 5 14:26:57 2024 -0500 Remove unused engine interface (#2811) commit 11372a43e948dd238dd67e8cfec10755c0be5e58 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon Mar 4 05:20:47 2024 -0500 [vms/platformvm] Remove `platform.getMaxStakeAmount` (#2795) commit f546ca45621061c0058887cd248cd020065cd7f9 Author: Stephen Buttolph Date: Fri Mar 1 17:34:24 2024 -0500 Remove double spaces (#2802) commit 257912c07fe9a5cdba20bdb2f69d89fb0295fed2 Author: Gauthier Leonard Date: Fri Mar 1 21:33:03 2024 +0100 Add BLS keys + signers config for local network (#2794) commit ff4bf3fbec8a1f4696f8af2047c8fe1cd0a24589 Author: Stephen Buttolph Date: Fri Mar 1 14:59:49 2024 -0500 Cleanup BLS naming and documentation (#2798) commit daeacb18aad7e537bca729ecf9ea29ddbffc53a1 Author: Stephen Buttolph Date: Thu Feb 29 19:47:22 2024 -0500 Update versions for v1.11.2 (#2788) commit caef151c4ac566d4cd3531edc0d45ee2236f3505 Author: Stephen Buttolph Date: Thu Feb 29 13:30:47 2024 -0500 Increase gossip size on first push (#2787) Signed-off-by: Stephen Buttolph Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit d1312cb5a9db756f330867f30d5775e2949c48a9 Author: Dan Laine Date: Thu Feb 29 10:45:32 2024 -0500 `merkledb` comment accuracy fixes (#2780) Signed-off-by: Dan Laine 
Co-authored-by: Darioush Jalali commit c691cfd1569448c95d0f4f3bc9295c17c256b311 Author: Dan Laine Date: Thu Feb 29 10:45:16 2024 -0500 `merkledb` style nits (#2783) commit 472ba7aa150e16ecc7599ec93aad448b0d8f5e85 Author: Dan Laine Date: Thu Feb 29 09:42:47 2024 -0500 `merkledb` -- style nit, remove var name `newView` to reduce shadowing (#2784) commit d6aac85271d716b3cf9bea1aef74e6d0ea50be4d Author: Dan Laine Date: Thu Feb 29 09:41:26 2024 -0500 `merkledb` -- rename metrics and add missing call (#2781) commit 6e0afdca644cb5c59a3d7c62021e62ebda7bc826 Author: Dan Laine Date: Thu Feb 29 09:39:58 2024 -0500 `merkledb` -- fix `hasValue` in `recordNodeDeleted` (#2779) commit 97900f72338fe60a976e34927a43e2f56df6a03e Author: Dan Laine Date: Thu Feb 29 09:39:53 2024 -0500 `merkledb` -- move compressedKey declaration to avoid usage of stale values in loop (#2777) commit e9ca612faafda09abd5946a32d54d4872f42f1f5 Author: Stephen Buttolph Date: Thu Feb 29 01:02:18 2024 -0500 Move AppGossip configs from SubnetConfig into ChainConfig (#2785) commit c5da9469ba7ac42a11b685e03f627a96ed29919f Author: Patrick O'Grady Date: Wed Feb 28 19:58:13 2024 -0700 [network/p2p] Redesign Push Gossip (#2772) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit 6dcd8e8adaeacb6d9e7f46654bfbd93639cbd22a Author: Stephen Buttolph Date: Fri Feb 23 17:55:56 2024 -0500 Reenable the upgrade tests (#2769) commit 6322e19eb66626a5ad8f3006a314969b87fa687e Author: Chan Date: Fri Feb 23 17:55:06 2024 -0500 Upgrade opentelemetry to v1.22.0 (#2702) commit 02d68b88dc6b779f6a1d4b5cf68a017281555c83 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri Feb 23 17:19:23 2024 -0500 Remove `defaultAddress` helper from platformvm service tests (#2767) commit 7c0466735026d3cd963e21543b8d691fc6615b40 Author: Stephen Buttolph Date: Wed Feb 21 18:26:52 2024 -0500 Remove 
Deprecated Auth API (#2759) commit 3558a9dacf7849b15f139baae965dce4b9a45bec Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed Feb 21 17:39:23 2024 -0500 `vms/platformvm`: Remove all keystore APIs except `ExportKey` and `ListAddresses` (#2761) commit 85d6b5500d2e55e939fd38203574a2e91a357588 Author: Stephen Buttolph Date: Wed Feb 21 17:32:12 2024 -0500 Remove deprecated IPC API (#2760) commit c60f7d2dd10c87f57382885b59d6fb2c763eded7 Author: Stephen Buttolph Date: Wed Feb 21 16:30:01 2024 -0500 Update version to v1.11.0 (#2758) commit 4ee5f199cd64afc1ff2c433d77467c28a5567044 Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Tue Feb 20 12:53:32 2024 -0500 fix test sender (#2755) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit e0738a5d3d486b00fc6346aff58b280944bf769b Author: Stephen Buttolph Date: Tue Feb 20 11:45:02 2024 -0500 Add keys values to bimap (#2754) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- .github/actions/install-focal-deps/action.yml | 16 + .../actions/set-go-version-in-env/action.yml | 16 + .../set-go-version-in-env/go_version_env.sh | 14 + .../setup-go-for-project-v3/action.yml | 24 + .../actions/setup-go-for-project/action.yml | 25 + .../ubuntu-jammy-x86_64-public-ami.json | 60 - .../ubuntu-jammy-x86_64-public-ami.pkr.hcl | 81 + .github/workflows/buf-push.yml | 2 +- .github/workflows/build-linux-binaries.yml | 17 +- .github/workflows/build-macos-release.yml | 5 +- .github/workflows/build-public-ami.yml | 25 +- .../workflows/build-ubuntu-amd64-release.yml | 13 +- .../workflows/build-ubuntu-arm64-release.yml | 18 +- .github/workflows/build-win-release.yml | 5 +- .github/workflows/ci.yml | 195 +- .github/workflows/codeql-analysis.yml | 5 +- .github/workflows/fuzz.yml | 5 +- .github/workflows/fuzz_merkledb.yml | 5 +- .../workflows/notify-metrics-availability.sh | 19 + .../workflows/publish_antithesis_images.yml | 46 + 
.github/workflows/publish_docker_image.yml | 13 +- .github/workflows/publish_image.sh | 29 - .github/workflows/trigger-antithesis-runs.yml | 53 + .github/workflows/update-ami.py | 26 +- .golangci.yml | 28 +- CONTRIBUTING.md | 2 +- Dockerfile | 49 +- README.md | 13 +- RELEASES.md | 629 +++- api/admin/service.go | 2 +- api/admin/service.md | 442 +++ api/auth/auth.go | 304 -- api/auth/auth_test.go | 368 --- api/auth/claims.go | 18 - api/auth/response.go | 40 - api/auth/service.go | 74 - api/health/health.go | 30 +- api/health/metrics.go | 27 - api/health/service.md | 305 ++ api/health/worker.go | 61 +- api/info/client.go | 5 +- api/info/service.go | 11 +- api/info/service.md | 689 +++++ api/ipcs/client.go | 60 - api/ipcs/service.go | 139 - api/keystore/codec.go | 4 +- api/keystore/service.md | 290 ++ api/metrics/client.go | 68 + api/metrics/gatherer_test.go | 11 +- api/metrics/label_gatherer.go | 76 + api/metrics/label_gatherer_test.go | 217 ++ api/metrics/multi_gatherer.go | 78 +- api/metrics/multi_gatherer_test.go | 134 - api/metrics/optional_gatherer.go | 61 - api/metrics/optional_gatherer_test.go | 71 - api/metrics/prefix_gatherer.go | 88 + api/metrics/prefix_gatherer_test.go | 204 ++ api/metrics/service.md | 40 + api/server/metrics.go | 17 +- api/server/server.go | 8 +- api/server/wrapper.go | 11 - cache/lru_cache.go | 10 +- cache/lru_sized_cache.go | 8 +- cache/metercacher/cache.go | 49 +- cache/metercacher/metrics.go | 105 +- cache/unique_cache.go | 33 +- chains/atomic/codec.go | 3 +- chains/atomic/state.go | 10 +- chains/linearizable_vm.go | 3 - chains/manager.go | 437 ++- chains/subnets_test.go | 8 +- codec/codec.go | 2 + codec/hierarchycodec/codec.go | 14 +- codec/hierarchycodec/codec_test.go | 22 +- codec/linearcodec/codec.go | 25 +- codec/linearcodec/codec_test.go | 22 +- codec/manager.go | 6 +- codec/reflectcodec/type_codec.go | 84 +- codec/test_codec.go | 116 +- config/config.go | 155 +- config/config.md | 1410 +++++++++ config/config_test.go | 23 +- 
config/flags.go | 33 +- config/keys.go | 132 +- config/viper.go | 8 +- database/encdb/codec.go | 4 +- database/leveldb/db.go | 4 +- database/leveldb/db_test.go | 4 +- database/leveldb/metrics.go | 102 +- database/linkeddb/codec.go | 3 +- database/meterdb/db.go | 309 +- database/meterdb/db_test.go | 4 +- database/meterdb/metrics.go | 96 - database/{pebble => pebbledb}/batch.go | 26 +- database/{pebble => pebbledb}/batch_test.go | 4 +- database/{pebble => pebbledb}/db.go | 65 +- database/{pebble => pebbledb}/db_test.go | 4 +- database/{pebble => pebbledb}/iterator.go | 19 +- database/prefixdb/db.go | 147 +- database/prefixdb/db_test.go | 11 + database/test_database.go | 2 +- genesis/bootstrappers.go | 6 +- genesis/checkpoints.go | 35 + genesis/checkpoints.json | 612 ++++ genesis/generate/checkpoints/main.go | 121 + genesis/generate/validators/main.go | 63 + genesis/genesis.go | 1 - genesis/genesis_fuji.go | 3 +- genesis/genesis_local.go | 3 +- genesis/genesis_local.json | 32 +- genesis/genesis_mainnet.go | 3 +- genesis/genesis_test.go | 2 +- genesis/params.go | 34 +- genesis/validators.go | 34 + genesis/validators.json | 2529 ++++++++++++++++ go.mod | 118 +- go.sum | 331 +-- indexer/codec.go | 3 +- indexer/examples/p-chain/main.go | 6 +- indexer/examples/x-chain-blocks/main.go | 11 +- indexer/service.md | 584 ++++ ipcs/chainipc.go | 133 - ipcs/eventsocket.go | 186 -- ipcs/socket/socket.go | 272 -- ipcs/socket/socket_test.go | 71 - ipcs/socket/socket_unix.go | 85 - ipcs/socket/socket_windows.go | 34 - main/main.go | 19 +- message/creator.go | 4 - message/fields.go | 19 +- message/inbound_msg_builder.go | 14 +- message/inbound_msg_builder_test.go | 11 - message/internal_msg_builder.go | 104 +- message/messages.go | 128 +- message/messages_benchmark_test.go | 6 +- message/messages_test.go | 79 +- message/mock_outbound_message_builder.go | 65 +- message/outbound_msg_builder.go | 65 +- message/outbound_msg_builder_test.go | 1 - nat/nat.go | 37 +- nat/no_router.go | 23 +- 
nat/pmp.go | 8 +- nat/upnp.go | 16 +- network/README.md | 215 +- network/certs_test.go | 100 - network/config.go | 32 +- network/dialer/dialer.go | 6 +- network/dialer/dialer_test.go | 20 +- network/dialer_test.go | 27 +- network/example_test.go | 3 +- network/ip_tracker.go | 291 +- network/ip_tracker_test.go | 409 ++- network/listener_test.go | 11 +- network/metrics.go | 118 +- network/network.go | 194 +- network/network_test.go | 126 +- network/p2p/client.go | 57 +- network/p2p/gossip/gossip.go | 498 +++- network/p2p/gossip/gossip_test.go | 463 ++- network/p2p/gossip/gossipable.go | 2 + network/p2p/gossip/handler.go | 32 +- network/p2p/gossip/test_gossip.go | 5 + network/p2p/handler.go | 4 +- network/p2p/network.go | 105 +- network/p2p/network_test.go | 112 +- network/p2p/router.go | 207 +- network/p2p/throttler_handler.go | 3 +- network/p2p/validators.go | 109 +- network/p2p/validators_test.go | 136 +- network/peer/config.go | 15 +- network/peer/example_test.go | 11 +- network/peer/info.go | 5 +- network/peer/ip.go | 11 +- network/peer/ip_signer.go | 15 +- network/peer/ip_signer_test.go | 21 +- network/peer/ip_test.go | 34 +- network/peer/metrics.go | 254 +- network/peer/msg_length.go | 34 +- network/peer/msg_length_test.go | 76 +- network/peer/peer.go | 560 ++-- network/peer/peer_test.go | 444 ++- network/peer/set.go | 4 +- network/peer/test_peer.go | 23 +- network/peer/tls_config.go | 3 +- network/peer/upgrader.go | 30 +- network/test_cert_1.crt | 27 - network/test_cert_2.crt | 27 - network/test_cert_3.crt | 27 - network/test_key_1.key | 52 - network/test_key_2.key | 52 - network/test_key_3.key | 52 - network/test_network.go | 233 +- network/throttling/bandwidth_throttler.go | 7 +- .../throttling/bandwidth_throttler_test.go | 2 +- .../inbound_conn_upgrade_throttler.go | 27 +- .../inbound_conn_upgrade_throttler_test.go | 13 +- .../inbound_msg_buffer_throttler.go | 11 +- .../inbound_msg_buffer_throttler_test.go | 4 +- .../throttling/inbound_msg_byte_throttler.go 
| 32 +- .../inbound_msg_byte_throttler_test.go | 8 +- network/throttling/inbound_msg_throttler.go | 9 +- network/throttling/outbound_msg_throttler.go | 30 +- .../throttling/outbound_msg_throttler_test.go | 3 - network/tracked_ip.go | 9 +- network/tracked_ip_test.go | 50 + node/config.go | 34 +- node/node.go | 448 +-- node/overridden_manager.go | 8 +- proto/Dockerfile.buf | 22 - proto/README.md | 7 +- proto/appsender/appsender.proto | 13 +- proto/http/http.proto | 4 +- proto/message/tx.proto | 16 - proto/p2p/p2p.proto | 40 +- proto/pb/aliasreader/aliasreader.pb.go | 2 +- proto/pb/appsender/appsender.pb.go | 205 +- proto/pb/appsender/appsender_grpc.pb.go | 37 - proto/pb/http/http.pb.go | 6 +- .../http/responsewriter/responsewriter.pb.go | 2 +- proto/pb/io/reader/reader.pb.go | 2 +- proto/pb/io/writer/writer.pb.go | 2 +- proto/pb/keystore/keystore.pb.go | 2 +- proto/pb/message/tx.pb.go | 232 -- proto/pb/messenger/messenger.pb.go | 2 +- proto/pb/net/conn/conn.pb.go | 2 +- proto/pb/p2p/p2p.pb.go | 713 ++--- proto/pb/rpcdb/rpcdb.pb.go | 2 +- proto/pb/sdk/sdk.pb.go | 24 +- proto/pb/sharedmemory/sharedmemory.pb.go | 2 +- proto/pb/sync/sync.pb.go | 2 +- proto/pb/validatorstate/validator_state.pb.go | 2 +- proto/pb/vm/runtime/runtime.pb.go | 2 +- proto/pb/vm/vm.pb.go | 812 +++-- proto/pb/vm/vm_grpc.pb.go | 37 - proto/pb/warp/message.pb.go | 2 +- proto/sdk/sdk.proto | 2 - proto/vm/vm.proto | 8 +- .../build_antithesis_avalanchego_workload.sh | 11 + scripts/build_antithesis_images.sh | 146 + scripts/build_antithesis_xsvm_workload.sh | 11 + scripts/build_avalanche.sh | 28 - scripts/build_image.sh | 89 +- scripts/build_test.sh | 15 +- scripts/constants.sh | 21 +- scripts/lint.sh | 2 +- scripts/mocks.mockgen.source.txt | 1 - scripts/mocks.mockgen.txt | 2 +- scripts/protobuf_codegen.sh | 14 +- scripts/run_prometheus.sh | 120 + scripts/run_promtail.sh | 115 + scripts/tests.build_antithesis_images.sh | 67 + scripts/tests.build_image.sh | 84 + scripts/tests.e2e.existing.sh | 57 +- 
scripts/tests.e2e.sh | 19 +- scripts/tests.upgrade.sh | 11 +- snow/README.md | 2 +- snow/choices/test_decidable.go | 4 + snow/consensus/snowball/binary_snowball.go | 27 +- .../snowball/binary_snowball_test.go | 68 +- snow/consensus/snowball/binary_snowflake.go | 74 +- .../snowball/binary_snowflake_test.go | 57 +- snow/consensus/snowball/consensus.go | 54 +- .../snowball/consensus_performance_test.go | 3 +- snow/consensus/snowball/factory.go | 8 +- snow/consensus/snowball/flat.go | 15 +- snow/consensus/snowball/flat_test.go | 7 +- snow/consensus/snowball/nnary_snowball.go | 31 +- .../consensus/snowball/nnary_snowball_test.go | 58 +- snow/consensus/snowball/nnary_snowflake.go | 85 +- .../snowball/nnary_snowflake_test.go | 102 +- snow/consensus/snowball/parameters.go | 41 +- snow/consensus/snowball/parameters_test.go | 81 +- snow/consensus/snowball/test_snowflake.go | 145 + snow/consensus/snowball/tree.go | 49 +- snow/consensus/snowball/tree_test.go | 496 ++-- snow/consensus/snowball/unary_snowball.go | 34 +- .../consensus/snowball/unary_snowball_test.go | 38 +- snow/consensus/snowball/unary_snowflake.go | 75 +- .../snowball/unary_snowflake_test.go | 68 +- .../consensus/snowman/bootstrapper/sampler.go | 10 +- snow/consensus/snowman/consensus.go | 17 +- snow/consensus/snowman/consensus_test.go | 1190 +++----- snow/consensus/snowman/metrics.go | 70 +- snow/consensus/snowman/network_test.go | 21 +- .../snowman/poll/early_term_no_traversal.go | 133 +- .../poll/early_term_no_traversal_test.go | 35 +- snow/consensus/snowman/poll/set.go | 13 +- snow/consensus/snowman/poll/set_test.go | 48 +- snow/consensus/snowman/snowman_block.go | 13 +- snow/consensus/snowman/snowmantest/block.go | 102 + .../snowman/{ => snowmantest}/mock_block.go | 6 +- snow/consensus/snowman/test_block.go | 54 - snow/consensus/snowman/topological.go | 76 +- snow/consensus/snowman/traced_consensus.go | 10 - snow/context.go | 15 +- .../avalanche/bootstrap/bootstrapper.go | 177 +- 
.../avalanche/bootstrap/bootstrapper_test.go | 150 +- snow/engine/avalanche/bootstrap/config.go | 10 +- snow/engine/avalanche/bootstrap/metrics.go | 41 +- .../bootstrap}/queue/job.go | 0 .../bootstrap}/queue/jobs.go | 0 .../bootstrap}/queue/jobs_test.go | 0 .../bootstrap}/queue/parser.go | 0 .../bootstrap}/queue/state.go | 0 .../bootstrap}/queue/test_job.go | 0 .../bootstrap}/queue/test_parser.go | 0 snow/engine/avalanche/bootstrap/tx_job.go | 17 +- snow/engine/avalanche/bootstrap/vertex_job.go | 26 +- snow/engine/avalanche/getter/getter.go | 47 +- snow/engine/avalanche/getter/getter_test.go | 108 - snow/engine/avalanche/vertex/codec.go | 6 +- snow/engine/avalanche/vertex/mock_vm.go | 14 - .../common/appsender/appsender_client.go | 33 +- .../common/appsender/appsender_server.go | 16 +- snow/engine/common/mock_sender.go | 49 +- snow/engine/common/no_ops_handlers.go | 22 +- snow/engine/common/sender.go | 49 +- snow/engine/common/test_sender.go | 84 +- snow/engine/common/tracker/peers.go | 60 +- snow/engine/common/tracker/peers_test.go | 13 - snow/engine/snowman/block/mock_chain_vm.go | 14 - snow/engine/snowman/block/test_vm.go | 13 - snow/engine/snowman/block/vm.go | 16 - snow/engine/snowman/bootstrap/acceptor.go | 53 + snow/engine/snowman/bootstrap/block_job.go | 116 - snow/engine/snowman/bootstrap/bootstrapper.go | 431 ++- .../snowman/bootstrap/bootstrapper_test.go | 1286 ++------ snow/engine/snowman/bootstrap/config.go | 15 +- .../snowman/bootstrap/interval/blocks.go | 44 + .../snowman/bootstrap/interval/blocks_test.go | 137 + .../snowman/bootstrap/interval/interval.go | 35 + .../bootstrap/interval/interval_test.go | 255 ++ .../snowman/bootstrap/interval/state.go | 109 + .../engine/snowman/bootstrap/interval/tree.go | 188 ++ .../snowman/bootstrap/interval/tree_test.go | 389 +++ snow/engine/snowman/bootstrap/metrics.go | 27 +- snow/engine/snowman/bootstrap/storage.go | 278 ++ snow/engine/snowman/bootstrap/storage_test.go | 312 ++ snow/engine/snowman/config_test.go 
| 3 +- snow/engine/snowman/engine.go | 22 - snow/engine/snowman/getter/getter.go | 3 +- snow/engine/snowman/getter/getter_test.go | 39 +- snow/engine/snowman/issuer.go | 48 +- snow/engine/snowman/job/scheduler.go | 109 + snow/engine/snowman/job/scheduler_test.go | 338 +++ snow/engine/snowman/metrics.go | 182 +- .../snowman/syncer/state_syncer_test.go | 30 +- snow/engine/snowman/test_engine.go | 44 - snow/engine/snowman/traced_engine.go | 42 - snow/engine/snowman/transitive.go | 559 ++-- snow/engine/snowman/transitive_test.go | 2641 +++++++++-------- snow/engine/snowman/voter.go | 107 +- snow/event/blockable.go | 24 - snow/event/blocker.go | 92 - snow/event/blocker_test.go | 116 - snow/networking/benchlist/benchlist.go | 35 +- snow/networking/benchlist/benchlist_test.go | 4 + snow/networking/benchlist/manager.go | 23 +- snow/networking/benchlist/metrics.go | 36 - snow/networking/handler/handler.go | 197 +- snow/networking/handler/handler_test.go | 141 +- snow/networking/handler/health_test.go | 21 +- snow/networking/handler/message_queue.go | 42 +- .../handler/message_queue_metrics.go | 49 +- snow/networking/handler/message_queue_test.go | 35 +- snow/networking/handler/metrics.go | 95 +- snow/networking/router/chain_router.go | 17 +- .../networking/router/chain_router_metrics.go | 17 +- snow/networking/router/chain_router_test.go | 189 +- snow/networking/router/mock_router.go | 8 +- snow/networking/router/router.go | 3 +- snow/networking/router/traced_router.go | 6 +- snow/networking/sender/external_sender.go | 15 +- .../networking/sender/mock_external_sender.go | 23 +- snow/networking/sender/sender.go | 343 +-- snow/networking/sender/sender_test.go | 264 +- .../networking/sender/test_external_sender.go | 39 +- snow/networking/sender/traced_sender.go | 39 +- snow/networking/timeout/manager.go | 20 +- snow/networking/timeout/manager_test.go | 2 +- snow/networking/timeout/metrics.go | 151 +- snow/networking/tracker/resource_tracker.go | 35 +- snow/snowtest/snowtest.go 
| 14 +- .../gvalidators/validator_state_client.go | 11 +- .../gvalidators/validator_state_server.go | 4 +- .../gvalidators/validator_state_test.go | 4 +- snow/validators/logger.go | 2 +- snow/validators/manager.go | 35 +- snow/validators/manager_test.go | 378 ++- snow/validators/mock_manager.go | 226 -- snow/validators/set.go | 52 +- snow/validators/set_test.go | 111 +- staking/asn1.go | 7 - staking/certificate.go | 19 +- staking/local/README.md | 106 +- staking/local/signer1.key | 1 + staking/local/signer2.key | 1 + staking/local/signer3.key | 1 + staking/local/signer4.key | 1 + staking/local/signer5.key | 1 + staking/parse.go | 48 +- staking/parse_test.go | 74 +- staking/tls.go | 11 +- staking/tls_test.go | 7 + staking/verify.go | 52 - staking/verify_test.go | 53 + subnets/config.go | 14 - subnets/config.md | 110 + subnets/config_test.go | 3 +- tests/antithesis/README.md | 145 + .../Dockerfile.builder-instrumented | 46 + .../Dockerfile.builder-uninstrumented | 17 + .../antithesis/avalanchego/Dockerfile.config | 5 + tests/antithesis/avalanchego/Dockerfile.node | 30 + .../avalanchego/Dockerfile.workload | 28 + .../avalanchego/gencomposeconfig/main.go | 37 + tests/antithesis/avalanchego/main.go | 651 ++++ tests/antithesis/compose.go | 241 ++ tests/antithesis/config.go | 75 + tests/antithesis/init_db.go | 60 + tests/antithesis/node_health.go | 50 + tests/antithesis/xsvm/Dockerfile.config | 5 + tests/antithesis/xsvm/Dockerfile.node | 31 + tests/antithesis/xsvm/Dockerfile.workload | 28 + .../antithesis/xsvm/gencomposeconfig/main.go | 62 + tests/antithesis/xsvm/main.go | 183 ++ tests/e2e/README.md | 51 +- tests/e2e/banff/suites.go | 4 +- tests/e2e/c/dynamic_fees.go | 3 +- tests/e2e/c/interchain_workflow.go | 12 +- tests/e2e/e2e_test.go | 16 +- tests/e2e/p/interchain_workflow.go | 17 +- tests/e2e/p/permissionless_subnets.go | 4 +- tests/e2e/p/staking_rewards.go | 13 +- tests/e2e/p/validator_sets.go | 5 +- tests/e2e/p/workflow.go | 8 +- tests/e2e/vms/xsvm.go | 177 ++ 
tests/e2e/x/interchain_workflow.go | 12 +- tests/e2e/x/transfer/virtuous.go | 59 +- tests/fixture/e2e/env.go | 136 +- tests/fixture/e2e/flags.go | 61 +- tests/fixture/e2e/helpers.go | 47 +- tests/fixture/subnet/xsvm.go | 45 + tests/fixture/tmpnet/README.md | 139 +- tests/fixture/tmpnet/cmd/main.go | 12 +- tests/fixture/tmpnet/defaults.go | 41 +- .../tmpnet/detached_process_default.go | 17 + .../tmpnet/detached_process_windows.go | 12 + tests/fixture/tmpnet/flags.go | 20 +- tests/fixture/tmpnet/local_network.go | 67 + tests/fixture/tmpnet/network.go | 536 ++-- tests/fixture/tmpnet/network_config.go | 6 +- tests/fixture/tmpnet/network_test.go | 4 +- tests/fixture/tmpnet/node.go | 69 +- tests/fixture/tmpnet/node_config.go | 12 +- tests/fixture/tmpnet/node_process.go | 124 +- tests/fixture/tmpnet/subnet.go | 52 +- tests/fixture/tmpnet/utils.go | 8 + tests/http.go | 100 - tests/metrics.go | 83 + tests/upgrade/upgrade_test.go | 6 +- trace/noop.go | 18 +- utils/atomic.go | 30 +- utils/atomic_test.go | 45 + utils/beacon/beacon.go | 11 +- utils/beacon/set.go | 22 +- utils/beacon/set_test.go | 27 +- utils/bimap/bimap.go | 13 + utils/bimap/bimap_test.go | 12 +- utils/bytes.go | 71 +- utils/bytes_test.go | 91 + utils/compression/compressor_test.go | 12 - utils/compression/gzip_compressor.go | 91 - utils/compression/gzip_zip_bomb.bin | Bin 2096390 -> 0 bytes utils/compression/no_compressor.go | 3 - utils/compression/type.go | 5 - utils/compression/type_test.go | 6 +- utils/compression/zstd_compressor.go | 9 +- utils/constants/acps.go | 34 +- utils/constants/networking.go | 21 +- utils/constants/vm_ids.go | 29 + utils/crypto/bls/public.go | 32 +- utils/crypto/bls/public_test.go | 14 +- utils/crypto/bls/secret.go | 8 - utils/dynamicip/ifconfig_resolver.go | 21 +- utils/dynamicip/opendns_resolver.go | 17 +- utils/dynamicip/resolver.go | 4 +- utils/dynamicip/updater.go | 24 +- utils/dynamicip/updater_test.go | 48 +- utils/ips/claimed_ip_port.go | 11 +- utils/ips/dynamic_ip_port.go 
| 56 - utils/ips/ip.go | 57 + utils/ips/ip_port.go | 104 - utils/ips/ip_test.go | 176 -- utils/ips/lookup.go | 13 +- utils/ips/lookup_test.go | 12 +- utils/linked/hashmap.go | 166 ++ .../hashmap_test.go} | 56 +- utils/linked/list.go | 217 ++ utils/linked/list_test.go | 168 ++ utils/linkedhashmap/iterator.go | 76 - utils/linkedhashmap/linkedhashmap.go | 148 - utils/logging/logger.go | 4 + utils/metric/api_interceptor.go | 15 +- utils/metric/averager.go | 16 +- utils/metric/namespace.go | 7 +- utils/resource/metrics.go | 27 +- utils/resource/usage.go | 2 +- utils/sampler/uniform.go | 6 +- utils/sampler/uniform_best.go | 2 +- utils/sampler/uniform_replacer.go | 16 +- utils/sampler/uniform_resample.go | 16 +- utils/sampler/uniform_test.go | 32 +- utils/sampler/weighted.go | 6 +- utils/sampler/weighted_array.go | 6 +- utils/sampler/weighted_best.go | 2 +- utils/sampler/weighted_heap.go | 6 +- utils/sampler/weighted_linear.go | 6 +- utils/sampler/weighted_test.go | 16 +- utils/sampler/weighted_uniform.go | 6 +- utils/sampler/weighted_without_replacement.go | 4 +- .../weighted_without_replacement_generic.go | 17 +- .../weighted_without_replacement_test.go | 24 +- utils/timer/adaptive_timeout_manager.go | 31 +- utils/timer/adaptive_timeout_manager_test.go | 3 +- utils/window/window_test.go | 4 +- version/application.go | 4 +- version/application_test.go | 4 +- version/compatibility.go | 28 +- version/compatibility.json | 14 + version/compatibility_test.go | 27 +- version/constants.go | 25 +- version/parser.go | 26 +- version/parser_test.go | 54 - version/string.go | 53 +- version/string_test.go | 23 + vms/avm/block/block_test.go | 2 - vms/avm/block/builder/builder_test.go | 3 +- vms/avm/block/executor/block_test.go | 99 +- vms/avm/block/executor/manager_test.go | 19 +- vms/avm/block/parser.go | 8 +- vms/avm/client.go | 64 +- vms/avm/config.md | 61 + vms/avm/config/config.go | 8 +- vms/avm/config_test.go | 11 +- vms/avm/environment_test.go | 70 +- vms/avm/fxs/fx.go | 2 +- 
vms/avm/index_test.go | 42 +- vms/avm/metrics/metrics.go | 24 +- vms/avm/metrics/tx_metrics.go | 72 +- vms/avm/network/config.go | 41 +- vms/avm/network/gossip.go | 17 +- vms/avm/network/gossip_test.go | 8 +- vms/avm/network/network.go | 211 +- vms/avm/network/network_test.go | 328 +- vms/avm/service.go | 32 +- vms/avm/service.md | 2319 +++++++++++++++ vms/avm/service_test.go | 1144 ++++--- vms/avm/state/mock_state.go | 16 - vms/avm/state/state.go | 362 +-- vms/avm/state/state_test.go | 1 - vms/avm/state_test.go | 19 +- vms/avm/static_service.go | 2 - vms/avm/txs/base_tx_test.go | 2 - vms/avm/txs/create_asset_tx_test.go | 3 - vms/avm/txs/executor/executor_test.go | 4 - .../txs/executor/semantic_verifier_test.go | 5 - .../txs/executor/syntactic_verifier_test.go | 8 +- vms/avm/txs/export_tx_test.go | 2 - vms/avm/txs/import_tx_test.go | 2 - vms/avm/txs/initial_state_test.go | 13 +- vms/avm/txs/mempool/mempool.go | 214 +- vms/avm/txs/mempool/mempool_test.go | 259 +- vms/avm/txs/operation_test.go | 3 +- vms/avm/txs/parser.go | 9 +- vms/avm/txs/tx.go | 8 + vms/avm/txs/txstest/builder.go | 231 ++ vms/avm/txs/txstest/context.go | 25 + vms/avm/txs/txstest/utxos.go | 103 + vms/avm/vm.go | 58 +- vms/avm/vm_benchmark_test.go | 24 +- vms/avm/vm_regression_test.go | 134 +- vms/avm/vm_test.go | 634 ++-- vms/avm/wallet_service.go | 11 +- vms/avm/wallet_service_test.go | 17 +- vms/components/avax/asset_test.go | 3 +- vms/components/avax/atomic_utxos.go | 52 +- vms/components/avax/metadata.go | 59 - vms/components/avax/metadata_test.go | 22 - vms/components/avax/transferables_test.go | 9 +- vms/components/avax/utxo_fetching_test.go | 5 +- vms/components/avax/utxo_id_test.go | 3 +- vms/components/avax/utxo_state_test.go | 3 +- vms/components/avax/utxo_test.go | 3 +- vms/components/chain/state_test.go | 125 +- vms/components/keystore/codec.go | 5 +- vms/components/message/codec.go | 34 - vms/components/message/handler.go | 29 - vms/components/message/handler_test.go | 40 - 
vms/components/message/message.go | 84 - vms/components/message/message_test.go | 49 - vms/components/message/tx.go | 18 - vms/components/message/tx_test.go | 34 - vms/example/xsvm/Dockerfile | 31 + vms/example/xsvm/api/client.go | 34 + vms/example/xsvm/builder/builder.go | 6 +- vms/example/xsvm/cmd/chain/create/cmd.go | 4 +- vms/example/xsvm/cmd/issue/export/cmd.go | 40 +- vms/example/xsvm/cmd/issue/importtx/cmd.go | 57 +- vms/example/xsvm/cmd/issue/status/status.go | 28 + vms/example/xsvm/cmd/issue/transfer/cmd.go | 40 +- vms/example/xsvm/cmd/version/cmd.go | 5 +- vms/example/xsvm/constants.go | 21 +- vms/example/xsvm/tx/codec.go | 3 +- vms/example/xsvm/vm.go | 7 +- vms/metervm/block_metrics.go | 57 +- vms/metervm/block_vm.go | 35 +- vms/metervm/metrics.go | 3 +- vms/metervm/vertex_metrics.go | 17 +- vms/metervm/vertex_vm.go | 26 +- vms/nftfx/fx_test.go | 32 +- vms/platformvm/api/static_service.go | 2 +- vms/platformvm/block/builder/builder.go | 47 +- vms/platformvm/block/builder/builder_test.go | 249 +- vms/platformvm/block/builder/helpers_test.go | 141 +- .../block/builder/standard_block_test.go | 16 +- vms/platformvm/block/codec.go | 31 +- vms/platformvm/block/executor/helpers_test.go | 165 +- vms/platformvm/block/executor/manager.go | 12 +- .../block/executor/proposal_block_test.go | 431 +-- .../block/executor/standard_block_test.go | 118 +- vms/platformvm/block/executor/verifier.go | 25 +- .../block/executor/verifier_test.go | 67 +- vms/platformvm/client.go | 444 +-- vms/platformvm/config/config.go | 82 +- vms/platformvm/config/config.md | 226 ++ .../config/execution_config_test.go | 124 +- vms/platformvm/docs/validators_versioning.md | 2 +- vms/platformvm/metrics/block_metrics.go | 97 +- vms/platformvm/metrics/metrics.go | 49 +- vms/platformvm/metrics/tx_metrics.go | 136 +- vms/platformvm/network/config.go | 41 +- vms/platformvm/network/gossip.go | 13 +- vms/platformvm/network/gossip_test.go | 12 +- vms/platformvm/network/network.go | 184 +- 
vms/platformvm/network/network_test.go | 249 +- vms/platformvm/service.go | 1291 +------- vms/platformvm/service.md | 1983 +++++++++++++ vms/platformvm/service_test.go | 348 ++- vms/platformvm/signer/proof_of_possession.go | 6 +- vms/platformvm/state/chain_time_helpers.go | 70 + vms/platformvm/state/diff.go | 10 +- vms/platformvm/state/diff_test.go | 26 +- vms/platformvm/state/metadata_codec.go | 5 +- vms/platformvm/state/metadata_validator.go | 3 +- vms/platformvm/state/mock_state.go | 79 +- vms/platformvm/state/state.go | 555 ++-- vms/platformvm/state/state_test.go | 189 +- .../add_permissionless_delegator_tx_test.go | 4 +- .../add_permissionless_validator_tx_test.go | 4 +- vms/platformvm/txs/builder/builder.go | 940 ------ vms/platformvm/txs/codec.go | 31 +- .../txs/executor/advance_time_test.go | 171 +- .../txs/executor/create_chain_test.go | 79 +- .../txs/executor/create_subnet_test.go | 37 +- vms/platformvm/txs/executor/export_test.go | 28 +- vms/platformvm/txs/executor/helpers_test.go | 148 +- vms/platformvm/txs/executor/import_test.go | 45 +- .../txs/executor/proposal_tx_executor.go | 20 +- .../txs/executor/proposal_tx_executor_test.go | 749 ++--- .../txs/executor/reward_validator_test.go | 185 +- .../txs/executor/staker_tx_verification.go | 98 +- .../staker_tx_verification_helpers.go | 34 - .../executor/staker_tx_verification_test.go | 139 +- .../txs/executor/standard_tx_executor.go | 52 +- .../txs/executor/standard_tx_executor_test.go | 1074 ++++--- vms/platformvm/txs/executor/state_changes.go | 25 - vms/platformvm/txs/fee/calculator.go | 142 + vms/platformvm/txs/fee/calculator_test.go | 251 ++ vms/platformvm/txs/fee/static_config.go | 33 + vms/platformvm/txs/mempool/mempool.go | 206 +- vms/platformvm/txs/mempool/mempool_test.go | 299 -- vms/platformvm/txs/tx.go | 10 + vms/platformvm/txs/txstest/backend.go | 81 + vms/platformvm/txs/txstest/builder.go | 43 + vms/platformvm/txs/txstest/context.go | 39 + vms/platformvm/upgrade/config.go | 50 + 
vms/platformvm/utxo/handler.go | 671 ----- vms/platformvm/utxo/verifier.go | 333 +++ .../{handler_test.go => verifier_test.go} | 2 +- vms/platformvm/validator_set_property_test.go | 143 +- vms/platformvm/validators/manager.go | 22 +- .../validators/manager_benchmark_test.go | 3 +- vms/platformvm/vm.go | 102 +- vms/platformvm/vm_regression_test.go | 1044 ++++--- vms/platformvm/vm_test.go | 766 +++-- vms/platformvm/warp/codec.go | 3 +- vms/platformvm/warp/payload/codec.go | 4 +- vms/platformvm/warp/signature_test.go | 2 +- vms/platformvm/warp/validator.go | 12 +- vms/platformvm/warp/validator_test.go | 8 +- vms/propertyfx/fx_test.go | 26 +- vms/proposervm/batched_vm.go | 2 +- vms/proposervm/batched_vm_test.go | 277 +- vms/proposervm/block.go | 16 +- vms/proposervm/block/block.go | 56 +- vms/proposervm/block/block_test.go | 35 +- vms/proposervm/block/build.go | 7 +- vms/proposervm/block/build_test.go | 15 +- vms/proposervm/block/codec.go | 3 +- vms/proposervm/block/option.go | 8 +- vms/proposervm/block/option_test.go | 13 - vms/proposervm/block/parse.go | 19 +- vms/proposervm/block/parse_test.go | 185 +- vms/proposervm/block_server.go | 30 - vms/proposervm/block_test.go | 133 +- vms/proposervm/config.go | 5 + vms/proposervm/height_indexed_vm.go | 77 - vms/proposervm/indexer/block_server.go | 22 - vms/proposervm/indexer/block_server_test.go | 53 - vms/proposervm/indexer/height_indexer.go | 208 -- vms/proposervm/indexer/height_indexer_test.go | 278 -- vms/proposervm/post_fork_block.go | 13 +- vms/proposervm/post_fork_block_test.go | 329 +- vms/proposervm/post_fork_option_test.go | 277 +- vms/proposervm/pre_fork_block.go | 15 +- vms/proposervm/pre_fork_block_test.go | 333 +-- vms/proposervm/proposer/windower.go | 18 +- vms/proposervm/state/block_height_index.go | 27 +- vms/proposervm/state/block_state.go | 7 +- vms/proposervm/state/block_state_test.go | 28 +- vms/proposervm/state/codec.go | 3 +- vms/proposervm/state/mock_state.go | 57 - 
vms/proposervm/state_syncable_vm.go | 12 - vms/proposervm/state_syncable_vm_test.go | 94 +- vms/proposervm/summary/codec.go | 3 +- vms/proposervm/tree/tree_test.go | 67 +- vms/proposervm/vm.go | 266 +- vms/proposervm/vm_byzantine_test.go | 254 +- vms/proposervm/vm_regression_test.go | 81 - vms/proposervm/vm_test.go | 748 ++--- vms/rpcchainvm/batched_vm_test.go | 6 +- vms/rpcchainvm/errors.go | 2 - vms/rpcchainvm/grpcutils/util.go | 2 +- vms/rpcchainvm/state_syncable_vm_test.go | 6 +- vms/rpcchainvm/vm_client.go | 36 +- vms/rpcchainvm/vm_server.go | 66 +- vms/rpcchainvm/vm_test.go | 2 +- vms/rpcchainvm/with_context_vm_test.go | 5 +- vms/secp256k1fx/credential_test.go | 3 +- vms/secp256k1fx/fx_test.go | 48 +- vms/secp256k1fx/transfer_input_test.go | 3 +- vms/secp256k1fx/transfer_output_test.go | 3 +- vms/tracedvm/block_vm.go | 9 - vms/txs/mempool/mempool.go | 220 ++ vms/txs/mempool/mempool_test.go | 299 ++ vms/txs/mempool/metrics.go | 44 + wallet/chain/c/backend.go | 13 +- wallet/chain/c/builder.go | 37 +- wallet/chain/c/context.go | 77 +- wallet/chain/c/signer.go | 13 +- wallet/chain/c/wallet.go | 62 +- wallet/chain/p/backend.go | 27 +- wallet/chain/p/backend_visitor.go | 17 +- wallet/chain/p/{ => builder}/builder.go | 167 +- .../p/{ => builder}/builder_with_options.go | 56 +- wallet/chain/p/builder/context.go | 82 + wallet/chain/p/builder_test.go | 97 +- wallet/chain/p/context.go | 163 - wallet/chain/p/{ => signer}/signer.go | 10 +- .../{signer_visitor.go => signer/visitor.go} | 80 +- wallet/chain/p/wallet.go | 47 +- wallet/chain/p/wallet_with_options.go | 51 +- wallet/chain/x/backend.go | 23 +- wallet/chain/x/backend_visitor.go | 6 +- wallet/chain/x/{ => builder}/builder.go | 93 +- .../x/{ => builder}/builder_with_options.go | 38 +- wallet/chain/x/{ => builder}/constants.go | 5 +- wallet/chain/x/builder/context.go | 38 + wallet/chain/x/builder_test.go | 65 +- wallet/chain/x/context.go | 103 +- wallet/chain/x/{ => signer}/signer.go | 22 +- .../{signer_visitor.go => 
signer/visitor.go} | 60 +- wallet/chain/x/wallet.go | 49 +- wallet/chain/x/wallet_with_options.go | 38 +- wallet/subnet/primary/api.go | 17 +- wallet/subnet/primary/example_test.go | 4 +- .../examples/add-primary-validator/main.go | 4 +- .../primary/examples/c-chain-export/main.go | 2 +- .../primary/examples/c-chain-import/main.go | 5 +- .../primary/examples/create-chain/main.go | 21 +- .../examples/create-locked-stakeable/main.go | 4 +- .../examples/get-p-chain-balance/main.go | 5 +- .../examples/get-x-chain-balance/main.go | 7 +- wallet/subnet/primary/wallet.go | 21 +- x/merkledb/README.md | 4 +- x/merkledb/bytes_pool.go | 60 + x/merkledb/bytes_pool_test.go | 46 + x/merkledb/cache.go | 19 +- x/merkledb/codec.go | 443 ++- x/merkledb/codec_test.go | 597 +++- x/merkledb/db.go | 284 +- x/merkledb/db_test.go | 125 +- x/merkledb/hashing.go | 99 + x/merkledb/hashing_test.go | 157 + x/merkledb/history.go | 2 +- x/merkledb/history_test.go | 36 +- x/merkledb/intermediate_node_db.go | 56 +- x/merkledb/intermediate_node_db_test.go | 109 +- x/merkledb/key.go | 2 + x/merkledb/metrics.go | 336 +-- x/merkledb/metrics_test.go | 16 +- x/merkledb/node.go | 27 +- x/merkledb/node_test.go | 13 +- x/merkledb/proof.go | 50 +- x/merkledb/proof_test.go | 41 +- x/merkledb/trie.go | 5 +- x/merkledb/trie_test.go | 26 +- x/merkledb/value_node_db.go | 124 +- x/merkledb/value_node_db_test.go | 57 +- x/merkledb/view.go | 334 ++- x/merkledb/view_test.go | 105 + x/merkledb/wait_group.go | 25 + x/merkledb/wait_group_test.go | 29 + x/sync/client.go | 12 + x/sync/network_client.go | 9 +- 826 files changed, 44966 insertions(+), 33884 deletions(-) create mode 100644 .github/actions/install-focal-deps/action.yml create mode 100644 .github/actions/set-go-version-in-env/action.yml create mode 100755 .github/actions/set-go-version-in-env/go_version_env.sh create mode 100644 .github/actions/setup-go-for-project-v3/action.yml create mode 100644 .github/actions/setup-go-for-project/action.yml delete mode 
100644 .github/packer/ubuntu-jammy-x86_64-public-ami.json create mode 100644 .github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl create mode 100755 .github/workflows/notify-metrics-availability.sh create mode 100644 .github/workflows/publish_antithesis_images.yml delete mode 100755 .github/workflows/publish_image.sh create mode 100644 .github/workflows/trigger-antithesis-runs.yml create mode 100644 api/admin/service.md delete mode 100644 api/auth/auth.go delete mode 100644 api/auth/auth_test.go delete mode 100644 api/auth/claims.go delete mode 100644 api/auth/response.go delete mode 100644 api/auth/service.go delete mode 100644 api/health/metrics.go create mode 100644 api/health/service.md create mode 100644 api/info/service.md delete mode 100644 api/ipcs/client.go delete mode 100644 api/ipcs/service.go create mode 100644 api/keystore/service.md create mode 100644 api/metrics/client.go create mode 100644 api/metrics/label_gatherer.go create mode 100644 api/metrics/label_gatherer_test.go delete mode 100644 api/metrics/multi_gatherer_test.go delete mode 100644 api/metrics/optional_gatherer.go delete mode 100644 api/metrics/optional_gatherer_test.go create mode 100644 api/metrics/prefix_gatherer.go create mode 100644 api/metrics/prefix_gatherer_test.go create mode 100644 api/metrics/service.md delete mode 100644 api/server/wrapper.go create mode 100644 config/config.md delete mode 100644 database/meterdb/metrics.go rename database/{pebble => pebbledb}/batch.go (77%) rename database/{pebble => pebbledb}/batch_test.go (89%) rename database/{pebble => pebbledb}/db.go (83%) rename database/{pebble => pebbledb}/db_test.go (96%) rename database/{pebble => pebbledb}/iterator.go (83%) create mode 100644 genesis/checkpoints.go create mode 100644 genesis/checkpoints.json create mode 100644 genesis/generate/checkpoints/main.go create mode 100644 genesis/generate/validators/main.go create mode 100644 genesis/validators.go create mode 100644 genesis/validators.json create mode 
100644 indexer/service.md delete mode 100644 ipcs/chainipc.go delete mode 100644 ipcs/eventsocket.go delete mode 100644 ipcs/socket/socket.go delete mode 100644 ipcs/socket/socket_test.go delete mode 100644 ipcs/socket/socket_unix.go delete mode 100644 ipcs/socket/socket_windows.go delete mode 100644 network/certs_test.go delete mode 100644 network/test_cert_1.crt delete mode 100644 network/test_cert_2.crt delete mode 100644 network/test_cert_3.crt delete mode 100644 network/test_key_1.key delete mode 100644 network/test_key_2.key delete mode 100644 network/test_key_3.key delete mode 100644 proto/Dockerfile.buf delete mode 100644 proto/message/tx.proto delete mode 100644 proto/pb/message/tx.pb.go create mode 100755 scripts/build_antithesis_avalanchego_workload.sh create mode 100755 scripts/build_antithesis_images.sh create mode 100755 scripts/build_antithesis_xsvm_workload.sh create mode 100755 scripts/run_prometheus.sh create mode 100755 scripts/run_promtail.sh create mode 100755 scripts/tests.build_antithesis_images.sh create mode 100755 scripts/tests.build_image.sh create mode 100644 snow/consensus/snowball/test_snowflake.go create mode 100644 snow/consensus/snowman/snowmantest/block.go rename snow/consensus/snowman/{ => snowmantest}/mock_block.go (95%) delete mode 100644 snow/consensus/snowman/test_block.go rename snow/engine/{common => avalanche/bootstrap}/queue/job.go (100%) rename snow/engine/{common => avalanche/bootstrap}/queue/jobs.go (100%) rename snow/engine/{common => avalanche/bootstrap}/queue/jobs_test.go (100%) rename snow/engine/{common => avalanche/bootstrap}/queue/parser.go (100%) rename snow/engine/{common => avalanche/bootstrap}/queue/state.go (100%) rename snow/engine/{common => avalanche/bootstrap}/queue/test_job.go (100%) rename snow/engine/{common => avalanche/bootstrap}/queue/test_parser.go (100%) delete mode 100644 snow/engine/avalanche/getter/getter_test.go create mode 100644 snow/engine/snowman/bootstrap/acceptor.go delete mode 100644 
snow/engine/snowman/bootstrap/block_job.go create mode 100644 snow/engine/snowman/bootstrap/interval/blocks.go create mode 100644 snow/engine/snowman/bootstrap/interval/blocks_test.go create mode 100644 snow/engine/snowman/bootstrap/interval/interval.go create mode 100644 snow/engine/snowman/bootstrap/interval/interval_test.go create mode 100644 snow/engine/snowman/bootstrap/interval/state.go create mode 100644 snow/engine/snowman/bootstrap/interval/tree.go create mode 100644 snow/engine/snowman/bootstrap/interval/tree_test.go create mode 100644 snow/engine/snowman/bootstrap/storage.go create mode 100644 snow/engine/snowman/bootstrap/storage_test.go delete mode 100644 snow/engine/snowman/engine.go create mode 100644 snow/engine/snowman/job/scheduler.go create mode 100644 snow/engine/snowman/job/scheduler_test.go delete mode 100644 snow/engine/snowman/test_engine.go delete mode 100644 snow/engine/snowman/traced_engine.go delete mode 100644 snow/event/blockable.go delete mode 100644 snow/event/blocker.go delete mode 100644 snow/event/blocker_test.go delete mode 100644 snow/networking/benchlist/metrics.go delete mode 100644 snow/validators/mock_manager.go create mode 100644 staking/local/signer1.key create mode 100644 staking/local/signer2.key create mode 100644 staking/local/signer3.key create mode 100644 staking/local/signer4.key create mode 100644 staking/local/signer5.key create mode 100644 staking/verify_test.go create mode 100644 subnets/config.md create mode 100644 tests/antithesis/README.md create mode 100644 tests/antithesis/avalanchego/Dockerfile.builder-instrumented create mode 100644 tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented create mode 100644 tests/antithesis/avalanchego/Dockerfile.config create mode 100644 tests/antithesis/avalanchego/Dockerfile.node create mode 100644 tests/antithesis/avalanchego/Dockerfile.workload create mode 100644 tests/antithesis/avalanchego/gencomposeconfig/main.go create mode 100644 
tests/antithesis/avalanchego/main.go create mode 100644 tests/antithesis/compose.go create mode 100644 tests/antithesis/config.go create mode 100644 tests/antithesis/init_db.go create mode 100644 tests/antithesis/node_health.go create mode 100644 tests/antithesis/xsvm/Dockerfile.config create mode 100644 tests/antithesis/xsvm/Dockerfile.node create mode 100644 tests/antithesis/xsvm/Dockerfile.workload create mode 100644 tests/antithesis/xsvm/gencomposeconfig/main.go create mode 100644 tests/antithesis/xsvm/main.go create mode 100644 tests/e2e/vms/xsvm.go create mode 100644 tests/fixture/subnet/xsvm.go create mode 100644 tests/fixture/tmpnet/detached_process_default.go create mode 100644 tests/fixture/tmpnet/detached_process_windows.go create mode 100644 tests/fixture/tmpnet/local_network.go delete mode 100644 tests/http.go create mode 100644 tests/metrics.go create mode 100644 utils/bytes_test.go delete mode 100644 utils/compression/gzip_compressor.go delete mode 100644 utils/compression/gzip_zip_bomb.bin delete mode 100644 utils/ips/dynamic_ip_port.go create mode 100644 utils/ips/ip.go delete mode 100644 utils/ips/ip_port.go delete mode 100644 utils/ips/ip_test.go create mode 100644 utils/linked/hashmap.go rename utils/{linkedhashmap/linkedhashmap_test.go => linked/hashmap_test.go} (82%) create mode 100644 utils/linked/list.go create mode 100644 utils/linked/list_test.go delete mode 100644 utils/linkedhashmap/iterator.go delete mode 100644 utils/linkedhashmap/linkedhashmap.go create mode 100644 version/string_test.go create mode 100644 vms/avm/config.md create mode 100644 vms/avm/service.md create mode 100644 vms/avm/txs/txstest/builder.go create mode 100644 vms/avm/txs/txstest/context.go create mode 100644 vms/avm/txs/txstest/utxos.go delete mode 100644 vms/components/avax/metadata.go delete mode 100644 vms/components/avax/metadata_test.go delete mode 100644 vms/components/message/codec.go delete mode 100644 vms/components/message/handler.go delete mode 100644 
vms/components/message/handler_test.go delete mode 100644 vms/components/message/message.go delete mode 100644 vms/components/message/message_test.go delete mode 100644 vms/components/message/tx.go delete mode 100644 vms/components/message/tx_test.go create mode 100644 vms/example/xsvm/Dockerfile create mode 100644 vms/example/xsvm/cmd/issue/status/status.go create mode 100644 vms/platformvm/config/config.md create mode 100644 vms/platformvm/service.md create mode 100644 vms/platformvm/state/chain_time_helpers.go delete mode 100644 vms/platformvm/txs/builder/builder.go create mode 100644 vms/platformvm/txs/fee/calculator.go create mode 100644 vms/platformvm/txs/fee/calculator_test.go create mode 100644 vms/platformvm/txs/fee/static_config.go delete mode 100644 vms/platformvm/txs/mempool/mempool_test.go create mode 100644 vms/platformvm/txs/txstest/backend.go create mode 100644 vms/platformvm/txs/txstest/builder.go create mode 100644 vms/platformvm/txs/txstest/context.go create mode 100644 vms/platformvm/upgrade/config.go delete mode 100644 vms/platformvm/utxo/handler.go create mode 100644 vms/platformvm/utxo/verifier.go rename vms/platformvm/utxo/{handler_test.go => verifier_test.go} (99%) delete mode 100644 vms/proposervm/block/option_test.go delete mode 100644 vms/proposervm/block_server.go delete mode 100644 vms/proposervm/indexer/block_server.go delete mode 100644 vms/proposervm/indexer/block_server_test.go delete mode 100644 vms/proposervm/indexer/height_indexer.go delete mode 100644 vms/proposervm/indexer/height_indexer_test.go delete mode 100644 vms/proposervm/vm_regression_test.go create mode 100644 vms/txs/mempool/mempool.go create mode 100644 vms/txs/mempool/mempool_test.go create mode 100644 vms/txs/mempool/metrics.go rename wallet/chain/p/{ => builder}/builder.go (89%) rename wallet/chain/p/{ => builder}/builder_with_options.go (81%) create mode 100644 wallet/chain/p/builder/context.go delete mode 100644 wallet/chain/p/context.go rename wallet/chain/p/{ 
=> signer}/signer.go (89%) rename wallet/chain/p/{signer_visitor.go => signer/visitor.go} (77%) rename wallet/chain/x/{ => builder}/builder.go (91%) rename wallet/chain/x/{ => builder}/builder_with_options.go (81%) rename wallet/chain/x/{ => builder}/constants.go (95%) create mode 100644 wallet/chain/x/builder/context.go rename wallet/chain/x/{ => signer}/signer.go (71%) rename wallet/chain/x/{signer_visitor.go => signer/visitor.go} (82%) create mode 100644 x/merkledb/bytes_pool.go create mode 100644 x/merkledb/bytes_pool_test.go create mode 100644 x/merkledb/hashing.go create mode 100644 x/merkledb/hashing_test.go create mode 100644 x/merkledb/view_test.go create mode 100644 x/merkledb/wait_group.go create mode 100644 x/merkledb/wait_group_test.go diff --git a/.github/actions/install-focal-deps/action.yml b/.github/actions/install-focal-deps/action.yml new file mode 100644 index 000000000000..0770ca11a6d6 --- /dev/null +++ b/.github/actions/install-focal-deps/action.yml @@ -0,0 +1,16 @@ +# This action installs dependencies missing from the default +# focal image used by arm64 github workers. +# +# TODO(marun): Find an image with the required dependencies already installed. + +name: 'Install focal arm64 dependencies' +description: 'Installs the dependencies required to build avalanchego on an arm64 github worker running Ubuntu 20.04 (focal)' + +runs: + using: composite + steps: + - name: Install build-essential + run: | + sudo apt update + sudo apt -y install build-essential + shell: bash diff --git a/.github/actions/set-go-version-in-env/action.yml b/.github/actions/set-go-version-in-env/action.yml new file mode 100644 index 000000000000..97ad376adf53 --- /dev/null +++ b/.github/actions/set-go-version-in-env/action.yml @@ -0,0 +1,16 @@ +# This action sets GO_VERSION from the project's go.mod. +# +# Must be run after actions/checkout to ensure go.mod is available to +# source the project's go version from. 
+ +name: 'Set GO_VERSION env var from go.mod' +description: 'Read the go version from go.mod and add it as env var GO_VERSION in the github env' + +runs: + using: composite + steps: + - name: Set the project Go version in the environment + # A script works across different platforms but attempting to replicate the script directly in + # the run statement runs into platform-specific path handling issues. + run: .github/actions/set-go-version-in-env/go_version_env.sh >> $GITHUB_ENV + shell: bash diff --git a/.github/actions/set-go-version-in-env/go_version_env.sh b/.github/actions/set-go-version-in-env/go_version_env.sh new file mode 100755 index 000000000000..8a0c535a2331 --- /dev/null +++ b/.github/actions/set-go-version-in-env/go_version_env.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Prints the go version defined in the repo's go.mod. This is useful +# for configuring the correct version of go to install in CI. +# +# `go list -m -f '{{.GoVersion}}'` should be preferred outside of CI +# when go is already installed. + +# 3 directories above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd ../../.. && pwd ) + +echo GO_VERSION="~$(sed -n -e 's/^go //p' "${AVALANCHE_PATH}"/go.mod)" diff --git a/.github/actions/setup-go-for-project-v3/action.yml b/.github/actions/setup-go-for-project-v3/action.yml new file mode 100644 index 000000000000..b92bbdfb2aec --- /dev/null +++ b/.github/actions/setup-go-for-project-v3/action.yml @@ -0,0 +1,24 @@ +# This action targets setup-go@v3 to support workers with old NodeJS +# incompabible with newer versions of setup-go. +# +# Since github actions do not support dynamically configuring the +# versions in a uses statement (e.g. `actions/setup-go@${{ var }}`) it +# is necessary to define an action per version rather than one action +# that can be parameterized. +# +# Must be run after actions/checkout to ensure go.mod is available to +# source the project's go version from. 
+ +name: 'Install Go toolchain with project defaults' +description: 'Install a go toolchain with project defaults' + +runs: + using: composite + steps: + - name: Set the project Go version in the environment + uses: ./.github/actions/set-go-version-in-env + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true diff --git a/.github/actions/setup-go-for-project/action.yml b/.github/actions/setup-go-for-project/action.yml new file mode 100644 index 000000000000..dd38ceff47f0 --- /dev/null +++ b/.github/actions/setup-go-for-project/action.yml @@ -0,0 +1,25 @@ +# This action targets the project default version of setup-go. For +# workers with old NodeJS incompabible with newer versions of +# setup-go, try setup-go-for-project-v3. +# +# Since github actions do not support dynamically configuring the +# versions in a uses statement (e.g. `actions/setup-go@${{ var }}`) it +# is necessary to define an action per version rather than one action +# that can be parameterized. +# +# Must be run after actions/checkout to ensure go.mod is available to +# source the project's go version from. 
+ +name: 'Install Go toolchain with project defaults' +description: 'Install a go toolchain with project defaults' + +runs: + using: composite + steps: + - name: Set the project Go version in the environment + uses: ./.github/actions/set-go-version-in-env + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '${{ env.GO_VERSION }}' + check-latest: true diff --git a/.github/packer/ubuntu-jammy-x86_64-public-ami.json b/.github/packer/ubuntu-jammy-x86_64-public-ami.json deleted file mode 100644 index cb0562b9bd6f..000000000000 --- a/.github/packer/ubuntu-jammy-x86_64-public-ami.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "variables": { - "version": "jammy-22.04", - "tag": "{{env `TAG`}}", - "skip_create_ami": "{{env `SKIP_CREATE_AMI`}}" - }, - "builders": [ - { - "type": "amazon-ebs", - "region": "us-east-1", - "skip_create_ami": "{{ user `skip_create_ami` }}", - "ami_name": "public-avalanche-ubuntu-{{ user `version` }}-{{ user `tag` }}-{{timestamp}}", - "source_ami_filter": { - "filters": { - "virtualization-type": "hvm", - "name": "ubuntu/images/*ubuntu-{{ user `version` }}-*-server-*", - "root-device-type": "ebs", - "architecture": "x86_64" - }, - "most_recent": true, - "owners": [ - "099720109477" - ] - }, - "ssh_username": "ubuntu", - "instance_type": "c5.large", - "ami_groups": "all", - "tags": { - "Name": "public-avalanche-ubuntu-{{ user `version` }}-{{ user `tag` }}-{{ isotime | clean_resource_name }}", - "Release": "{{ user `version` }}", - "Base_AMI_Name": "{{ .SourceAMIName }}" - } - } - ], - "provisioners": [ - { - "type": "shell", - "inline": [ - "while [ ! 
-f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done", - "wait_apt=$(ps aux | grep apt | wc -l)", - "while [ \"$wait_apt\" -gt \"1\" ]; do echo \"waiting for apt to be ready....\"; wait_apt=$(ps aux | grep apt | wc -l); sleep 5; done", - "sudo apt-get -y update", - "sudo apt-get install -y python3-boto3 golang" - ] - }, - { - "type": "ansible", - "playbook_file": ".github/packer/create_public_ami.yml", - "roles_path": ".github/packer/roles/", - "use_proxy": false, - "extra_arguments": ["-e", "component=public-ami build=packer os_release=jammy tag={{ user `tag` }}"] - }, - { - "type": "shell", - "script": ".github/packer/clean-public-ami.sh", - "execute_command": "sudo bash -x {{.Path}}" - } - ] -} - diff --git a/.github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl b/.github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl new file mode 100644 index 000000000000..c319e87134c7 --- /dev/null +++ b/.github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl @@ -0,0 +1,81 @@ +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + ansible = { + source = "github.com/hashicorp/ansible" + version = "~> 1" + } + } +} + +variable "skip_create_ami" { + type = string + default = "${env("SKIP_CREATE_AMI")}" +} + +variable "tag" { + type = string + default = "${env("TAG")}" +} + +variable "version" { + type = string + default = "jammy-22.04" +} + +data "amazon-ami" "autogenerated_1" { + filters = { + architecture = "x86_64" + name = "ubuntu/images/*ubuntu-${var.version}-*-server-*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["099720109477"] + region = "us-east-1" +} + +locals { + skip_create_ami = var.skip_create_ami == "True" + timestamp = regex_replace(timestamp(), "[- TZ:]", "") + clean_name = regex_replace(timestamp(), "[^a-zA-Z0-9-]", "-") +} + +source "amazon-ebs" "autogenerated_1" { + ami_groups = ["all"] + ami_name = 
"public-avalanche-ubuntu-${var.version}-${var.tag}-${local.timestamp}" + instance_type = "c5.large" + region = "us-east-1" + skip_create_ami = local.skip_create_ami + source_ami = "${data.amazon-ami.autogenerated_1.id}" + ssh_username = "ubuntu" + tags = { + Base_AMI_Name = "{{ .SourceAMIName }}" + Name = "public-avalanche-ubuntu-${var.version}-${var.tag}-${local.clean_name}" + Release = "${var.version}" + } +} + +build { + sources = ["source.amazon-ebs.autogenerated_1"] + + provisioner "shell" { + inline = ["while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting for cloud-init...'; sleep 1; done", "wait_apt=$(ps aux | grep apt | wc -l)", "while [ \"$wait_apt\" -gt \"1\" ]; do echo \"waiting for apt to be ready....\"; wait_apt=$(ps aux | grep apt | wc -l); sleep 5; done", "sudo apt-get -y update", "sudo apt-get install -y python3-boto3 golang"] + } + + provisioner "ansible" { + extra_arguments = ["-e", "component=public-ami build=packer os_release=jammy tag=${var.tag}"] + playbook_file = ".github/packer/create_public_ami.yml" + roles_path = ".github/packer/roles/" + use_proxy = false + } + + provisioner "shell" { + execute_command = "sudo bash -x {{ .Path }}" + script = ".github/packer/clean-public-ami.sh" + } + +} diff --git a/.github/workflows/buf-push.yml b/.github/workflows/buf-push.yml index 1b2f72dca75c..9cbc657fc47e 100644 --- a/.github/workflows/buf-push.yml +++ b/.github/workflows/buf-push.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: bufbuild/buf-setup-action@v1.29.0 + - uses: bufbuild/buf-setup-action@v1.31.0 - uses: bufbuild/buf-push-action@v1 with: input: "proto" diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index 857ba374a20e..9f5cdfe97475 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -10,9 +10,6 @@ on: tags: - "*" -env: - go_version: '~1.21.7' - jobs: 
build-x86_64-binaries-tarball: runs-on: ubuntu-20.04 @@ -20,10 +17,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - run: go version @@ -77,15 +71,14 @@ jobs: rm -rf /tmp/avalanchego build-arm64-binaries-tarball: - runs-on: [self-hosted, linux, ARM64, focal] + runs-on: custom-arm64-focal steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v3 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/install-focal-deps + + - uses: ./.github/actions/setup-go-for-project-v3 - run: go version diff --git a/.github/workflows/build-macos-release.yml b/.github/workflows/build-macos-release.yml index ea057dbfadb2..2a4bfb1c45d1 100644 --- a/.github/workflows/build-macos-release.yml +++ b/.github/workflows/build-macos-release.yml @@ -24,10 +24,7 @@ jobs: steps: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: '~1.21.7' - check-latest: true + - uses: ./.github/actions/setup-go-for-project - run: go version # Runs a single command using the runners shell diff --git a/.github/workflows/build-public-ami.yml b/.github/workflows/build-public-ami.yml index e32e43de235a..cc9082ab3e3c 100644 --- a/.github/workflows/build-public-ami.yml +++ b/.github/workflows/build-public-ami.yml @@ -10,6 +10,10 @@ on: tags: - "*" +env: + PACKER_VERSION: "1.10.2" + PYTHON3_BOTO3_VERSION: "1.20.34+dfsg-1" + jobs: build-public-ami-and-upload: runs-on: ubuntu-22.04 @@ -17,16 +21,13 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: '~1.21.7' - check-latest: true + - uses: ./.github/actions/setup-go-for-project - run: go version - name: Install aws cli run: | sudo apt update - sudo apt-get -y install packer python3-boto3 + sudo apt-get -y install 
python3-boto3=${PYTHON3_BOTO3_VERSION} - name: Get the tag id: get_tag @@ -53,6 +54,20 @@ jobs: aws-secret-access-key: ${{ secrets.MARKETPLACE_KEY }} aws-region: us-east-1 + - name: Setup `packer` + uses: hashicorp/setup-packer@main + id: setup + with: + version: ${{ env.PACKER_VERSION }} + + - name: Run `packer init` + id: init + run: "packer init ./.github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl" + + - name: Run `packer validate` + id: validate + run: "packer validate ./.github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl" + - name: Create AMI and upload to marketplace run: | ./.github/workflows/update-ami.py diff --git a/.github/workflows/build-ubuntu-amd64-release.yml b/.github/workflows/build-ubuntu-amd64-release.yml index 86be909be5c4..7c00b56d1224 100644 --- a/.github/workflows/build-ubuntu-amd64-release.yml +++ b/.github/workflows/build-ubuntu-amd64-release.yml @@ -10,19 +10,13 @@ on: tags: - "*" -env: - go_version: '~1.21.7' - jobs: build-jammy-amd64-package: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - run: go version - name: Build the avalanchego binaries @@ -79,10 +73,7 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - run: go version - name: Build the avalanchego binaries diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 6c0b37d6924d..096137b1a2ef 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -10,19 +10,13 @@ on: tags: - "*" -env: - go_version: '~1.21.7' - jobs: build-jammy-arm64-package: - runs-on: [self-hosted, linux, ARM64, jammy] + runs-on: custom-arm64-jammy steps: - uses: actions/checkout@v4 - - uses: 
actions/setup-go@v3 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project-v3 - run: go version - name: Build the avalanchego binaries @@ -75,14 +69,12 @@ jobs: rm -rf /tmp/avalanchego build-focal-arm64-package: - runs-on: [self-hosted, linux, ARM64, focal] + runs-on: custom-arm64-focal steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v3 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/install-focal-deps + - uses: ./.github/actions/setup-go-for-project-v3 - run: go version - name: Build the avalanchego binaries diff --git a/.github/workflows/build-win-release.yml b/.github/workflows/build-win-release.yml index ef4ef29f2fe3..9d04e036b7e8 100644 --- a/.github/workflows/build-win-release.yml +++ b/.github/workflows/build-win-release.yml @@ -24,10 +24,7 @@ jobs: # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: '~1.21.7' - check-latest: true + - uses: ./.github/actions/setup-go-for-project - run: go version diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88c803ffe85c..7fc80756daed 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,8 +20,7 @@ concurrency: cancel-in-progress: true env: - go_version: '~1.21.7' - tmpnet_data_path: ~/.tmpnet/networks/1000 + grafana_url: https://grafana-experimental.avax-dev.network/d/kBQpRdWnk/avalanche-main-dashboard?orgId=1&refresh=10s&var-filter=is_ephemeral_node%7C%3D%7Cfalse&var-filter=gh_repo%7C%3D%7Cava-labs%2Favalanchego&var-filter=gh_run_id%7C%3D%7C${{ github.run_id }}&var-filter=gh_run_attempt%7C%3D%7C${{ github.run_attempt }} jobs: Unit: @@ -29,13 +28,12 @@ jobs: strategy: fail-fast: false matrix: - os: [macos-12, ubuntu-20.04, ubuntu-22.04, windows-2022, [self-hosted, linux, ARM64, focal], [self-hosted, linux, ARM64, jammy]] + os: [macos-12, ubuntu-20.04, 
ubuntu-22.04, windows-2022, custom-arm64-focal, custom-arm64-jammy] steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v3 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/install-focal-deps + if: matrix.os == 'custom-arm64-focal' + - uses: ./.github/actions/setup-go-for-project-v3 - name: Set timeout on Windows # Windows UT run slower and need a longer timeout shell: bash if: matrix.os == 'windows-2022' @@ -49,10 +47,7 @@ jobs: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - name: fuzz_test shell: bash run: ./scripts/build_fuzz.sh 10 # Run each fuzz test 10 seconds @@ -60,73 +55,158 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - name: Build AvalancheGo Binary shell: bash run: ./scripts/build.sh -r + - name: Start prometheus + # Only run for the original repo; a forked repo won't have access to the monitoring credentials + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: bash -x ./scripts/run_prometheus.sh + env: + PROMETHEUS_ID: ${{ secrets.PROMETHEUS_ID }} + PROMETHEUS_PASSWORD: ${{ secrets.PROMETHEUS_PASSWORD }} + - name: Start promtail + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: bash -x ./scripts/run_promtail.sh + env: + LOKI_ID: ${{ secrets.LOKI_ID }} + LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }} + - name: Notify of metrics availability + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: 
.github/workflows/notify-metrics-availability.sh + env: + GRAFANA_URL: ${{ env.grafana_url }} + GH_JOB_ID: ${{ github.job }} + FILTER_BY_OWNER: avalanchego-e2e - name: Run e2e tests shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.sh + run: E2E_SERIAL=1 ./scripts/tests.e2e.sh --delay-network-shutdown + env: + GH_REPO: ${{ github.repository }} + GH_WORKFLOW: ${{ github.workflow }} + GH_RUN_ID: ${{ github.run_id }} + GH_RUN_NUMBER: ${{ github.run_number }} + GH_RUN_ATTEMPT: ${{ github.run_attempt }} + GH_JOB_ID: ${{ github.job }} - name: Upload tmpnet network dir uses: actions/upload-artifact@v4 if: always() with: name: e2e-tmpnet-data - path: ${{ env.tmpnet_data_path }} + path: | + ~/.tmpnet/networks + ~/.tmpnet/prometheus/prometheus.log + ~/.tmpnet/promtail/promtail.log if-no-files-found: error e2e_existing_network: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - name: Build AvalancheGo Binary shell: bash run: ./scripts/build.sh -r + - name: Start prometheus + # Only run for the original repo; a forked repo won't have access to the monitoring credentials + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: bash -x ./scripts/run_prometheus.sh + env: + PROMETHEUS_ID: ${{ secrets.PROMETHEUS_ID }} + PROMETHEUS_PASSWORD: ${{ secrets.PROMETHEUS_PASSWORD }} + - name: Start promtail + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: bash -x ./scripts/run_promtail.sh + env: + LOKI_ID: ${{ secrets.LOKI_ID }} + LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }} + - name: Notify of metrics availability + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: 
.github/workflows/notify-metrics-availability.sh + env: + GRAFANA_URL: ${{ env.grafana_url }} + GH_JOB_ID: ${{ github.job }} - name: Run e2e tests with existing network shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh + run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh --delay-network-shutdown + env: + GH_REPO: ${{ github.repository }} + GH_WORKFLOW: ${{ github.workflow }} + GH_RUN_ID: ${{ github.run_id }} + GH_RUN_NUMBER: ${{ github.run_number }} + GH_RUN_ATTEMPT: ${{ github.run_attempt }} + GH_JOB_ID: ${{ github.job }} - name: Upload tmpnet network dir uses: actions/upload-artifact@v4 if: always() with: name: e2e-existing-network-tmpnet-data - path: ${{ env.tmpnet_data_path }} + path: | + ~/.tmpnet/networks + ~/.tmpnet/prometheus/prometheus.log + ~/.tmpnet/promtail/promtail.log if-no-files-found: error Upgrade: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - name: Build AvalancheGo Binary shell: bash run: ./scripts/build.sh + - name: Start prometheus + # Only run for the original repo; a forked repo won't have access to the monitoring credentials + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: bash -x ./scripts/run_prometheus.sh + env: + PROMETHEUS_ID: ${{ secrets.PROMETHEUS_ID }} + PROMETHEUS_PASSWORD: ${{ secrets.PROMETHEUS_PASSWORD }} + - name: Start promtail + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: bash -x ./scripts/run_promtail.sh + env: + LOKI_ID: ${{ secrets.LOKI_ID }} + LOKI_PASSWORD: ${{ secrets.LOKI_PASSWORD }} + - name: Notify of metrics availability + if: (github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == github.repository) + shell: bash + run: 
.github/workflows/notify-metrics-availability.sh + env: + GRAFANA_URL: ${{ env.grafana_url }} + GH_JOB_ID: ${{ github.job }} - name: Run e2e tests shell: bash run: ./scripts/tests.upgrade.sh + env: + GH_REPO: ${{ github.repository }} + GH_WORKFLOW: ${{ github.workflow }} + GH_RUN_ID: ${{ github.run_id }} + GH_RUN_NUMBER: ${{ github.run_number }} + GH_RUN_ATTEMPT: ${{ github.run_attempt }} + GH_JOB_ID: ${{ github.job }} - name: Upload tmpnet network dir uses: actions/upload-artifact@v4 if: always() with: name: upgrade-tmpnet-data - path: ${{ env.tmpnet_data_path }} + path: | + ~/.tmpnet/networks + ~/.tmpnet/prometheus/prometheus.log + ~/.tmpnet/promtail/promtail.log if-no-files-found: error Lint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - name: Run static analysis tests shell: bash run: scripts/lint.sh @@ -138,7 +218,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: bufbuild/buf-setup-action@v1.29.0 + - uses: bufbuild/buf-setup-action@v1.31.0 with: github_token: ${{ github.token }} - uses: bufbuild/buf-lint-action@v1 @@ -149,11 +229,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true - - uses: bufbuild/buf-setup-action@v1.29.0 + - uses: ./.github/actions/setup-go-for-project + - uses: bufbuild/buf-setup-action@v1.31.0 - shell: bash run: scripts/protobuf_codegen.sh - shell: bash @@ -163,10 +240,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - shell: bash run: scripts/mock.gen.sh - shell: bash @@ -176,11 +250,40 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - 
with: - go-version: ${{ env.go_version }} - check-latest: true + - uses: ./.github/actions/setup-go-for-project - shell: bash run: go mod tidy - shell: bash run: .github/workflows/check-clean-branch.sh + test_build_image: + name: Image build + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install qemu (required for cross-platform builds) + run: | + sudo apt update + sudo apt -y install qemu qemu-user-static + - name: Check image build + shell: bash + run: bash -x scripts/tests.build_image.sh + test_build_antithesis_avalanchego_images: + name: Build Antithesis avalanchego images + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Check image build for avalanchego test setup + shell: bash + run: bash -x scripts/tests.build_antithesis_images.sh + env: + TEST_SETUP: avalanchego + test_build_antithesis_xsvm_images: + name: Build Antithesis xsvm images + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Check image build for xsvm test setup + shell: bash + run: bash -x scripts/tests.build_antithesis_images.sh + env: + TEST_SETUP: xsvm diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 85a5e059b332..9a1446deb746 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -43,10 +43,7 @@ jobs: uses: actions/checkout@v4 - name: Setup Golang - uses: actions/setup-go@v5 - with: - go-version: '~1.21.7' - check-latest: true + uses: ./.github/actions/setup-go-for-project # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 07c5d2dbfcb4..abc4f141ef3c 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -16,10 +16,7 @@ jobs: with: ref: 'dev' - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version: '~1.21.7' - check-latest: true + uses: ./.github/actions/setup-go-for-project - name: Run fuzz tests shell: bash run: ./scripts/build_fuzz.sh 180 # Run each fuzz test 180 seconds diff --git a/.github/workflows/fuzz_merkledb.yml b/.github/workflows/fuzz_merkledb.yml index 09c58ffd38e0..1b331f0cfad6 100644 --- a/.github/workflows/fuzz_merkledb.yml +++ b/.github/workflows/fuzz_merkledb.yml @@ -18,10 +18,7 @@ jobs: with: ref: 'dev' - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version: '~1.21.7' - check-latest: true + uses: ./.github/actions/setup-go-for-project - name: Run merkledb fuzz tests shell: bash run: ./scripts/build_fuzz.sh 900 ./x/merkledb # Run each merkledb fuzz tests 15 minutes diff --git a/.github/workflows/notify-metrics-availability.sh b/.github/workflows/notify-metrics-availability.sh new file mode 100755 index 000000000000..fd69064045c1 --- /dev/null +++ b/.github/workflows/notify-metrics-availability.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Timestamps are in seconds +from_timestamp="$(date '+%s')" +monitoring_period=900 # 15 minutes +to_timestamp="$((from_timestamp + monitoring_period))" + +# Grafana expects microseconds, so pad timestamps with 3 zeros +metrics_url="${GRAFANA_URL}&var-filter=gh_job_id%7C%3D%7C${GH_JOB_ID}&from=${from_timestamp}000&to=${to_timestamp}000" + +# Optionally ensure that the link displays metrics only for the shared +# network rather than mixing it with the results for private networks. 
+if [[ -n "${FILTER_BY_OWNER:-}" ]]; then + metrics_url="${metrics_url}&var-filter=network_owner%7C%3D%7C${FILTER_BY_OWNER}" +fi + +echo "::notice links::metrics ${metrics_url}" diff --git a/.github/workflows/publish_antithesis_images.yml b/.github/workflows/publish_antithesis_images.yml new file mode 100644 index 000000000000..8363ad73e975 --- /dev/null +++ b/.github/workflows/publish_antithesis_images.yml @@ -0,0 +1,46 @@ +name: Publish Antithesis Images + +on: + workflow_dispatch: + inputs: + image_tag: + description: 'The tag to apply to published images' + default: latest + required: true + type: string + push: + branches: + - master + +env: + REGISTRY: us-central1-docker.pkg.dev + REPOSITORY: molten-verve-216720/avalanche-repository + +jobs: + antithesis: + runs-on: ubuntu-latest + + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Login to GAR + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: _json_key + password: ${{ secrets.ANTITHESIS_GAR_JSON_KEY }} + + - name: Build and push images for avalanchego test setup + run: bash -x ./scripts/build_antithesis_images.sh + env: + IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} + TAG: ${{ github.events.inputs.image_tag || 'latest' }} + TEST_SETUP: avalanchego + + - name: Build and push images for xsvm test setup + run: bash -x ./scripts/build_antithesis_images.sh + env: + IMAGE_PREFIX: ${{ env.REGISTRY }}/${{ env.REPOSITORY }} + TAG: ${{ github.events.inputs.image_tag || 'latest' }} + TEST_SETUP: xsvm diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index c8940983c5f9..2674c429bfd5 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -14,9 +14,16 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Publish image to DockerHub + - name: Install qemu (required for cross-platform builds) + run: | + sudo apt update + sudo apt 
-y install qemu qemu-user-static + sudo systemctl restart docker + - name: Create multiplatform docker builder + run: docker buildx create --use + - name: Build and publish images to DockerHub env: DOCKER_USERNAME: ${{ secrets.docker_username }} DOCKER_PASS: ${{ secrets.docker_pass }} - DOCKER_REPO: ${{ secrets.docker_repo }} - run: .github/workflows/publish_image.sh + DOCKER_IMAGE: ${{ secrets.docker_repo }} + run: scripts/build_image.sh diff --git a/.github/workflows/publish_image.sh b/.github/workflows/publish_image.sh deleted file mode 100755 index d6988bdeb99c..000000000000 --- a/.github/workflows/publish_image.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# If this is not a trusted build (Docker Credentials are not set) -if [[ -z "$DOCKER_USERNAME" ]]; then - exit 0; -fi - -# Avalanche root directory -AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd ../.. && pwd ) - -# Load the constants -source "$AVALANCHE_PATH"/scripts/constants.sh - -# Build current avalanchego -source "$AVALANCHE_PATH"/scripts/build_image.sh - -if [[ $current_branch == "master" ]]; then - echo "Tagging current avalanchego image as $avalanchego_dockerhub_repo:latest" - docker tag "$avalanchego_dockerhub_repo:$current_branch" "$avalanchego_dockerhub_repo:latest" -fi - -echo "Pushing: $avalanchego_dockerhub_repo:$current_branch" - -echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin - -## pushing image with tags -docker image push -a "$avalanchego_dockerhub_repo" diff --git a/.github/workflows/trigger-antithesis-runs.yml b/.github/workflows/trigger-antithesis-runs.yml new file mode 100644 index 000000000000..0521b0770d79 --- /dev/null +++ b/.github/workflows/trigger-antithesis-runs.yml @@ -0,0 +1,53 @@ +name: Trigger Antithesis Test Runs + +on: + # TODO(marun) Add a schedule to execute regularly + workflow_dispatch: + inputs: + duration: + description: 'The duration to run the test for' + default: '0.5' + required: 
true + type: string + recipients: + description: 'Email recipients to send the test report to' + default: ${{ secrets.ANTITHESIS_RECIPIENTS }} + required: true + type: string + image_tag: + description: 'The image tag to target' + default: latest + required: true + type: string + +jobs: + Run Antithesis Avalanchego Test Setup: + runs-on: ubuntu-latest + steps: + - uses: antithesishq/antithesis-trigger-action@v0.5 + with: + notebook_name: avalanche + tenant: avalanche + username: ${{ secrets.ANTITHESIS_USERNAME }} + password: ${{ secrets.ANTITHESIS_PASSWORD }} + github_token: ${{ secrets.ANTITHESIS_GH_PAT }} + config_image: antithesis-avalanchego-config@${{ github.events.inputs.image_tag }} + images: antithesis-avalanchego-workload@${{ github.events.inputs.image_tag }};antithesis-avalanchego-node@${{ github.events.inputs.image_tag }} + email_recipients: ${{ github.events.inputs.recipients }} + additional_parameters: |- + custom.duration=${{ github.events.inputs.duration }} + Run Antithesis XSVM Test Setup: + runs-on: ubuntu-latest + steps: + - uses: antithesishq/antithesis-trigger-action@v0.5 + with: + notebook_name: avalanche + tenant: avalanche + username: ${{ secrets.ANTITHESIS_USERNAME }} + password: ${{ secrets.ANTITHESIS_PASSWORD }} + github_token: ${{ secrets.ANTITHESIS_GH_PAT }} + config_image: antithesis-xsvm-config@${{ github.events.inputs.image_tag }} + images: antithesis-xsvm-workload@${{ github.events.inputs.image_tag }};antithesis-xsvm-node@${{ github.events.inputs.image_tag }} + email_recipients: ${{ github.events.inputs.recipients }} + additional_parameters: |- + custom.duration=${{ github.events.inputs.duration }} diff --git a/.github/workflows/update-ami.py b/.github/workflows/update-ami.py index 09e8154454cc..95360c553d39 100755 --- a/.github/workflows/update-ami.py +++ b/.github/workflows/update-ami.py @@ -5,10 +5,11 @@ import uuid import re import subprocess +import sys # Globals amifile = '.github/workflows/amichange.json' -packerfile = 
".github/packer/ubuntu-jammy-x86_64-public-ami.json" +packerfile = ".github/packer/ubuntu-jammy-x86_64-public-ami.pkr.hcl" # Environment Globals product_id = os.getenv('PRODUCT_ID') @@ -19,11 +20,16 @@ def packer_build(packerfile): print("Running the packer build") - subprocess.run('/usr/local/bin/packer build ' + packerfile, shell=True) + output = subprocess.run('/usr/local/bin/packer build ' + packerfile, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if output.returncode != 0: + raise RuntimeError(f"Command returned with code: {output.returncode}") def packer_build_update(packerfile): print("Creating packer AMI image for Marketplace") - output = subprocess.run('/usr/local/bin/packer build ' + packerfile, shell=True, stdout=subprocess.PIPE) + output = subprocess.run('/usr/local/bin/packer build ' + packerfile, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if output.returncode != 0: + raise RuntimeError(f"Command returned with code: {output.returncode}") + found = re.findall('ami-[a-z0-9]*', str(output.stdout)) if found: @@ -76,10 +82,16 @@ def update_ami(amifile, amiid): print(f"An error occurred while updating AMI delivery options: {e}") def main(): - if skip_create_ami == "True": - packer_build(packerfile) - else: - update_ami(amifile, packer_build_update(packerfile)) + try: + if skip_create_ami == "True": + packer_build(packerfile) + else: + update_ami(amifile, packer_build_update(packerfile)) + + print("Ran packer build and update ami successfully") + except Exception as e: + print(f"An error occurred while running packer") + sys.exit(5) if __name__ == '__main__': main() diff --git a/.golangci.yml b/.golangci.yml index 8475a75ce08c..a1991abd29aa 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,11 +2,6 @@ run: timeout: 10m - # Enables skipping of directories: - # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - # Default: true - skip-dirs-use-default: false - # If set we pass it to "go list -mod={option}". 
From "go help modules": # If invoked with -mod=readonly, the go command is disallowed from the implicit # automatic updating of go.mod described above. Instead, it fails when any changes @@ -36,6 +31,11 @@ issues: # Default: 3 max-same-issues: 0 + # Enables skipping of directories: + # - vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + # Default: true + exclude-dirs-use-default: false + linters: disable-all: true enable: @@ -86,12 +86,14 @@ linters-settings: rules: packages: deny: - - pkg: "io/ioutil" - desc: io/ioutil is deprecated. Use package io or os instead. - - pkg: "github.com/stretchr/testify/assert" - desc: github.com/stretchr/testify/require should be used instead. + - pkg: "container/list" + desc: github.com/ava-labs/avalanchego/utils/linked should be used instead. - pkg: "github.com/golang/mock/gomock" desc: go.uber.org/mock/gomock should be used instead. + - pkg: "github.com/stretchr/testify/assert" + desc: github.com/stretchr/testify/require should be used instead. + - pkg: "io/ioutil" + desc: io/ioutil is deprecated. Use package io or os instead. errorlint: # Check for plain type assertions and type switches. 
asserts: false @@ -146,7 +148,14 @@ linters-settings: - name: string-format disabled: false arguments: + - ["b.Logf[0]", "/.*%.*/", "no format directive, use b.Log instead"] - ["fmt.Errorf[0]", "/.*%.*/", "no format directive, use errors.New instead"] + - ["fmt.Fprintf[1]", "/.*%.*/", "no format directive, use fmt.Fprint instead"] + - ["fmt.Printf[0]", "/.*%.*/", "no format directive, use fmt.Print instead"] + - ["fmt.Sprintf[0]", "/.*%.*/", "no format directive, use fmt.Sprint instead"] + - ["log.Fatalf[0]", "/.*%.*/", "no format directive, use log.Fatal instead"] + - ["log.Printf[0]", "/.*%.*/", "no format directive, use log.Print instead"] + - ["t.Logf[0]", "/.*%.*/", "no format directive, use t.Log instead"] # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - name: struct-tag disabled: false @@ -159,6 +168,7 @@ linters-settings: arguments: - "fmt\\.Fprint" - "fmt\\.Fprintf" + - "fmt\\.Fprintln" - "fmt\\.Print" - "fmt\\.Printf" - "fmt\\.Println" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3e509ed11064..7b57a5e6886f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ To start developing on AvalancheGo, you'll need a few things installed. -- Golang version >= 1.21.7 +- Golang version >= 1.21.11 - gcc - g++ diff --git a/Dockerfile b/Dockerfile index 62594f64c766..f7fffb848b2d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,30 @@ -# Changes to the minimum golang version must also be replicated in -# scripts/build_avalanche.sh -# Dockerfile (here) -# README.md -# go.mod +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. 
+ARG GO_VERSION + # ============= Compilation Stage ================ -FROM golang:1.21.7-bullseye AS builder +# Always use the native platform to ensure fast builds +FROM --platform=$BUILDPLATFORM golang:$GO_VERSION-bullseye AS builder WORKDIR /build + +ARG TARGETPLATFORM +ARG BUILDPLATFORM + +# Configure a cross-compiler if the target platform differs from the build platform. +# +# build_env.sh is used to capture the environmental changes required by the build step since RUN +# environment state is not otherwise persistent. +RUN if [ "$TARGETPLATFORM" = "linux/arm64" ] && [ "$BUILDPLATFORM" != "linux/arm64" ]; then \ + apt-get update && apt-get install -y gcc-aarch64-linux-gnu && \ + echo "export CC=aarch64-linux-gnu-gcc" > ./build_env.sh \ + ; elif [ "$TARGETPLATFORM" = "linux/amd64" ] && [ "$BUILDPLATFORM" != "linux/amd64" ]; then \ + apt-get update && apt-get install -y gcc-x86-64-linux-gnu && \ + echo "export CC=x86_64-linux-gnu-gcc" > ./build_env.sh \ + ; else \ + echo "export CC=gcc" > ./build_env.sh \ + ; fi + # Copy and download avalanche dependencies using go mod COPY go.mod . COPY go.sum . @@ -15,15 +33,28 @@ RUN go mod download # Copy the code into the container COPY . . -# Build avalanchego +# Ensure pre-existing builds are not available for inclusion in the final image +RUN [ -d ./build ] && rm -rf ./build/* || true + +# Build avalanchego. The build environment is configured with build_env.sh from the step +# enabling cross-compilation. ARG RACE_FLAG="" -RUN ./scripts/build.sh ${RACE_FLAG} +RUN . ./build_env.sh && \ + echo "{CC=$CC, TARGETPLATFORM=$TARGETPLATFORM, BUILDPLATFORM=$BUILDPLATFORM}" && \ + export GOARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) && \ + ./scripts/build.sh ${RACE_FLAG} + +# Create this directory in the builder to avoid requiring anything to be executed in the +# potentially emulated execution container. 
+RUN mkdir -p /avalanchego/build # ============= Cleanup Stage ================ +# Commands executed in this stage may be emulated (i.e. very slow) if TARGETPLATFORM and +# BUILDPLATFORM have different arches. FROM debian:11-slim AS execution # Maintain compatibility with previous images -RUN mkdir -p /avalanchego/build +COPY --from=builder /avalanchego/build /avalanchego/build WORKDIR /avalanchego/build # Copy the executables into the container diff --git a/README.md b/README.md index 7eec5b925b80..e6763982bce8 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ The minimum recommended hardware specification for nodes connected to Mainnet is If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.21.7 +- [Go](https://golang.org/doc/install) version >= 1.21.11 - [gcc](https://gcc.gnu.org/) - g++ @@ -159,12 +159,12 @@ To regenerate the protobuf go code, run `scripts/protobuf_codegen.sh` from the r This should only be necessary when upgrading protobuf versions or modifying .proto definition files. -To use this script, you must have [buf](https://docs.buf.build/installation) (v1.29.0), protoc-gen-go (v1.30.0) and protoc-gen-go-grpc (v1.3.0) installed. +To use this script, you must have [buf](https://docs.buf.build/installation) (v1.31.0), protoc-gen-go (v1.33.0) and protoc-gen-go-grpc (v1.3.0) installed. To install the buf dependencies: ```sh -go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30.0 +go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.33.0 go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0 ``` @@ -185,13 +185,6 @@ scripts/protobuf_codegen.sh For more information, refer to the [GRPC Golang Quick Start Guide](https://grpc.io/docs/languages/go/quickstart/). -### Running protobuf codegen from docker - -```sh -docker build -t avalanche:protobuf_codegen -f api/Dockerfile.buf . 
-docker run -t -i -v $(pwd):/opt/avalanche -w/opt/avalanche avalanche:protobuf_codegen bash -c "scripts/protobuf_codegen.sh" -``` - ### Running mock codegen To regenerate the [gomock](https://github.com/uber-go/mock) code, run `scripts/mock.gen.sh` from the root of the repo. diff --git a/RELEASES.md b/RELEASES.md index 750b7e49c60e..e616850e816c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,630 @@ # Release Notes +## [v1.11.8](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.8) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.7`. + +### APIs + +- Redesigned metrics to use labels rather than custom namespaces. + +### What's Changed + +- Remove avalanche metrics registerer from consensus context by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3087 +- Remove rejection from `consensus.Add` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3084 +- [vms/platformvm] Rename `txstest.Builder` to `txstest.WalletFactory` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2890 +- Small metrics cleanup by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3088 +- Fix race in test by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3089 +- Implement error driven snowflake hardcoded to support a single beta by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2978 +- Replace all chain namespaces with labels by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3053 +- add a metrics gauge for built block slot by @tsachiherman in https://github.com/ava-labs/avalanchego/pull/3048 +- [ci] Switch to gh workers for arm64 by @marun in https://github.com/ava-labs/avalanchego/pull/3090 +- [ci] Ensure focal arm64 builds all have their required dependencies by @marun in 
https://github.com/ava-labs/avalanchego/pull/3091 +- X-chain - consolidate tx creation in unit tests by @abi87 in https://github.com/ava-labs/avalanchego/pull/2736 +- Use netip.AddrPort rather than ips.IPPort by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3094 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.7...v1.11.8 + +## [v1.11.7](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.7) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.6`. + +### APIs + +- Added peer's `trackedSubnets` that are not locally tracked to the response from `info.peers` + +### Configs + +- Changed the undocumented `pebble` option for `--db-type` to be `pebbledb` and documented the option + +### Fixes + +- Removed repeated DB compaction during bootstrapping that caused a significant regression in bootstrapping times +- Fixed C-Chain state-sync crash +- Fixed C-Chain state-sync ETA calculation +- Fixed Subnet owner reported by `platform.getSubnets` after a subnet's owner was rotated + +### What's Changed + +- Expose canonical warp formatting function by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3049 +- Remove subnet filter from Peer.TrackedSubnets() by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2975 +- Remove optional gatherer by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3052 +- [vms/platformvm] Return the correct owner in `platform.GetSubnets` after transfer by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3054 +- Add metrics client by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3057 +- [vms/platformvm] Replace `GetSubnets` with `GetSubnetIDs` in `State` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3055 +- Implement 
`constants.VMName` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3058 +- [testing] Remove superfluous gomega dep by @marun in https://github.com/ava-labs/avalanchego/pull/3063 +- [antithesis] Enable workload instrumentation by @marun in https://github.com/ava-labs/avalanchego/pull/3059 +- Add pebbledb to docs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3061 +- [ci] Remove perpetually failing govulncheck job by @marun in https://github.com/ava-labs/avalanchego/pull/3069 +- Remove api namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3066 +- Remove unused metrics namespaces by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3062 +- Only compact after executing a large number of blocks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3065 +- Remove network namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3067 +- Remove db namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3068 +- Remove averager metrics namespace by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3072 +- chore: fix function name by @stellrust in https://github.com/ava-labs/avalanchego/pull/3075 +- Select metric by label in e2e tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3073 +- [tmpnet] Bootstrap subnets with a single node by @marun in https://github.com/ava-labs/avalanchego/pull/3005 +- [antithesis] Skip push for builder image by @marun in https://github.com/ava-labs/avalanchego/pull/3070 +- Implement label gatherer by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3074 + +### New Contributors + +- @stellrust made their first contribution in https://github.com/ava-labs/avalanchego/pull/3075 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.6...v1.11.7 + +## [v1.11.6](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.6) + +This version is 
backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.5`. + +### APIs + +- Updated cache metrics: + - `*_cache_put_sum` was replaced with `*_cache_put_time` + - `*_cache_get_sum` was replaced with `*_cache_get_time` + - `*_cache_hit` and `*_cache_miss` were removed and `*_cache_get_count` added a `result` label +- Updated db metrics: + - `*_db_{method}_count` were replaced with `*_db_calls` with a `method` label + - `*_db_{method}_sum` were replaced with `*_db_duration` with a `method` label + - `*_db_{method}_size_count` were deleted + - `*_db_{method}_size_sum` were replaced with `*_db_size` with a `method` label +- Updated p2p message compression metrics: + - `avalanche_network_codec_{type}_{op}_{direction}_time_count` were replaced with `avalanche_network_codec_compressed_count` with `direction`, `op`, and `type` labels +- Updated p2p message metrics: + - `avalanche_network_{op}_{io}` were replaced with `avalanche_network_msgs` with `compressed:"false"`, `io`, and `op` labels + - `avalanche_network_{op}_{io}_bytes` were replaced with `avalanche_network_msgs_bytes` with `io` and `op` labels + - `avalanche_network_{op}_compression_saved_{io}_bytes_sum` were replaced with `avalanche_network_msgs_bytes_saved` with `io` and `op` labels + - `avalanche_network_{op}_compression_saved_{io}_bytes_count` were replaced with `avalanche_network_msgs` with `compressed:"true"`, `io`, and `op` labels + - `avalanche_network_{op}_failed` were replaced with `avalanche_network_msgs_failed_to_send` with an `op` label +- Updated p2p sdk message metrics: + - `*_p2p_{op}_count` were replaced with `*_p2p_msg_count` with an `op` label + - `*_p2p_{op}_time` were replaced with `*_p2p_msg_time` with an `op` label +- Updated consensus message queue metrics: + - `avalanche_{chainID}_handler_unprocessed_msgs_{op}` were replaced 
with `avalanche_{chainID}_handler_unprocessed_msgs_count` with an `op` label + - `avalanche_{chainID}_handler_async_unprocessed_msgs_{op}` were replaced with `avalanche_{chainID}_handler_unprocessed_msgs_count` with an `op` label +- Updated consensus handler metrics: + - `avalanche_{chainID}_handler_{op}_count` were replaced with `avalanche_{chainID}_handler_messages` with an `op` label + - `avalanche_{chainID}_handler_{op}_msg_handling_count` was deleted + - `avalanche_{chainID}_handler_{op}_msg_handling_sum` were replaced with `avalanche_{chainID}_handler_message_handling_time` with an `op` label + - `avalanche_{chainID}_handler_{op}_sum` were replaced with `avalanche_{chainID}_handler_locking_time` +- Updated consensus sender metrics: + - `avalanche_{chainID}_{op}_failed_benched` were replaced with `avalanche_{chainID}_failed_benched` with an `op` label +- Updated consensus latency metrics: + - `avalanche_{chainID}_lat_{op}_count` were replaced with `avalanche_{chainID}_response_messages` with an `op` label + - `avalanche_{chainID}_lat_{op}_sum` were replaced with `avalanche_{chainID}_response_message_latencies` with an `op` label +- Updated X-chain metrics: + - `avalanche_X_vm_avalanche_{tx}_txs_accepted` were replaced with `avalanche_X_vm_avalanche_txs_accepted` with a `tx` label +- Updated P-chain metrics: + - `avalanche_P_vm_{tx}_txs_accepted` were replaced with `avalanche_P_vm_txs_accepted` with a `tx` label + - `avalanche_P_vm_{blk}_blks_accepted` were replaced with `avalanche_P_vm_blks_accepted` with a `blk` label + +### Fixes + +- Fixed performance regression while executing blocks in bootstrapping +- Fixed peer connection tracking in the P-chain and C-chain to re-enable tx pull gossip +- Fixed C-chain deadlock while executing blocks in bootstrapping after aborting state sync +- Fixed negative ETA while fetching blocks after aborting state sync +- Fixed C-chain snapshot initialization after state sync +- Fixed panic when running avalanchego in 
environments with an incorrectly implemented monotonic clock +- Fixed memory corruption when accessing keys and values from released pebbledb iterators +- Fixed prefixdb compaction when specifying a `nil` limit + +### What's Changed + +- Consolidate record poll by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2970 +- Update metercacher to use vectors by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2979 +- Reduce p2p sdk metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2980 +- Use vectors in message queue metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2985 +- Use vectors for p2p message metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2983 +- Simplify gossip metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2984 +- Use vectors for message handler metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2987 +- Use vector in message sender by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2988 +- Simplify go version maintenance by @marun in https://github.com/ava-labs/avalanchego/pull/2977 +- Use vector for router latency metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2989 +- Use vectors for accepted tx and block metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2990 +- fix: version application error by @jujube in https://github.com/ava-labs/avalanchego/pull/2995 +- Chore: fix some typos. 
by @hattizai in https://github.com/ava-labs/avalanchego/pull/2993 +- Cleanup meterdb metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2991 +- Cleanup compression metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2992 +- Fix antithesis image publication by @marun in https://github.com/ava-labs/avalanchego/pull/2998 +- Remove unused `Metadata` struct by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3001 +- prefixdb: fix bug with Compact nil limit by @a1k0n in https://github.com/ava-labs/avalanchego/pull/3000 +- Update go version to 1.21.10 by @marun in https://github.com/ava-labs/avalanchego/pull/3004 +- vms/txs/mempool: unify avm and platformvm mempool implementations by @lebdron in https://github.com/ava-labs/avalanchego/pull/2994 +- Use gauges for time metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3009 +- Chore: fix typos. by @cocoyeal in https://github.com/ava-labs/avalanchego/pull/3010 +- [antithesis] Refactor existing job to support xsvm test setup by @marun in https://github.com/ava-labs/avalanchego/pull/2976 +- chore: fix some function names by @cartnavoy in https://github.com/ava-labs/avalanchego/pull/3015 +- Mark nodes as connected to the P-chain networking stack by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2981 +- [antithesis] Ensure images with a prefix are pushed by @marun in https://github.com/ava-labs/avalanchego/pull/3016 +- boostrapper: compact blocks before iterating them by @a1k0n in https://github.com/ava-labs/avalanchego/pull/2997 +- Remove pre-Durango networking checks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3018 +- Repackaged upgrades times into upgrade package by @abi87 in https://github.com/ava-labs/avalanchego/pull/3019 +- Standardize peer logs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3017 +- Fix pebbledb memory corruption by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/3020 +- [vms/avm] fix linter error in benchmark : Use of weak random number generator by @tsachiherman in https://github.com/ava-labs/avalanchego/pull/3023 +- Simplify sampler interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3026 +- [build] Update linter version by @tsachiherman in https://github.com/ava-labs/avalanchego/pull/3024 +- fix broken link. by @cocoyeal in https://github.com/ava-labs/avalanchego/pull/3028 +- `gossipping` -> `gossiping` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3033 +- [tmpnet] Ensure tmpnet compatibility with windows by @marun in https://github.com/ava-labs/avalanchego/pull/3002 +- Fix negative ETA caused by rollback in vm.SetState by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3036 +- [tmpnet] Enable single node networks by @marun in https://github.com/ava-labs/avalanchego/pull/3003 +- P-chain - introducing fees calculators by @abi87 in https://github.com/ava-labs/avalanchego/pull/2698 +- Change default staking key from RSA 4096 to secp256r1 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3025 +- Fix ACP links by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3037 +- Prevent unnecessary bandwidth from activated ACPs by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/3031 +- [antithesis] Add test setup for xsvm by @marun in https://github.com/ava-labs/avalanchego/pull/2982 +- [antithesis] Ensure node image is pushed by @marun in https://github.com/ava-labs/avalanchego/pull/3042 +- Cleanup fee config passing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3043 +- Fix typo fix by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3044 +- Grab iterator at previously executed height by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/3045 +- Verify signatures during Parse by @StephenButtolph in 
https://github.com/ava-labs/avalanchego/pull/3046 + +### New Contributors + +- @jujube made their first contribution in https://github.com/ava-labs/avalanchego/pull/2995 +- @hattizai made their first contribution in https://github.com/ava-labs/avalanchego/pull/2993 +- @a1k0n made their first contribution in https://github.com/ava-labs/avalanchego/pull/3000 +- @lebdron made their first contribution in https://github.com/ava-labs/avalanchego/pull/2994 +- @cocoyeal made their first contribution in https://github.com/ava-labs/avalanchego/pull/3010 +- @cartnavoy made their first contribution in https://github.com/ava-labs/avalanchego/pull/3015 +- @tsachiherman made their first contribution in https://github.com/ava-labs/avalanchego/pull/3023 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.5...v1.11.6 + +## [v1.11.5](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.5) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with versions `v1.11.3-v1.11.4`. 
+ +### APIs + +- Renamed metric `avalanche_network_validator_ips` to `avalanche_network_tracked_ips` + +### Configs + +- Removed `--snow-virtuous-commit-threshold` +- Removed `--snow-rogue-commit-threshold` + +### Fixes + +- Fixed increased outbound PeerList messages when specifying custom bootstrap IDs +- Fixed CPU spike when disconnected from the network during bootstrapping fetching +- Fixed topological sort in vote calculation +- Fixed job dependency handling for transitively rejected blocks +- Prevented creation of unnecessary consensus polls during the issuance of a block + +### What's Changed + +- Remove duplicate metrics increment by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2926 +- Optimize merkledb metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2927 +- Optimize intermediateNodeDB.constructDBKey by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2928 +- [vms/proposervm] Remove `getForkHeight()` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2929 +- Improve logging of startup and errors in bootstrapping by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2933 +- Add hashing interface to merkledb by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2930 +- Assign instead of append to `keys` slice by @danlaine in https://github.com/ava-labs/avalanchego/pull/2932 +- Remove uptimes from Pong messages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2936 +- Enable creation of multi-arch docker images by @marun in https://github.com/ava-labs/avalanchego/pull/2914 +- Improve networking README by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2937 +- Specify golang patch version in go.mod by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2938 +- Include consensus decisions into logs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2943 +- CI: ensure image build job is compatible 
with merge queue by @marun in https://github.com/ava-labs/avalanchego/pull/2941 +- Remove unused `validators.Manager` mock by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2944 +- Split ManuallyTrack into ManuallyTrack and ManuallyGossip by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2940 +- Sync primary network checkpoints during bootstrapping by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2752 +- [ci] Add govulncheck job and update x/net as per its recommendation by @marun in https://github.com/ava-labs/avalanchego/pull/2948 +- [tmpnet] Add network reuse to e2e fixture by @marun in https://github.com/ava-labs/avalanchego/pull/2935 +- `e2e`: Add basic warp test with xsvm by @marun in https://github.com/ava-labs/avalanchego/pull/2043 +- Improve bootstrapping peer selection by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2946 +- Cleanup avalanche bootstrapping fetching by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2947 +- Add manager validator set callbacks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2950 +- chore: fix function names in comment by @socialsister in https://github.com/ava-labs/avalanchego/pull/2957 +- [ci] Fix conditional guarding monitoring configuration by @marun in https://github.com/ava-labs/avalanchego/pull/2959 +- Cleanup consensus engine tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2953 +- Improve and test getProcessingAncestor by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2956 +- Exit topological sort earlier by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2965 +- Consolidate beta by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2949 +- Abandon decided blocks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2968 +- Bump bufbuild/buf-setup-action from 1.30.0 to 1.31.0 by @dependabot in 
https://github.com/ava-labs/avalanchego/pull/2923 +- Cleanup test block creation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2973 + +### New Contributors + +- @socialsister made their first contribution in https://github.com/ava-labs/avalanchego/pull/2957 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.4...v1.11.5 + +## [v1.11.4](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.4) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is unchanged at `35` and is compatible with version `v1.11.3`. + +### APIs + +- Removed metrics for each chainID: + - `avalanche_{chainID}_bs_eta_fetching_complete` + - `avalanche_{chainID}_block_eta_execution_complete` + - `avalanche_{chainID}_block_jobs_cache_get_count` + - `avalanche_{chainID}_block_jobs_cache_get_sum` + - `avalanche_{chainID}_block_jobs_cache_hit` + - `avalanche_{chainID}_block_jobs_cache_len` + - `avalanche_{chainID}_block_jobs_cache_miss` + - `avalanche_{chainID}_block_jobs_cache_portion_filled` + - `avalanche_{chainID}_block_jobs_cache_put_count` + - `avalanche_{chainID}_block_jobs_cache_put_sum` +- Added finer grained tracing of merkledb trie construction and hashing + - renamed `MerkleDB.view.calculateNodeIDs` to `MerkleDB.view.applyValueChanges` + - Added `MerkleDB.view.calculateNodeChanges` + - Added `MerkleDB.view.hashChangedNodes` + +### Fixes + +- Fixed p2p SDK handling of cancelled `AppRequest` messages +- Fixed merkledb crash recovery + +### What's Changed + +- Bump github.com/consensys/gnark-crypto from 0.10.0 to 0.12.1 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2862 +- Push antithesis images by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2864 +- Revert removal of legacy P-chain block parsing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2866 +- `tmpnet`: Ensure 
nodes are properly detached from the parent process by @marun in https://github.com/ava-labs/avalanchego/pull/2859 +- indicies -> indices by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2873 +- Reindex P-chain blocks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2869 +- Add detail to tmpnet metrics documentation by @marun in https://github.com/ava-labs/avalanchego/pull/2854 +- docs migration by @meaghanfitzgerald in https://github.com/ava-labs/avalanchego/pull/2845 +- Implement interval tree to replace bootstrapping jobs queue by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2756 +- Cleanup codec constants by @abi87 in https://github.com/ava-labs/avalanchego/pull/2699 +- Update health API readme by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2875 +- `tmpnet`: Improve subnet configuration by @marun in https://github.com/ava-labs/avalanchego/pull/2871 +- Add tests for inefficient string formatting by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2878 +- [vms/platformvm] Declare `maxPageSize` in `service.go` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2881 +- [vms/platformvm] Use `wallet` sdk in `txstest.Builder` by @abi87 in https://github.com/ava-labs/avalanchego/pull/2751 +- Optimize encodeUint by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2882 +- [components/avax] Remove `AtomicUTXOManager` interface by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2884 +- Remove merkledb codec struct by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2883 +- [vms/platformvm] Minimize exported functions in `txstest` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2888 +- `ci`: Skip monitoring if secrets are not present by @marun in https://github.com/ava-labs/avalanchego/pull/2880 +- Optimize merkledb hashing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2886 +- 
[vms/platformvm] Miscellaneous testing cleanups by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2891 +- Move functions around so that encode and decode are next to each other by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2892 +- Remove memory alloc from encodeDBNode by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2893 +- Interval tree syncing integration by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2855 +- Optimize hashing of leaf nodes by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2894 +- Improve performance of marshalling small keys by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2895 +- Improve tracing of merkledb trie updates by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2897 +- Remove usage of bytes.Buffer and bytes.Reader by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2896 +- Optimize key creation in hashing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2899 +- Move bootstrapping queue out of common by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2856 +- Conditionally allocate WaitGroup memory by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2901 +- Reuse key buffers during hashing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2902 +- Remove AddEphemeralNode by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2887 +- Rename linkedhashmap package to `linked` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2907 +- [tmpnet] Misc cleanup to support xsvm warp test PR by @marun in https://github.com/ava-labs/avalanchego/pull/2903 +- Implement generic `linked.List` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2908 +- Remove full message from error logs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2912 +- Use generic linked list by @StephenButtolph 
in https://github.com/ava-labs/avalanchego/pull/2909 +- Avoid allocating new list entries by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2910 +- Remove `linked.Hashmap` locking by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2911 +- Fix MerkleDB crash recovery by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2913 +- Remove cancellation for Send*AppRequest messages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2915 +- Add `.Clear()` to `linked.Hashmap` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2917 +- Allow pre-allocating `linked.Hashmap` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2918 +- Fix comment and remove unneeded allocation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2919 +- Implement `utils.BytesPool` to replace `sync.Pool` for byte slices by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2920 +- Refactor `MerkleDB.commitChanges` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2921 +- Remove value_node_db batch by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2922 +- Remove memory allocations from merkledb iteration by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2925 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.3...v1.11.4 + +## [v1.11.3](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.3) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but encouraged. + +The plugin version is updated to `35` all plugins must update to be compatible. 
+ +### APIs + +- Removed: + - `platform.GetPendingValidators` + - `platform.GetMaxStakeAmount` + +### Configs + +- Removed avalanchego configs: + - `network-peer-list-validator-gossip-size` + - `network-peer-list-non-validator-gossip-size` + - `network-peer-list-peers-gossip-size` + - `network-peer-list-gossip-frequency` + - `consensus-accepted-frontier-gossip-validator-size` + - `consensus-accepted-frontier-gossip-non-validator-size` + - `consensus-accepted-frontier-gossip-peer-size` + - `consensus-on-accept-gossip-validator-size` + - `consensus-on-accept-gossip-non-validator-size` + - `consensus-on-accept-gossip-peer-size` +- Added P-chain, X-chain, and C-chain configs: + - `push-gossip-percent-stake` + +### Fixes + +- Fixed p2p SDK validator sampling to only return connected validators + +### What's Changed + +- Cleanup BLS naming and documentation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2798 +- Add BLS keys + signers config for local network by @Nuttymoon in https://github.com/ava-labs/avalanchego/pull/2794 +- Remove double spaces by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2802 +- [vms/platformvm] Remove `platform.getMaxStakeAmount` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2795 +- Remove unused engine interface by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2811 +- Cleanup Duplicate Transitive Constructor by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2812 +- Update minimum golang version to v1.21.8 by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2814 +- Cleanup consensus metrics by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2815 +- Remove peerlist push gossip by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2791 +- Remove bitmaskCodec by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2792 +- Use `BaseTx` in P-chain wallet by @dhrubabasu in 
https://github.com/ava-labs/avalanchego/pull/2731 +- Remove put gossip by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2790 +- [vms/platformvm] Remove `GetPendingValidators` API by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2817 +- [vms/platformvm] Remove `ErrFutureStakeTime` check in `VerifyTx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2797 +- Remove pre-Durango block building logic and verification by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2823 +- Remove pre-Durango checks in BLS key verification by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2824 +- [snow/networking] Enforce `PreferredIDAtHeight` in `Chits` messages by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2827 +- Combine AppGossip and AppGossipSpecific by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2836 +- [network/peer] Disconnect from peers who only send legacy version field by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2830 +- [vms/avm] Cleanup `GetTx` + remove state pruning logic by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2826 +- [vms/avm] Remove `snow.Context` from `Network` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2834 +- [vms/platformvm] Remove state pruning logic by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2825 +- Prevent zero length values in slices and maps in codec by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2819 +- [utils/compression] Remove gzip compressor by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2839 +- Remove legacy p2p message handling by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2833 +- Remove Durango codec check by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2818 +- Remove Pre-Durango TLS certificate parsing logic by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2831 +- 
Remove engine type handling for everything other than GetAncestors by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2800 +- P-chain: Improve GetValidatorsSet error expressivity by @abi87 in https://github.com/ava-labs/avalanchego/pull/2808 +- Add antithesis PoC workload by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2796 +- Add Antithesis docker compose file by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2838 +- merkledb metric naming nits by @danlaine in https://github.com/ava-labs/avalanchego/pull/2844 +- Allow configuring push gossip to send txs to validators by stake by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2835 +- update merkledb readme to specify key length is in bits by @danlaine in https://github.com/ava-labs/avalanchego/pull/2840 +- `tmpnet`: Add a UUID to temporary networks to support metrics collection by @marun in https://github.com/ava-labs/avalanchego/pull/2763 +- packer build by @Dirrk in https://github.com/ava-labs/avalanchego/pull/2806 +- Bump google.golang.org/protobuf from 1.32.0 to 1.33.0 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2849 +- Bump bufbuild/buf-setup-action from 1.29.0 to 1.30.0 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2842 +- Remove verify height index by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2634 +- Dynamic Fees - Add E Upgrade boilerplate by @abi87 in https://github.com/ava-labs/avalanchego/pull/2597 +- `tmpnet`: Enable collection of logs and metrics by @marun in https://github.com/ava-labs/avalanchego/pull/2820 +- P-Chain - repackaged wallet backends by @abi87 in https://github.com/ava-labs/avalanchego/pull/2757 +- X-Chain - repackaged wallet backends by @abi87 in https://github.com/ava-labs/avalanchego/pull/2762 +- Remove fallback validator height indexing by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2801 +- `tmpnet`: Reuse dynamically-allocated API port 
across restarts by @marun in https://github.com/ava-labs/avalanchego/pull/2857 +- Remove useless bootstrapping metric by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2858 +- Remove duplicate log by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2860 + +### New Contributors + +- @Nuttymoon made their first contribution in https://github.com/ava-labs/avalanchego/pull/2794 +- @Dirrk made their first contribution in https://github.com/ava-labs/avalanchego/pull/2806 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.2...v1.11.3 + +## [v1.11.2](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.2) + +This version is backwards compatible to [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0). It is optional, but strongly encouraged. + +The plugin version is updated to `34` all plugins must update to be compatible. + +### APIs + +- Removed the `ipc` API +- Removed the `auth` API +- Removed most `keystore` related methods from the `platform` API + - `platform.importKey` + - `platform.createAddress` + - `platform.addValidator` + - `platform.addDelegator` + - `platform.addSubnetValidator` + - `platform.createSubnet` + - `platform.exportAVAX` + - `platform.importAVAX` + - `platform.createBlockchain` +- Added push gossip metrics: + - `gossip_tracking{type="sent"}` + - `gossip_tracking{type="unsent"}` + - `gossip_tracking_lifetime_average` + to the following namespaces: + - `avalanche_P_vm_tx` + - `avalanche_X_vm_avalanche_tx` + - `avalanche_C_vm_sdk_atomic_tx_gossip` + - `avalanche_C_vm_sdk_eth_tx_gossip` +- Removed metrics: + - `avalanche_C_vm_eth_gossip_atomic_sent` + - `avalanche_C_vm_eth_gossip_eth_txs_sent` + - `avalanche_C_vm_eth_regossip_eth_txs_queued_attempts` + - `avalanche_C_vm_eth_regossip_eth_txs_queued_local_tx_count` + - `avalanche_C_vm_eth_regossip_eth_txs_queued_remote_tx_count` + +### Configs + +- Removed: + - `api-ipcs-enabled` + - `ipcs-chain-ids` + - `ipcs-path` + - 
`api-auth-required` + - `api-auth-password` + - `api-auth-password-file` + - `consensus-app-gossip-validator-size` + - `consensus-app-gossip-non-validator-size` + - `consensus-app-gossip-peer-size` +- Removed subnet configs: + - `appGossipValidatorSize` + - `appGossipNonValidatorSize` + - `appGossipPeerSize` +- Added X-chain and P-chain networking configs: + - `push-gossip-num-validators` + - `push-gossip-num-peers` + - `push-regossip-num-validators` + - `push-regossip-num-peers` + - `push-gossip-discarded-cache-size` + - `push-gossip-max-regossip-frequency` + - `push-gossip-frequency` +- Removed X-chain and P-chain networking configs: + - `legacy-push-gossip-cache-size` +- Added C-chain configs: + - `push-gossip-num-validators` + - `push-gossip-num-peers` + - `push-regossip-num-validators` + - `push-regossip-num-peers` + - `push-gossip-frequency` + - `pull-gossip-frequency` + - `tx-pool-lifetime` +- Removed C-chain configs: + - `tx-pool-journal` + - `tx-pool-rejournal` + - `remote-gossip-only-enabled` + - `regossip-max-txs` + - `remote-tx-gossip-only-enabled` + - `tx-regossip-max-size` + +### Fixes + +- Fixed mempool push gossip amplification + +### What's Changed + +- Remove deprecated IPC API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2760 +- `vms/platformvm`: Remove all keystore APIs except `ExportKey` and `ListAddresses` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2761 +- Remove Deprecated Auth API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2759 +- Remove `defaultAddress` helper from platformvm service tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2767 +- [trace] upgrade opentelemetry to v1.22.0 by @bianyuanop in https://github.com/ava-labs/avalanchego/pull/2702 +- Reenable the upgrade tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2769 +- [network/p2p] Redesign Push Gossip by @patrick-ogrady in 
https://github.com/ava-labs/avalanchego/pull/2772 +- Move AppGossip configs from SubnetConfig into ChainConfig by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2785 +- `merkledb` -- move compressedKey declaration to avoid usage of stale values in loop by @danlaine in https://github.com/ava-labs/avalanchego/pull/2777 +- `merkledb` -- fix `hasValue` in `recordNodeDeleted` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2779 +- `merkledb` -- rename metrics and add missing call by @danlaine in https://github.com/ava-labs/avalanchego/pull/2781 +- `merkledb` -- style nit, remove var name `newView` to reduce shadowing by @danlaine in https://github.com/ava-labs/avalanchego/pull/2784 +- `merkledb` style nits by @danlaine in https://github.com/ava-labs/avalanchego/pull/2783 +- `merkledb` comment accuracy fixes by @danlaine in https://github.com/ava-labs/avalanchego/pull/2780 +- Increase gossip size on first push by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2787 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.11.0...v1.11.2 + +## [v1.11.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0) + +This upgrade consists of the following Avalanche Community Proposals (ACPs): + +- [ACP-23](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/23-p-chain-native-transfers/README.md) P-Chain Native Transfers +- [ACP-24](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/24-shanghai-eips/README.md) Activate Shanghai EIPs on C-Chain +- [ACP-25](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/25-vm-application-errors/README.md) Virtual Machine Application Errors +- [ACP-30](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/30-avalanche-warp-x-evm/README.md) Integrate Avalanche Warp Messaging into the EVM +- [ACP-31](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/31-enable-subnet-ownership-transfer/README.md) Enable Subnet Ownership Transfer +- 
[ACP-41](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/41-remove-pending-stakers/README.md) Remove Pending Stakers +- [ACP-62](https://github.com/avalanche-foundation/ACPs/blob/main/ACPs/62-disable-addvalidatortx-and-adddelegatortx/README.md) Disable AddValidatorTx and AddDelegatorTx + +The changes in the upgrade go into effect at 11 AM ET (4 PM UTC) on Wednesday, March 6th, 2024 on Mainnet. + +**All Durango supporting Mainnet nodes should upgrade before 11 AM ET, March 6th 2024.** + +The plugin version is updated to `33` all plugins must update to be compatible. + +### APIs + +- Added `platform.getSubnet` API + +### Configs + +- Deprecated: + - `api-auth-required` + - `api-auth-password` + - `api-auth-password-file` + +### Fixes + +- Fixed potential deadlock during P-chain shutdown +- Updated the consensus engine to recover from previously misconfigured subnets without requiring a restart + +### What's Changed + +- `ci`: Upgrade all workflow actions to versions using Node 20 by @marun in https://github.com/ava-labs/avalanchego/pull/2677 +- `tmpnet`: Ensure restart after chain creation by @marun in https://github.com/ava-labs/avalanchego/pull/2675 +- Publish docker images with race detection by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2680 +- `vms/platformvm`: Remove `NewRewardValidatorTx` from `Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2676 +- `ci`: Updated shellcheck script to support autofix by @marun in https://github.com/ava-labs/avalanchego/pull/2678 +- Unblock misconfigured subnets by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2679 +- Add transfer subnet ownership functionality to wallet by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2659 +- Add ACP-62 by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2681 +- `vms/platformvm`: Add missing txs to `txs.Builder` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2663 +- 
`vms/platformvm`: Disable `AddValidatorTx` and `AddDelegatorTx` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2662 +- Remove chain router from node.Config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2683 +- Deprecate the auth API by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2684 +- Fix P-chain Shutdown deadlock by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2686 +- Cleanup ID initialization by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2690 +- Remove unused chains#beacons field by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2692 +- x/sync: Remove duplicated call to TrackBandwidth by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2694 +- Move VMAliaser into node from config by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2689 +- Fix minor errors in x/sync tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2709 +- Update minimum golang version to v1.21.7 by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2710 +- Check for github action updates in dependabot by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2715 +- Update `golangci-lint` to `v1.56.1` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2714 +- Add stringer to warp types by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2712 +- Refactor `p2p.PeerTracker` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2701 +- Bump actions/stale from 8 to 9 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2719 +- Bump github/codeql-action from 2 to 3 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2720 +- Bump bufbuild/buf-setup-action from 1.26.1 to 1.29.0 by @dependabot in https://github.com/ava-labs/avalanchego/pull/2721 +- Bump aws-actions/configure-aws-credentials from 1 to 4 by @dependabot in 
https://github.com/ava-labs/avalanchego/pull/2722 +- Manually setup golang in codeql action by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2725 +- Provide pgo file during compilation by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2724 +- P-chain - Tx builder cleanup by @abi87 in https://github.com/ava-labs/avalanchego/pull/2718 +- Refactor chain manager subnets by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2711 +- Replace snowball/snowflake interface with single shared snow interface by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2717 +- Remove duplicate IP length constant by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2733 +- Add `platform.getSubnet` API by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2704 +- Provide BLS signature in Handshake message by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2730 +- Verify BLS signature provided in Handshake messages by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2735 +- Move UTXOs definition from primary to primary/common by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2741 +- Minimize Signer interface and document Sign by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2740 +- Revert setup-go during unit tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2744 +- P-chain wallet fees UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2734 +- `merkledb` -- generalize error case to check state that should never occur by @danlaine in https://github.com/ava-labs/avalanchego/pull/2743 +- Revert setup-go to v3 on all arm actions by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2749 +- Add AppError to Sender interface by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2737 +- P-chain - Cleaned up fork switch in UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2746 
+- X-chain wallet fees UTs by @abi87 in https://github.com/ava-labs/avalanchego/pull/2747 +- Add keys values to bimap by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2754 +- fix test sender by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2755 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.19...v1.11.0 + ## [v1.10.19](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.19) This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. @@ -2594,7 +3219,7 @@ This version is backwards compatible to [v1.7.0](https://github.com/ava-labs/ava ### Networking -- Reduced default peerlist and accepted frontier gossipping +- Reduced default peerlist and accepted frontier gossiping - Increased the default at-large outbound buffer size to 32 MiB ### Metrics @@ -2676,7 +3301,7 @@ This version is backwards compatible to [v1.7.0](https://github.com/ava-labs/ava - Added `--snow-mixed-query-num-push-vdr` and `--snow-mixed-query-num-push-non-vdr` to allow parameterization of sending push queries - By default, non-validators now send only pull queries, not push queries. - By default, validators now send both pull queries and push queries upon inserting a container into consensus. Previously, nodes sent only push queries. -- Added metrics to track the amount of over gossipping of `peerlist` messages +- Added metrics to track the amount of over gossiping of `peerlist` messages - Added custom message queueing support to outbound `Peer` messages - Reused `Ping` messages to avoid needless memory allocations diff --git a/api/admin/service.go b/api/admin/service.go index a7936b2a4999..39e60c08b0a1 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -292,7 +292,7 @@ type GetLoggerLevelArgs struct { LoggerName string `json:"loggerName"` } -// GetLogLevel returns the log level and display level of all loggers. 
+// GetLoggerLevel returns the log level and display level of all loggers. func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *LoggerLevelReply) error { a.Log.Debug("API called", zap.String("service", "admin"), diff --git a/api/admin/service.md b/api/admin/service.md new file mode 100644 index 000000000000..4a2a97c29e13 --- /dev/null +++ b/api/admin/service.md @@ -0,0 +1,442 @@ +--- +tags: [AvalancheGo APIs] +description: This page is an overview of the Admin API associated with AvalancheGo. +sidebar_label: Admin API +pagination_label: Admin API +--- + +# Admin API + +This API can be used for measuring node health and debugging. + +:::info +The Admin API is disabled by default for security reasons. To run a node with the Admin API +enabled, use [config flag `--api-admin-enabled=true`](/nodes/configure/avalanchego-config-flags.md#--api-admin-enabled-boolean). + +This API set is for a specific node, it is unavailable on the [public server](/tooling/rpc-providers.md). + +::: + +## Format + +This API uses the `json 2.0` RPC format. For details, see [here](/reference/standards/guides/issuing-api-calls.md). + +## Endpoint + +```text +/ext/admin +``` + +## Methods + +### `admin.alias` + +Assign an API endpoint an alias, a different endpoint for the API. The original endpoint will still +work. This change only affects this node; other nodes will not know about this alias. + +**Signature:** + +```text +admin.alias({endpoint:string, alias:string}) -> {} +``` + +- `endpoint` is the original endpoint of the API. `endpoint` should only include the part of the + endpoint after `/ext/`. +- The API being aliased can now be called at `ext/alias`. +- `alias` can be at most 512 characters. 
+ +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.alias", + "params": { + "alias":"myAlias", + "endpoint":"bc/X" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +Now, calls to the X-Chain can be made to either `/ext/bc/X` or, equivalently, to `/ext/myAlias`. + +### `admin.aliasChain` + +Give a blockchain an alias, a different name that can be used any place the blockchain’s ID is used. + +:::note Aliasing a chain can also be done via the [Node API](/nodes/configure/avalanchego-config-flags.md#--chain-aliases-file-string). +Note that the alias is set for each chain on each node individually. In a multi-node Subnet, the +same alias should be configured on each node to use an alias across a Subnet successfully. Setting +an alias for a chain on one node does not register that alias with other nodes automatically. + +::: + +**Signature:** + +```text +admin.aliasChain( + { + chain:string, + alias:string + } +) -> {} +``` + +- `chain` is the blockchain’s ID. +- `alias` can now be used in place of the blockchain’s ID (in API endpoints, for example.) + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.aliasChain", + "params": { + "chain":"sV6o671RtkGBcno1FiaDbVcFv2sG5aVXMZYzKdP4VQAWmJQnM", + "alias":"myBlockchainAlias" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +Now, instead of interacting with the blockchain whose ID is +`sV6o671RtkGBcno1FiaDbVcFv2sG5aVXMZYzKdP4VQAWmJQnM` by making API calls to +`/ext/bc/sV6o671RtkGBcno1FiaDbVcFv2sG5aVXMZYzKdP4VQAWmJQnM`, one can also make calls to +`ext/bc/myBlockchainAlias`. 
+ +### `admin.getChainAliases` + +Returns the aliases of the chain + +**Signature:** + +```text +admin.getChainAliases( + { + chain:string + } +) -> {aliases:string[]} +``` + +- `chain` is the blockchain’s ID. + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.getChainAliases", + "params": { + "chain":"sV6o671RtkGBcno1FiaDbVcFv2sG5aVXMZYzKdP4VQAWmJQnM" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "aliases": [ + "X", + "avm", + "2eNy1mUFdmaxXNj1eQHUe7Np4gju9sJsEtWQ4MX3ToiNKuADed" + ] + }, + "id": 1 +} +``` + +### `admin.getLoggerLevel` + +Returns log and display levels of loggers. + +**Signature:** + +```text +admin.getLoggerLevel( + { + loggerName:string // optional + } +) -> { + loggerLevels: { + loggerName: { + logLevel: string, + displayLevel: string + } + } + } +``` + +- `loggerName` is the name of the logger to be returned. This is an optional argument. If not + specified, it returns all possible loggers. + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.getLoggerLevel", + "params": { + "loggerName": "C" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "loggerLevels": { + "C": { + "logLevel": "DEBUG", + "displayLevel": "INFO" + } + } + }, + "id": 1 +} +``` + +### `admin.loadVMs` + +Dynamically loads any virtual machines installed on the node as plugins. See +[here](/build/vm/intro#installing-a-vm) for more information on how to install a +virtual machine on a node. + +**Signature:** + +```sh +admin.loadVMs() -> { + newVMs: map[string][]string + failedVMs: map[string]string, +} +``` + +- `failedVMs` is only included in the response if at least one virtual machine fails to be loaded. 
+ +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.loadVMs", + "params" :{} +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "newVMs": { + "tGas3T58KzdjLHhBDMnH2TvrddhqTji5iZAMZ3RXs2NLpSnhH": ["foovm"] + }, + "failedVMs": { + "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy": "error message" + } + }, + "id": 1 +} +``` + +### `admin.lockProfile` + +Writes a profile of mutex statistics to `lock.profile`. + +**Signature:** + +```text +admin.lockProfile() -> {} +``` + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.lockProfile", + "params" :{} +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### `admin.memoryProfile` + +Writes a memory profile of the to `mem.profile`. + +**Signature:** + +```text +admin.memoryProfile() -> {} +``` + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.memoryProfile", + "params" :{} +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### `admin.setLoggerLevel` + +Sets log and display levels of loggers. + +**Signature:** + +```text +admin.setLoggerLevel( + { + loggerName: string, // optional + logLevel: string, // optional + displayLevel: string, // optional + } +) -> {} +``` + +- `loggerName` is the logger's name to be changed. This is an optional parameter. If not specified, + it changes all possible loggers. +- `logLevel` is the log level of written logs, can be omitted. +- `displayLevel` is the log level of displayed logs, can be omitted. + +`logLevel` and `displayLevel` cannot be omitted at the same time. 
+ +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.setLoggerLevel", + "params": { + "loggerName": "C", + "logLevel": "DEBUG", + "displayLevel": "INFO" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### `admin.startCPUProfiler` + +Start profiling the CPU utilization of the node. To stop, call `admin.stopCPUProfiler`. On stop, +writes the profile to `cpu.profile`. + +**Signature:** + +```text +admin.startCPUProfiler() -> {} +``` + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.startCPUProfiler", + "params" :{} +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### `admin.stopCPUProfiler` + +Stop the CPU profile that was previously started. + +**Signature:** + +```text +admin.stopCPUProfiler() -> {} +``` + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"admin.stopCPUProfiler" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/admin +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` diff --git a/api/auth/auth.go b/api/auth/auth.go deleted file mode 100644 index a8e4fa9c318a..000000000000 --- a/api/auth/auth.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package auth - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "net/http" - "path" - "strings" - "sync" - "time" - - "github.com/gorilla/rpc/v2" - - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/password" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - - jwt "github.com/golang-jwt/jwt/v4" -) - -const ( - headerKey = "Authorization" - headerValStart = "Bearer " - - // number of bytes to use when generating a new random token ID - tokenIDByteLen = 20 - - // defaultTokenLifespan is how long a token lives before it expires - defaultTokenLifespan = time.Hour * 12 - - maxEndpoints = 128 -) - -var ( - errNoToken = errors.New("auth token not provided") - errAuthHeaderNotParsable = fmt.Errorf( - `couldn't parse auth token. Header "%s" should be "%sTOKEN.GOES.HERE"`, - headerKey, - headerValStart, - ) - errInvalidSigningMethod = errors.New("auth token didn't specify the HS256 signing method correctly") - errTokenRevoked = errors.New("the provided auth token was revoked") - errTokenInsufficientPermission = errors.New("the provided auth token does not allow access to this endpoint") - errWrongPassword = errors.New("incorrect password") - errSamePassword = errors.New("new password can't be same as old password") - errNoEndpoints = errors.New("must name at least one endpoint") - errTooManyEndpoints = fmt.Errorf("can only name at most %d endpoints", maxEndpoints) - - _ Auth = (*auth)(nil) -) - -type Auth interface { - // Create and return a new token that allows access to each API endpoint for - // [duration] such that the API's path ends with an element of [endpoints]. - // If one of the elements of [endpoints] is "*", all APIs are accessible. 
- NewToken(pw string, duration time.Duration, endpoints []string) (string, error) - - // Revokes [token]; it will not be accepted as authorization for future API - // calls. If the token is invalid, this is a no-op. If a token is revoked - // and then the password is changed, and then changed back to the current - // password, the token will be un-revoked. Therefore, passwords shouldn't be - // re-used before previously revoked tokens have expired. - RevokeToken(pw, token string) error - - // Authenticates [token] for access to [url]. - AuthenticateToken(token, url string) error - - // Change the password required to create and revoke tokens. - // [oldPW] is the current password. - // [newPW] is the new password. It can't be the empty string and it can't be - // unreasonably long. - // Changing the password makes tokens issued under a previous password - // invalid. - ChangePassword(oldPW, newPW string) error - - // Create the API endpoint for this auth handler. - CreateHandler() (http.Handler, error) - - // WrapHandler wraps an http.Handler. Before passing a request to the - // provided handler, the auth token is authenticated. - WrapHandler(h http.Handler) http.Handler -} - -type auth struct { - // Used to mock time. - clock mockable.Clock - - log logging.Logger - endpoint string - - lock sync.RWMutex - // Can be changed via API call. 
- password password.Hash - // Set of token IDs that have been revoked - revoked set.Set[string] -} - -func New(log logging.Logger, endpoint, pw string) (Auth, error) { - a := &auth{ - log: log, - endpoint: endpoint, - } - return a, a.password.Set(pw) -} - -func NewFromHash(log logging.Logger, endpoint string, pw password.Hash) Auth { - return &auth{ - log: log, - endpoint: endpoint, - password: pw, - } -} - -func (a *auth) NewToken(pw string, duration time.Duration, endpoints []string) (string, error) { - if pw == "" { - return "", password.ErrEmptyPassword - } - if l := len(endpoints); l == 0 { - return "", errNoEndpoints - } else if l > maxEndpoints { - return "", errTooManyEndpoints - } - - a.lock.RLock() - defer a.lock.RUnlock() - - if !a.password.Check(pw) { - return "", errWrongPassword - } - - canAccessAll := false - for _, endpoint := range endpoints { - if endpoint == "*" { - canAccessAll = true - break - } - } - - idBytes := [tokenIDByteLen]byte{} - if _, err := rand.Read(idBytes[:]); err != nil { - return "", fmt.Errorf("failed to generate the unique token ID due to %w", err) - } - id := base64.RawURLEncoding.EncodeToString(idBytes[:]) - - claims := endpointClaims{ - RegisteredClaims: jwt.RegisteredClaims{ - ExpiresAt: jwt.NewNumericDate(a.clock.Time().Add(duration)), - ID: id, - }, - } - if canAccessAll { - claims.Endpoints = []string{"*"} - } else { - claims.Endpoints = endpoints - } - token := jwt.NewWithClaims(jwt.SigningMethodHS256, &claims) - return token.SignedString(a.password.Password[:]) // Sign the token and return its string repr. 
-} - -func (a *auth) RevokeToken(tokenStr, pw string) error { - if tokenStr == "" { - return errNoToken - } - if pw == "" { - return password.ErrEmptyPassword - } - - a.lock.Lock() - defer a.lock.Unlock() - - if !a.password.Check(pw) { - return errWrongPassword - } - - // See if token is well-formed and signature is right - token, err := jwt.ParseWithClaims(tokenStr, &endpointClaims{}, a.getTokenKey) - if err != nil { - return err - } - - // If the token isn't valid, it has essentially already been revoked. - if !token.Valid { - return nil - } - - claims, ok := token.Claims.(*endpointClaims) - if !ok { - return fmt.Errorf("expected auth token's claims to be type endpointClaims but is %T", token.Claims) - } - a.revoked.Add(claims.ID) - return nil -} - -func (a *auth) AuthenticateToken(tokenStr, url string) error { - a.lock.RLock() - defer a.lock.RUnlock() - - token, err := jwt.ParseWithClaims(tokenStr, &endpointClaims{}, a.getTokenKey) - if err != nil { // Probably because signature wrong - return err - } - - // Make sure this token gives access to the requested endpoint - claims, ok := token.Claims.(*endpointClaims) - if !ok { - // Error is intentionally dropped here as there is nothing left to do - // with it. 
- return fmt.Errorf("expected auth token's claims to be type endpointClaims but is %T", token.Claims) - } - - _, revoked := a.revoked[claims.ID] - if revoked { - return errTokenRevoked - } - - for _, endpoint := range claims.Endpoints { - if endpoint == "*" || strings.HasSuffix(url, endpoint) { - return nil - } - } - return errTokenInsufficientPermission -} - -func (a *auth) ChangePassword(oldPW, newPW string) error { - if oldPW == newPW { - return errSamePassword - } - - a.lock.Lock() - defer a.lock.Unlock() - - if !a.password.Check(oldPW) { - return errWrongPassword - } - if err := password.IsValid(newPW, password.OK); err != nil { - return err - } - if err := a.password.Set(newPW); err != nil { - return err - } - - // All the revoked tokens are now invalid; no need to mark specifically as - // revoked. - a.revoked.Clear() - return nil -} - -func (a *auth) CreateHandler() (http.Handler, error) { - server := rpc.NewServer() - codec := json.NewCodec() - server.RegisterCodec(codec, "application/json") - server.RegisterCodec(codec, "application/json;charset=UTF-8") - return server, server.RegisterService( - &Service{auth: a}, - "auth", - ) -} - -func (a *auth) WrapHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Don't require auth token to hit auth endpoint - if path.Base(r.URL.Path) == a.endpoint { - h.ServeHTTP(w, r) - return - } - - // Should be "Bearer AUTH.TOKEN.HERE" - rawHeader := r.Header.Get(headerKey) - if rawHeader == "" { - writeUnauthorizedResponse(w, errNoToken) - return - } - if !strings.HasPrefix(rawHeader, headerValStart) { - // Error is intentionally dropped here as there is nothing left to - // do with it. - writeUnauthorizedResponse(w, errAuthHeaderNotParsable) - return - } - // Returns actual auth token. 
Slice guaranteed to not go OOB - tokenStr := rawHeader[len(headerValStart):] - - if err := a.AuthenticateToken(tokenStr, r.URL.Path); err != nil { - writeUnauthorizedResponse(w, err) - return - } - - h.ServeHTTP(w, r) - }) -} - -// getTokenKey returns the key to use when making and parsing tokens -func (a *auth) getTokenKey(t *jwt.Token) (interface{}, error) { - if t.Method != jwt.SigningMethodHS256 { - return nil, errInvalidSigningMethod - } - return a.password.Password[:], nil -} diff --git a/api/auth/auth_test.go b/api/auth/auth_test.go deleted file mode 100644 index fce886d6ff7c..000000000000 --- a/api/auth/auth_test.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package auth - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/password" - - jwt "github.com/golang-jwt/jwt/v4" -) - -var ( - testPassword = "password!@#$%$#@!" 
- hashedPassword = password.Hash{} - unAuthorizedResponseRegex = `^{"jsonrpc":"2.0","error":{"code":-32600,"message":"(.*)"},"id":1}` - errTest = errors.New("non-nil error") - hostName = "http://127.0.0.1:9650" -) - -func init() { - if err := hashedPassword.Set(testPassword); err != nil { - panic(err) - } -} - -// Always returns 200 (http.StatusOK) -var dummyHandler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}) - -func TestNewTokenWrongPassword(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - _, err := auth.NewToken("", defaultTokenLifespan, []string{"endpoint1, endpoint2"}) - require.ErrorIs(err, password.ErrEmptyPassword) - - _, err = auth.NewToken("notThePassword", defaultTokenLifespan, []string{"endpoint1, endpoint2"}) - require.ErrorIs(err, errWrongPassword) -} - -func TestNewTokenHappyPath(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) - - now := time.Now() - auth.clock.Set(now) - - // Make a token - endpoints := []string{"endpoint1", "endpoint2", "endpoint3"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - // Parse the token - token, err := jwt.ParseWithClaims(tokenStr, &endpointClaims{}, func(*jwt.Token) (interface{}, error) { - auth.lock.RLock() - defer auth.lock.RUnlock() - return auth.password.Password[:], nil - }) - require.NoError(err) - - require.IsType(&endpointClaims{}, token.Claims) - claims := token.Claims.(*endpointClaims) - require.Equal(endpoints, claims.Endpoints) - - shouldExpireAt := jwt.NewNumericDate(now.Add(defaultTokenLifespan)) - require.Equal(shouldExpireAt, claims.ExpiresAt) -} - -func TestTokenHasWrongSig(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) - - // Make a token - endpoints := []string{"endpoint1", "endpoint2", "endpoint3"} - tokenStr, err := 
auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - // Try to parse the token using the wrong password - _, err = jwt.ParseWithClaims(tokenStr, &endpointClaims{}, func(*jwt.Token) (interface{}, error) { - auth.lock.RLock() - defer auth.lock.RUnlock() - return []byte(""), nil - }) - require.ErrorIs(err, jwt.ErrSignatureInvalid) - - // Try to parse the token using the wrong password - _, err = jwt.ParseWithClaims(tokenStr, &endpointClaims{}, func(*jwt.Token) (interface{}, error) { - auth.lock.RLock() - defer auth.lock.RUnlock() - return []byte("notThePassword"), nil - }) - require.ErrorIs(err, jwt.ErrSignatureInvalid) -} - -func TestChangePassword(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) - - password2 := "fejhkefjhefjhefhje" // #nosec G101 - var err error - - err = auth.ChangePassword("", password2) - require.ErrorIs(err, errWrongPassword) - - err = auth.ChangePassword("notThePassword", password2) - require.ErrorIs(err, errWrongPassword) - - err = auth.ChangePassword(testPassword, "") - require.ErrorIs(err, password.ErrEmptyPassword) - - require.NoError(auth.ChangePassword(testPassword, password2)) - require.True(auth.password.Check(password2)) - - password3 := "ufwhwohwfohawfhwdwd" // #nosec G101 - - err = auth.ChangePassword(testPassword, password3) - require.ErrorIs(err, errWrongPassword) - - require.NoError(auth.ChangePassword(password2, password3)) -} - -func TestRevokeToken(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) - - // Make a token - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - require.NoError(auth.RevokeToken(tokenStr, testPassword)) - require.Len(auth.revoked, 1) -} - -func TestWrapHandlerHappyPath(t *testing.T) { - require := require.New(t) - - 
auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - // Make a token - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - wrappedHandler := auth.WrapHandler(dummyHandler) - - for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusOK, rr.Code) - } -} - -func TestWrapHandlerRevokedToken(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - // Make a token - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - require.NoError(auth.RevokeToken(tokenStr, testPassword)) - - wrappedHandler := auth.WrapHandler(dummyHandler) - - for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusUnauthorized, rr.Code) - require.Contains(rr.Body.String(), errTokenRevoked.Error()) - require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) - } -} - -func TestWrapHandlerExpiredToken(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) - - auth.clock.Set(time.Now().Add(-2 * defaultTokenLifespan)) - - // Make a token that expired well in the past - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - wrappedHandler := auth.WrapHandler(dummyHandler) - - for _, endpoint := 
range endpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusUnauthorized, rr.Code) - require.Contains(rr.Body.String(), "expired") - require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) - } -} - -func TestWrapHandlerNoAuthToken(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - wrappedHandler := auth.WrapHandler(dummyHandler) - for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusUnauthorized, rr.Code) - require.Contains(rr.Body.String(), errNoToken.Error()) - require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) - } -} - -func TestWrapHandlerUnauthorizedEndpoint(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - // Make a token - endpoints := []string{"/ext/info"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - unauthorizedEndpoints := []string{"/ext/bc/X", "/ext/metrics", "", "/foo", "/ext/info/foo"} - - wrappedHandler := auth.WrapHandler(dummyHandler) - for _, endpoint := range unauthorizedEndpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusUnauthorized, rr.Code) - require.Contains(rr.Body.String(), errTokenInsufficientPermission.Error()) - require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) - } -} - -func 
TestWrapHandlerAuthEndpoint(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - // Make a token - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics", "", "/foo", "/ext/info/foo"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - wrappedHandler := auth.WrapHandler(dummyHandler) - req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650/ext/auth", strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusOK, rr.Code) -} - -func TestWrapHandlerAccessAll(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - // Make a token that allows access to all endpoints - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics", "", "/foo", "/ext/foo/info"} - tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, []string{"*"}) - require.NoError(err) - - wrappedHandler := auth.WrapHandler(dummyHandler) - for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusOK, rr.Code) - } -} - -func TestWriteUnauthorizedResponse(t *testing.T) { - require := require.New(t) - - rr := httptest.NewRecorder() - writeUnauthorizedResponse(rr, errTest) - require.Equal(http.StatusUnauthorized, rr.Code) - require.Equal(`{"jsonrpc":"2.0","error":{"code":-32600,"message":"non-nil error"},"id":1}`+"\n", rr.Body.String()) -} - -func TestWrapHandlerMutatedRevokedToken(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword) - - // Make a token - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - 
tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) - require.NoError(err) - - require.NoError(auth.RevokeToken(tokenStr, testPassword)) - - wrappedHandler := auth.WrapHandler(dummyHandler) - - for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s=", tokenStr)) // The appended = at the end looks like padding - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusUnauthorized, rr.Code) - } -} - -func TestWrapHandlerInvalidSigningMethod(t *testing.T) { - require := require.New(t) - - auth := NewFromHash(logging.NoLog{}, "auth", hashedPassword).(*auth) - - // Make a token - endpoints := []string{"/ext/info", "/ext/bc/X", "/ext/metrics"} - idBytes := [tokenIDByteLen]byte{} - _, err := rand.Read(idBytes[:]) - require.NoError(err) - id := base64.RawURLEncoding.EncodeToString(idBytes[:]) - - claims := endpointClaims{ - RegisteredClaims: jwt.RegisteredClaims{ - ExpiresAt: jwt.NewNumericDate(auth.clock.Time().Add(defaultTokenLifespan)), - ID: id, - }, - Endpoints: endpoints, - } - token := jwt.NewWithClaims(jwt.SigningMethodHS512, &claims) - tokenStr, err := token.SignedString(auth.password.Password[:]) - require.NoError(err) - - wrappedHandler := auth.WrapHandler(dummyHandler) - - for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, hostName+endpoint, strings.NewReader("")) - req.Header.Add("Authorization", headerValStart+tokenStr) - rr := httptest.NewRecorder() - wrappedHandler.ServeHTTP(rr, req) - require.Equal(http.StatusUnauthorized, rr.Code) - require.Contains(rr.Body.String(), errInvalidSigningMethod.Error()) - require.Regexp(unAuthorizedResponseRegex, rr.Body.String()) - } -} diff --git a/api/auth/claims.go b/api/auth/claims.go deleted file mode 100644 index 1cdda3d4a224..000000000000 --- a/api/auth/claims.go +++ /dev/null @@ -1,18 +0,0 @@ -// 
Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package auth - -import ( - jwt "github.com/golang-jwt/jwt/v4" -) - -// Custom claim type used for API access token -type endpointClaims struct { - jwt.RegisteredClaims - - // Each element is an endpoint that the token allows access to - // If endpoints has an element "*", allows access to all API endpoints - // In this case, "*" should be the only element of [endpoints] - Endpoints []string `json:"endpoints,omitempty"` -} diff --git a/api/auth/response.go b/api/auth/response.go deleted file mode 100644 index eca4b39da9b8..000000000000 --- a/api/auth/response.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package auth - -import ( - "encoding/json" - "net/http" - - rpc "github.com/gorilla/rpc/v2/json2" -) - -type responseErr struct { - Code rpc.ErrorCode `json:"code"` - Message string `json:"message"` -} - -type responseBody struct { - Version string `json:"jsonrpc"` - Err responseErr `json:"error"` - ID uint8 `json:"id"` -} - -// Write a JSON-RPC formatted response saying that the API call is unauthorized. -// The response has header http.StatusUnauthorized. -// Errors while writing are ignored. -func writeUnauthorizedResponse(w http.ResponseWriter, err error) { - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusUnauthorized) - - // There isn't anything to do with the returned error, so it is dropped. - _ = json.NewEncoder(w).Encode(responseBody{ - Version: rpc.Version, - Err: responseErr{ - Code: rpc.E_INVALID_REQ, - Message: err.Error(), - }, - ID: 1, - }) -} diff --git a/api/auth/service.go b/api/auth/service.go deleted file mode 100644 index badb544c5ccb..000000000000 --- a/api/auth/service.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package auth - -import ( - "net/http" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/api" -) - -// Service that serves the Auth API functionality. -type Service struct { - auth *auth -} - -type Password struct { - Password string `json:"password"` // The authorization password -} - -type NewTokenArgs struct { - Password - // Endpoints that may be accessed with this token e.g. if endpoints is - // ["/ext/bc/X", "/ext/admin"] then the token holder can hit the X-Chain API - // and the admin API. If [Endpoints] contains an element "*" then the token - // allows access to all API endpoints. [Endpoints] must have between 1 and - // [maxEndpoints] elements - Endpoints []string `json:"endpoints"` -} - -type Token struct { - Token string `json:"token"` // The new token. Expires in [TokenLifespan]. -} - -func (s *Service) NewToken(_ *http.Request, args *NewTokenArgs, reply *Token) error { - s.auth.log.Debug("API called", - zap.String("service", "auth"), - zap.String("method", "newToken"), - ) - - var err error - reply.Token, err = s.auth.NewToken(args.Password.Password, defaultTokenLifespan, args.Endpoints) - return err -} - -type RevokeTokenArgs struct { - Password - Token -} - -func (s *Service) RevokeToken(_ *http.Request, args *RevokeTokenArgs, _ *api.EmptyReply) error { - s.auth.log.Debug("API called", - zap.String("service", "auth"), - zap.String("method", "revokeToken"), - ) - - return s.auth.RevokeToken(args.Token.Token, args.Password.Password) -} - -type ChangePasswordArgs struct { - OldPassword string `json:"oldPassword"` // Current authorization password - NewPassword string `json:"newPassword"` // New authorization password -} - -func (s *Service) ChangePassword(_ *http.Request, args *ChangePasswordArgs, _ *api.EmptyReply) error { - s.auth.log.Debug("API called", - zap.String("service", "auth"), - zap.String("method", "changePassword"), - ) - - return s.auth.ChangePassword(args.OldPassword, 
args.NewPassword) -} diff --git a/api/health/health.go b/api/health/health.go index 9997d665e77f..01661b7e85ab 100644 --- a/api/health/health.go +++ b/api/health/health.go @@ -14,6 +14,10 @@ import ( ) const ( + // CheckLabel is the label used to differentiate between health checks. + CheckLabel = "check" + // TagLabel is the label used to differentiate between health check tags. + TagLabel = "tag" // AllTag is automatically added to every registered check. AllTag = "all" // ApplicationTag checks will act as if they specified every tag that has @@ -62,23 +66,19 @@ type health struct { } func New(log logging.Logger, registerer prometheus.Registerer) (Health, error) { - readinessWorker, err := newWorker(log, "readiness", registerer) - if err != nil { - return nil, err - } - - healthWorker, err := newWorker(log, "health", registerer) - if err != nil { - return nil, err - } - - livenessWorker, err := newWorker(log, "liveness", registerer) + failingChecks := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "checks_failing", + Help: "number of currently failing health checks", + }, + []string{CheckLabel, TagLabel}, + ) return &health{ log: log, - readiness: readinessWorker, - health: healthWorker, - liveness: livenessWorker, - }, err + readiness: newWorker(log, "readiness", failingChecks), + health: newWorker(log, "health", failingChecks), + liveness: newWorker(log, "liveness", failingChecks), + }, registerer.Register(failingChecks) } func (h *health) RegisterReadinessCheck(name string, checker Checker, tags ...string) error { diff --git a/api/health/metrics.go b/api/health/metrics.go deleted file mode 100644 index fdb7b2ed813b..000000000000 --- a/api/health/metrics.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package health - -import "github.com/prometheus/client_golang/prometheus" - -type metrics struct { - // failingChecks keeps track of the number of check failing - failingChecks *prometheus.GaugeVec -} - -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { - metrics := &metrics{ - failingChecks: prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "checks_failing", - Help: "number of currently failing health checks", - }, - []string{"tag"}, - ), - } - metrics.failingChecks.WithLabelValues(AllTag).Set(0) - metrics.failingChecks.WithLabelValues(ApplicationTag).Set(0) - return metrics, registerer.Register(metrics.failingChecks) -} diff --git a/api/health/service.md b/api/health/service.md new file mode 100644 index 000000000000..7f6a787f44fe --- /dev/null +++ b/api/health/service.md @@ -0,0 +1,305 @@ +--- +tags: [AvalancheGo APIs] +description: This page is an overview of the Health API associated with AvalancheGo. This API can be used for measuring node health. +sidebar_label: Health API +pagination_label: Health API +--- + +# Health API + +This API can be used for measuring node health. + +:::info + +This API set is for a specific node; it is unavailable on the [public server](/tooling/rpc-providers.md). + +::: + +## Health Checks + +The node periodically runs all health checks, including health checks for each chain. + +The frequency at which health checks are run can be specified with the [--health-check-frequency](/nodes/configure/avalanchego-config-flags.md) flag. + +## Filterable Health Checks + +The health checks that are run by the node are filterable. You can specify which health checks +you want to see by using `tags` filters. Returned results will only include health checks that +match the specified tags and global health checks like `network`, `database` etc. +When filtered, the returned results will not show the full node health, +but only a subset of filtered health checks. 
+This means the node can still be unhealthy in unfiltered checks, even if the returned results show that the node is healthy. +AvalancheGo supports using subnetIDs as tags. + +## GET Request + +To get an HTTP status code response that indicates the node’s health, make a `GET` request. +If the node is healthy, it will return a `200` status code. +If the node is unhealthy, it will return a `503` status code. +In-depth information about the node's health is included in the response body. + +### Filtering + +To filter GET health checks, add a `tag` query parameter to the request. +The `tag` parameter is a string. +For example, to filter health results by subnetID `29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL`, +use the following query: + +```sh +curl 'http://localhost:9650/ext/health?tag=29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL' +``` + +In this example returned results will contain global health checks and health checks that are +related to subnetID `29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL`. + +**Note:** This filtering can show healthy results even if the node is unhealthy in other Chains/Subnets. + +In order to filter results by multiple tags, use multiple `tag` query parameters. +For example, to filter health results by subnetID `29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL` and +`28nrH5T2BMvNrWecFcV3mfccjs6axM1TVyqe79MCv2Mhs8kxiY` use the following query: + +```sh +curl 'http://localhost:9650/ext/health?tag=29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL&tag=28nrH5T2BMvNrWecFcV3mfccjs6axM1TVyqe79MCv2Mhs8kxiY' +``` + +The returned results will include health checks for both subnetIDs as well as global health checks. + +### Endpoints + +The available endpoints for GET requests are: + +- `/ext/health` returns a holistic report of the status of the node. + **Most operators should monitor this status.** +- `/ext/health/health` is the same as `/ext/health`. 
+- `/ext/health/readiness` returns healthy once the node has finished initializing. +- `/ext/health/liveness` returns healthy once the endpoint is available. + +## JSON RPC Request + +### Format + +This API uses the `json 2.0` RPC format. For more information on making JSON RPC calls, see +[here](/reference/standards/guides/issuing-api-calls.md). + +### Endpoint + +```text +/ext/health +``` + +### Methods + +#### `health.health` + +This method returns the last set of health check results. + +**Example Call:** + +```sh +curl -H 'Content-Type: application/json' --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"health.health", + "params": { + "tags": ["11111111111111111111111111111111LpoYY", "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL"] + } +}' 'http://localhost:9650/ext/health' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "checks": { + "C": { + "message": { + "engine": { + "consensus": { + "lastAcceptedHeight": 31273749, + "lastAcceptedID": "2Y4gZGzQnu8UjnHod8j1BLewHFVEbzhULPNzqrSWEHkHNqDrYL", + "longestProcessingBlock": "0s", + "processingBlocks": 0 + }, + "vm": null + }, + "networking": { + "percentConnected": 0.9999592612587486 + } + }, + "timestamp": "2024-03-26T19:44:45.2931-04:00", + "duration": 20375 + }, + "P": { + "message": { + "engine": { + "consensus": { + "lastAcceptedHeight": 142517, + "lastAcceptedID": "2e1FEPCBEkG2Q7WgyZh1v4nt3DXj1HDbDthyhxdq2Ltg3shSYq", + "longestProcessingBlock": "0s", + "processingBlocks": 0 + }, + "vm": null + }, + "networking": { + "percentConnected": 0.9999592612587486 + } + }, + "timestamp": "2024-03-26T19:44:45.293115-04:00", + "duration": 8750 + }, + "X": { + "message": { + "engine": { + "consensus": { + "lastAcceptedHeight": 24464, + "lastAcceptedID": "XuFCsGaSw9cn7Vuz5e2fip4KvP46Xu53S8uDRxaC2QJmyYc3w", + "longestProcessingBlock": "0s", + "processingBlocks": 0 + }, + "vm": null + }, + "networking": { + "percentConnected": 0.9999592612587486 + } + }, + "timestamp": 
"2024-03-26T19:44:45.29312-04:00", + "duration": 23291 + }, + "bootstrapped": { + "message": [], + "timestamp": "2024-03-26T19:44:45.293078-04:00", + "duration": 3375 + }, + "database": { + "timestamp": "2024-03-26T19:44:45.293102-04:00", + "duration": 1959 + }, + "diskspace": { + "message": { + "availableDiskBytes": 227332591616 + }, + "timestamp": "2024-03-26T19:44:45.293106-04:00", + "duration": 3042 + }, + "network": { + "message": { + "connectedPeers": 284, + "sendFailRate": 0, + "timeSinceLastMsgReceived": "293.098ms", + "timeSinceLastMsgSent": "293.098ms" + }, + "timestamp": "2024-03-26T19:44:45.2931-04:00", + "duration": 2333 + }, + "router": { + "message": { + "longestRunningRequest": "66.90725ms", + "outstandingRequests": 3 + }, + "timestamp": "2024-03-26T19:44:45.293097-04:00", + "duration": 3542 + } + }, + "healthy": true + }, + "id": 1 +} +``` + +In this example response, every check has passed. So, the node is healthy. + +**Response Explanation:** + +- `checks` is a list of health check responses. + - A check response may include a `message` with additional context. + - A check response may include an `error` describing why the check failed. + - `timestamp` is the timestamp of the last health check. + - `duration` is the execution duration of the last health check, in nanoseconds. + - `contiguousFailures` is the number of times in a row this check failed. + - `timeOfFirstFailure` is the time this check first failed. +- `healthy` is true all the health checks are passing. + +#### `health.readiness` + +This method returns the last evaluation of the startup health check results. 
+ +**Example Call:** + +```sh +curl -H 'Content-Type: application/json' --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"health.readiness", + "params": { + "tags": ["11111111111111111111111111111111LpoYY", "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL"] + } +}' 'http://localhost:9650/ext/health' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "checks": { + "bootstrapped": { + "message": [], + "timestamp": "2024-03-26T20:02:45.299114-04:00", + "duration": 2834 + } + }, + "healthy": true + }, + "id": 1 +} +``` + +In this example response, every check has passed. So, the node has finished the startup process. + +**Response Explanation:** + +- `checks` is a list of health check responses. + - A check response may include a `message` with additional context. + - A check response may include an `error` describing why the check failed. + - `timestamp` is the timestamp of the last health check. + - `duration` is the execution duration of the last health check, in nanoseconds. + - `contiguousFailures` is the number of times in a row this check failed. + - `timeOfFirstFailure` is the time this check first failed. +- `healthy` is true all the health checks are passing. + +#### `health.liveness` + +This method returns healthy. + +**Example Call:** + +```sh +curl -H 'Content-Type: application/json' --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"health.liveness" +}' 'http://localhost:9650/ext/health' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "checks": {}, + "healthy": true + }, + "id": 1 +} +``` + +In this example response, the node was able to handle the request and mark the service as healthy. + +**Response Explanation:** + +- `checks` is an empty list. +- `healthy` is true. 
diff --git a/api/health/worker.go b/api/health/worker.go index 91fad853b94b..ee9b7bbe85d6 100644 --- a/api/health/worker.go +++ b/api/health/worker.go @@ -28,11 +28,11 @@ var ( ) type worker struct { - log logging.Logger - namespace string - metrics *metrics - checksLock sync.RWMutex - checks map[string]*taggedChecker + log logging.Logger + name string + failingChecks *prometheus.GaugeVec + checksLock sync.RWMutex + checks map[string]*taggedChecker resultsLock sync.RWMutex results map[string]Result @@ -53,19 +53,25 @@ type taggedChecker struct { func newWorker( log logging.Logger, - namespace string, - registerer prometheus.Registerer, -) (*worker, error) { - metrics, err := newMetrics(namespace, registerer) + name string, + failingChecks *prometheus.GaugeVec, +) *worker { + // Initialize the number of failing checks to 0 for all checks + for _, tag := range []string{AllTag, ApplicationTag} { + failingChecks.With(prometheus.Labels{ + CheckLabel: name, + TagLabel: tag, + }).Set(0) + } return &worker{ - log: log, - namespace: namespace, - metrics: metrics, - checks: make(map[string]*taggedChecker), - results: make(map[string]Result), - closer: make(chan struct{}), - tags: make(map[string]set.Set[string]), - }, err + log: log, + name: name, + failingChecks: failingChecks, + checks: make(map[string]*taggedChecker), + results: make(map[string]Result), + closer: make(chan struct{}), + tags: make(map[string]set.Set[string]), + } } func (w *worker) RegisterCheck(name string, check Checker, tags ...string) error { @@ -107,7 +113,7 @@ func (w *worker) RegisterCheck(name string, check Checker, tags ...string) error // Whenever a new check is added - it is failing w.log.Info("registered new check and initialized its state to failing", - zap.String("namespace", w.namespace), + zap.String("name", w.name), zap.String("name", name), zap.Strings("tags", tags), ) @@ -244,7 +250,7 @@ func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, if prevResult.Error 
== nil { w.log.Warn("check started failing", - zap.String("namespace", w.namespace), + zap.String("name", w.name), zap.String("name", name), zap.Strings("tags", check.tags), zap.Error(err), @@ -253,7 +259,7 @@ func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, } } else if prevResult.Error != nil { w.log.Info("check started passing", - zap.String("namespace", w.namespace), + zap.String("name", w.name), zap.String("name", name), zap.Strings("tags", check.tags), ) @@ -271,7 +277,10 @@ func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { if tc.isApplicationCheck { // Note: [w.tags] will include AllTag. for tag := range w.tags { - gauge := w.metrics.failingChecks.WithLabelValues(tag) + gauge := w.failingChecks.With(prometheus.Labels{ + CheckLabel: w.name, + TagLabel: tag, + }) if healthy { gauge.Dec() } else { @@ -285,7 +294,10 @@ func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { } } else { for _, tag := range tc.tags { - gauge := w.metrics.failingChecks.WithLabelValues(tag) + gauge := w.failingChecks.With(prometheus.Labels{ + CheckLabel: w.name, + TagLabel: tag, + }) if healthy { gauge.Dec() } else { @@ -297,7 +309,10 @@ func (w *worker) updateMetrics(tc *taggedChecker, healthy bool, register bool) { } } } - gauge := w.metrics.failingChecks.WithLabelValues(AllTag) + gauge := w.failingChecks.With(prometheus.Labels{ + CheckLabel: w.name, + TagLabel: AllTag, + }) if healthy { gauge.Dec() } else { diff --git a/api/info/client.go b/api/info/client.go index 6caafd422233..15812cd5c213 100644 --- a/api/info/client.go +++ b/api/info/client.go @@ -5,6 +5,7 @@ package info import ( "context" + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -19,7 +20,7 @@ var _ Client = (*client)(nil) type Client interface { GetNodeVersion(context.Context, ...rpc.Option) (*GetNodeVersionReply, error) GetNodeID(context.Context, ...rpc.Option) (ids.NodeID, *signer.ProofOfPossession, error) - 
GetNodeIP(context.Context, ...rpc.Option) (string, error) + GetNodeIP(context.Context, ...rpc.Option) (netip.AddrPort, error) GetNetworkID(context.Context, ...rpc.Option) (uint32, error) GetNetworkName(context.Context, ...rpc.Option) (string, error) GetBlockchainID(context.Context, string, ...rpc.Option) (ids.ID, error) @@ -54,7 +55,7 @@ func (c *client) GetNodeID(ctx context.Context, options ...rpc.Option) (ids.Node return res.NodeID, res.NodePOP, err } -func (c *client) GetNodeIP(ctx context.Context, options ...rpc.Option) (string, error) { +func (c *client) GetNodeIP(ctx context.Context, options ...rpc.Option) (netip.AddrPort, error) { res := &GetNodeIPReply{} err := c.requester.SendRequest(ctx, "info.getNodeIP", struct{}{}, res, options...) return res.IP, err diff --git a/api/info/service.go b/api/info/service.go index 929251d25aab..fd0117c5a088 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "net/http" + "net/netip" "github.com/gorilla/rpc/v2" "go.uber.org/zap" @@ -17,8 +18,8 @@ import ( "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -37,7 +38,7 @@ type Info struct { Parameters log logging.Logger validators validators.Manager - myIP ips.DynamicIPPort + myIP *utils.Atomic[netip.AddrPort] networking network.Network chainManager chains.Manager vmManager vms.Manager @@ -67,7 +68,7 @@ func NewService( validators validators.Manager, chainManager chains.Manager, vmManager vms.Manager, - myIP ips.DynamicIPPort, + myIP *utils.Atomic[netip.AddrPort], network network.Network, benchlist benchlist.Manager, ) (http.Handler, error) { @@ -144,7 
+145,7 @@ type GetNetworkIDReply struct { // GetNodeIPReply are the results from calling GetNodeIP type GetNodeIPReply struct { - IP string `json:"ip"` + IP netip.AddrPort `json:"ip"` } // GetNodeIP returns the IP of this node @@ -154,7 +155,7 @@ func (i *Info) GetNodeIP(_ *http.Request, _ *struct{}, reply *GetNodeIPReply) er zap.String("method", "getNodeIP"), ) - reply.IP = i.myIP.IPPort().String() + reply.IP = i.myIP.Get() return nil } diff --git a/api/info/service.md b/api/info/service.md new file mode 100644 index 000000000000..d7e70e269dff --- /dev/null +++ b/api/info/service.md @@ -0,0 +1,689 @@ +--- +tags: [AvalancheGo APIs] +description: This page is an overview of the Info API associated with AvalancheGo. +sidebar_label: Info API +pagination_label: Info API +--- + +# Info API + +This API can be used to access basic information about the node. + +## Format + +This API uses the `json 2.0` RPC format. For more information on making JSON RPC calls, see +[here](/reference/standards/guides/issuing-api-calls.md). 
+ +## Endpoint + +```text +/ext/info +``` + +## Methods + +### `info.acps` + +Returns peer preferences for Avalanche Community Proposals (ACPs) + +**Signature:** + +```go +info.acps() -> { + acps: map[uint32]{ + supportWeight: uint64 + supporters: set[string] + objectWeight: uint64 + objectors: set[string] + abstainWeight: uint64 + } +} +``` + +**Example Call:** + +```sh +curl -sX POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.acps", + "params" :{} +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "acps": { + "23": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + }, + "24": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + }, + "25": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + }, + "30": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + }, + "31": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + }, + "41": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + }, + "62": { + "supportWeight": "0", + "supporters": [], + "objectWeight": "0", + "objectors": [], + "abstainWeight": "161147778098286584" + } + } + }, + "id": 1 +} +``` + +### `info.isBootstrapped` + +Check whether a given chain is done bootstrapping + +**Signature:** + +```sh +info.isBootstrapped({chain: string}) -> {isBootstrapped: bool} +``` + +`chain` is the ID or alias of a chain. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.isBootstrapped", + "params": { + "chain":"X" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "isBootstrapped": true + }, + "id": 1 +} +``` + +### `info.getBlockchainID` + +Given a blockchain’s alias, get its ID. (See [`admin.aliasChain`](/reference/avalanchego/admin-api.md#adminaliaschain).) + +**Signature:** + +```sh +info.getBlockchainID({alias:string}) -> {blockchainID:string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getBlockchainID", + "params": { + "alias":"X" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "blockchainID": "sV6o671RtkGBcno1FiaDbVcFv2sG5aVXMZYzKdP4VQAWmJQnM" + } +} +``` + +### `info.getNetworkID` + +Get the ID of the network this node is participating in. + +**Signature:** + +```sh +info.getNetworkID() -> {networkID:int} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getNetworkID" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "networkID": "2" + } +} +``` + +Network ID of 1 = Mainnet +Network ID of 5 = Fuji (testnet) + +### `info.getNetworkName` + +Get the name of the network this node is participating in. 
+ +**Signature:** + +```sh +info.getNetworkName() -> {networkName:string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getNetworkName" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "networkName": "local" + } +} +``` + +### `info.getNodeID` + +Get the ID, the BLS key, and the proof of possession(BLS signature) of this node. + +:::info +This endpoint set is for a specific node, it is unavailable on the [public server](/tooling/rpc-providers.md). +::: + +**Signature:** + +```sh +info.getNodeID() -> { + nodeID: string, + nodePOP: { + publicKey: string, + proofOfPossession: string + } +} +``` + +- `nodeID` Node ID is the unique identifier of the node that you set to act as a validator on the + Primary Network. +- `nodePOP` is this node's BLS key and proof of possession. Nodes must register a BLS key to act as + a validator on the Primary Network. Your node's POP is logged on startup and is accessible over this endpoint. + - `publicKey` is the 48 byte hex representation of the BLS key. + - `proofOfPossession` is the 96 byte hex representation of the BLS signature. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getNodeID" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "nodeID": "NodeID-5mb46qkSBj81k9g9e4VFjGGSbaaSLFRzD", + "nodePOP": { + "publicKey": "0x8f95423f7142d00a48e1014a3de8d28907d420dc33b3052a6dee03a3f2941a393c2351e354704ca66a3fc29870282e15", + "proofOfPossession": "0x86a3ab4c45cfe31cae34c1d06f212434ac71b1be6cfe046c80c162e057614a94a5bc9f1ded1a7029deb0ba4ca7c9b71411e293438691be79c2dbf19d1ca7c3eadb9c756246fc5de5b7b89511c7d7302ae051d9e03d7991138299b5ed6a570a98" + } + }, + "id": 1 +} +``` + +### `info.getNodeIP` + +Get the IP of this node. 
+ +:::info +This endpoint set is for a specific node, it is unavailable on the [public server](/tooling/rpc-providers.md). +::: + +**Signature:** + +```text +info.getNodeIP() -> {ip: string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getNodeIP" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "ip": "192.168.1.1:9651" + }, + "id": 1 +} +``` + +### `info.getNodeVersion` + +Get the version of this node. + +**Signature:** + +```sh +info.getNodeVersion() -> { + version: string, + databaseVersion: string, + gitCommit: string, + vmVersions: map[string]string, + rpcProtocolVersion: string, +} +``` + +where: + +- `version` is this node's version +- `databaseVersion` is the version of the database this node is using +- `gitCommit` is the Git commit that this node was built from +- `vmVersions` is map where each key/value pair is the name of a VM, and the version of that VM this + node runs +- `rpcProtocolVersion` is the RPCChainVM protocol version + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getNodeVersion" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "version": "avalanche/1.9.1", + "databaseVersion": "v1.4.5", + "rpcProtocolVersion": "18", + "gitCommit": "79cd09ba728e1cecef40acd60702f0a2d41ea404", + "vmVersions": { + "avm": "v1.9.1", + "evm": "v0.11.1", + "platform": "v1.9.1" + } + }, + "id": 1 +} +``` + +### `info.getTxFee` + +Get the fees of the network. 
+ +**Signature:** + +```sh +info.getTxFee() -> +{ + txFee: uint64, + createAssetTxFee: uint64, + createSubnetTxFee: uint64, + transformSubnetTxFee: uint64, + createBlockchainTxFee: uint64, + addPrimaryNetworkValidatorFee: uint64, + addPrimaryNetworkDelegatorFee: uint64, + addSubnetValidatorFee: uint64, + addSubnetDelegatorFee: uint64 +} +``` + +- `txFee` is the default fee for making transactions. +- `createAssetTxFee` is the fee for creating a new asset. +- `createSubnetTxFee` is the fee for creating a new Subnet. +- `transformSubnetTxFee` is the fee for converting a PoA Subnet into a PoS Subnet. +- `createBlockchainTxFee` is the fee for creating a new blockchain. +- `addPrimaryNetworkValidatorFee` is the fee for adding a new primary network validator. +- `addPrimaryNetworkDelegatorFee` is the fee for adding a new primary network delegator. +- `addSubnetValidatorFee` is the fee for adding a new Subnet validator. +- `addSubnetDelegatorFee` is the fee for adding a new Subnet delegator. + +All fees are denominated in nAVAX. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getTxFee" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txFee": "1000000", + "createAssetTxFee": "10000000", + "createSubnetTxFee": "1000000000", + "transformSubnetTxFee": "10000000000", + "createBlockchainTxFee": "1000000000", + "addPrimaryNetworkValidatorFee": "0", + "addPrimaryNetworkDelegatorFee": "0", + "addSubnetValidatorFee": "1000000", + "addSubnetDelegatorFee": "1000000" + } +} +``` + +### `info.getVMs` + +Get the virtual machines installed on this node. + +:::info +This endpoint set is for a specific node, it is unavailable on the [public server](/tooling/rpc-providers.md). 
+::: + +**Signature:** + +```sh +info.getVMs() -> { + vms: map[string][]string +} +``` + +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.getVMs", + "params" :{} +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "vms": { + "jvYyfQTxGMJLuGWa55kdP2p2zSUYsQ5Raupu4TW34ZAUBAbtq": ["avm"], + "mgj786NP7uDwBCcq6YwThhaN8FLyybkCa4zBWTQbNgmK6k9A6": ["evm"], + "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT": ["nftfx"], + "rWhpuQPF1kb72esV2momhMuTYGkEb1oL29pt2EBXWmSy4kxnT": ["platform"], + "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy": ["propertyfx"], + "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ": ["secp256k1fx"] + } + }, + "id": 1 +} +``` + +### `info.peers` + +Get a description of peer connections. + +**Signature:** + +```sh +info.peers({ + nodeIDs: string[] // optional +}) -> +{ + numPeers: int, + peers:[]{ + ip: string, + publicIP: string, + nodeID: string, + version: string, + lastSent: string, + lastReceived: string, + benched: string[], + observedUptime: int, + observedSubnetUptime: map[string]int, + } +} +``` + +- `nodeIDs` is an optional parameter to specify what NodeID's descriptions should be returned. If + this parameter is left empty, descriptions for all active connections will be returned. If the + node is not connected to a specified NodeID, it will be omitted from the response. +- `ip` is the remote IP of the peer. +- `publicIP` is the public IP of the peer. +- `nodeID` is the prefixed Node ID of the peer. +- `version` shows which version the peer runs on. +- `lastSent` is the timestamp of last message sent to the peer. +- `lastReceived` is the timestamp of last message received from the peer. +- `benched` shows chain IDs that the peer is being benched. +- `observedUptime` is this node's primary network uptime, observed by the peer. 
+- `observedSubnetUptime` is a map of Subnet IDs to this node's Subnet uptimes, observed by the peer. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.peers", + "params": { + "nodeIDs": [] + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "numPeers": 3, + "peers": [ + { + "ip": "206.189.137.87:9651", + "publicIP": "206.189.137.87:9651", + "nodeID": "NodeID-8PYXX47kqLDe2wD4oPbvRRchcnSzMA4J4", + "version": "avalanche/1.9.4", + "lastSent": "2020-06-01T15:23:02Z", + "lastReceived": "2020-06-01T15:22:57Z", + "benched": [], + "observedUptime": "99", + "observedSubnetUptimes": {}, + "trackedSubnets": [], + "benched": [] + }, + { + "ip": "158.255.67.151:9651", + "publicIP": "158.255.67.151:9651", + "nodeID": "NodeID-C14fr1n8EYNKyDfYixJ3rxSAVqTY3a8BP", + "version": "avalanche/1.9.4", + "lastSent": "2020-06-01T15:23:02Z", + "lastReceived": "2020-06-01T15:22:34Z", + "benched": [], + "observedUptime": "75", + "observedSubnetUptimes": { + "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL": "100" + }, + "trackedSubnets": [ + "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" + ], + "benched": [] + }, + { + "ip": "83.42.13.44:9651", + "publicIP": "83.42.13.44:9651", + "nodeID": "NodeID-LPbcSMGJ4yocxYxvS2kBJ6umWeeFbctYZ", + "version": "avalanche/1.9.3", + "lastSent": "2020-06-01T15:23:02Z", + "lastReceived": "2020-06-01T15:22:55Z", + "benched": [], + "observedUptime": "95", + "observedSubnetUptimes": {}, + "trackedSubnets": [], + "benched": [] + } + ] + } +} +``` + +### `info.uptime` + +Returns the network's observed uptime of this node. +This is the only reliable source of data for your node's uptime. +Other sources may be using data gathered with incomplete (limited) information. 
+ +**Signature:** + +```sh +info.uptime({ + subnetID: string // optional +}) -> +{ + rewardingStakePercentage: float64, + weightedAveragePercentage: float64 +} +``` + +- `subnetID` is the Subnet to get the uptime of. If not provided, returns the uptime of the node on + the primary network. + +- `rewardingStakePercentage` is the percent of stake which thinks this node is above the uptime + requirement. +- `weightedAveragePercentage` is the stake-weighted average of all observed uptimes for this node. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.uptime" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "rewardingStakePercentage": "100.0000", + "weightedAveragePercentage": "99.0000" + } +} +``` + +#### **Example Subnet Call** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"info.uptime", + "params" :{ + "subnetID":"29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/info +``` + +#### **Example Subnet Response** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "rewardingStakePercentage": "74.0741", + "weightedAveragePercentage": "72.4074" + } +} +``` diff --git a/api/ipcs/client.go b/api/ipcs/client.go deleted file mode 100644 index 121c1855bc8f..000000000000 --- a/api/ipcs/client.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ipcs - -import ( - "context" - - "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/rpc" -) - -var _ Client = (*client)(nil) - -// Client interface for interacting with the IPCS endpoint -// -// Deprecated: The IPCs API is deprecated. The Index API should be used instead. 
-type Client interface { - // PublishBlockchain requests the node to begin publishing consensus and decision events - PublishBlockchain(ctx context.Context, chainID string, options ...rpc.Option) (*PublishBlockchainReply, error) - // UnpublishBlockchain requests the node to stop publishing consensus and decision events - UnpublishBlockchain(ctx context.Context, chainID string, options ...rpc.Option) error - // GetPublishedBlockchains requests the node to get blockchains being published - GetPublishedBlockchains(ctx context.Context, options ...rpc.Option) ([]ids.ID, error) -} - -// Client implementation for interacting with the IPCS endpoint -type client struct { - requester rpc.EndpointRequester -} - -// NewClient returns a Client for interacting with the IPCS endpoint -// -// Deprecated: The IPCs API is deprecated. The Index API should be used instead. -func NewClient(uri string) Client { - return &client{requester: rpc.NewEndpointRequester( - uri + "/ext/ipcs", - )} -} - -func (c *client) PublishBlockchain(ctx context.Context, blockchainID string, options ...rpc.Option) (*PublishBlockchainReply, error) { - res := &PublishBlockchainReply{} - err := c.requester.SendRequest(ctx, "ipcs.publishBlockchain", &PublishBlockchainArgs{ - BlockchainID: blockchainID, - }, res, options...) - return res, err -} - -func (c *client) UnpublishBlockchain(ctx context.Context, blockchainID string, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "ipcs.unpublishBlockchain", &UnpublishBlockchainArgs{ - BlockchainID: blockchainID, - }, &api.EmptyReply{}, options...) -} - -func (c *client) GetPublishedBlockchains(ctx context.Context, options ...rpc.Option) ([]ids.ID, error) { - res := &GetPublishedBlockchainsReply{} - err := c.requester.SendRequest(ctx, "ipcs.getPublishedBlockchains", nil, res, options...) 
- return res.Chains, err -} diff --git a/api/ipcs/service.go b/api/ipcs/service.go deleted file mode 100644 index 2d1fe7757308..000000000000 --- a/api/ipcs/service.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ipcs - -import ( - "net/http" - "sync" - - "github.com/gorilla/rpc/v2" - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/api" - "github.com/ava-labs/avalanchego/chains" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/ipcs" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/logging" -) - -type Service struct { - log logging.Logger - chainManager chains.Manager - lock sync.RWMutex - ipcs *ipcs.ChainIPCs -} - -func NewService(log logging.Logger, chainManager chains.Manager, ipcs *ipcs.ChainIPCs) (http.Handler, error) { - server := rpc.NewServer() - codec := json.NewCodec() - server.RegisterCodec(codec, "application/json") - server.RegisterCodec(codec, "application/json;charset=UTF-8") - return server, server.RegisterService( - &Service{ - log: log, - chainManager: chainManager, - ipcs: ipcs, - }, - "ipcs", - ) -} - -type PublishBlockchainArgs struct { - BlockchainID string `json:"blockchainID"` -} - -type PublishBlockchainReply struct { - ConsensusURL string `json:"consensusURL"` - DecisionsURL string `json:"decisionsURL"` -} - -// PublishBlockchain publishes the finalized accepted transactions from the -// blockchainID over the IPC -func (s *Service) PublishBlockchain(_ *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { - s.log.Warn("deprecated API called", - zap.String("service", "ipcs"), - zap.String("method", "publishBlockchain"), - logging.UserString("blockchainID", args.BlockchainID), - ) - - chainID, err := s.chainManager.Lookup(args.BlockchainID) - if err != nil { - s.log.Error("chain lookup failed", - logging.UserString("blockchainID", 
args.BlockchainID), - zap.Error(err), - ) - return err - } - - s.lock.Lock() - defer s.lock.Unlock() - - ipcs, err := s.ipcs.Publish(chainID) - if err != nil { - s.log.Error("couldn't publish chain", - logging.UserString("blockchainID", args.BlockchainID), - zap.Error(err), - ) - return err - } - - reply.ConsensusURL = ipcs.ConsensusURL() - reply.DecisionsURL = ipcs.DecisionsURL() - - return nil -} - -type UnpublishBlockchainArgs struct { - BlockchainID string `json:"blockchainID"` -} - -// UnpublishBlockchain closes publishing of a blockchainID -func (s *Service) UnpublishBlockchain(_ *http.Request, args *UnpublishBlockchainArgs, _ *api.EmptyReply) error { - s.log.Warn("deprecated API called", - zap.String("service", "ipcs"), - zap.String("method", "unpublishBlockchain"), - logging.UserString("blockchainID", args.BlockchainID), - ) - - chainID, err := s.chainManager.Lookup(args.BlockchainID) - if err != nil { - s.log.Error("chain lookup failed", - logging.UserString("blockchainID", args.BlockchainID), - zap.Error(err), - ) - return err - } - - s.lock.Lock() - defer s.lock.Unlock() - - ok, err := s.ipcs.Unpublish(chainID) - if !ok { - s.log.Error("couldn't publish chain", - logging.UserString("blockchainID", args.BlockchainID), - zap.Error(err), - ) - } - - return err -} - -type GetPublishedBlockchainsReply struct { - Chains []ids.ID `json:"chains"` -} - -// GetPublishedBlockchains returns blockchains being published -func (s *Service) GetPublishedBlockchains(_ *http.Request, _ *struct{}, reply *GetPublishedBlockchainsReply) error { - s.log.Warn("deprecated API called", - zap.String("service", "ipcs"), - zap.String("method", "getPublishedBlockchains"), - ) - - s.lock.RLock() - defer s.lock.RUnlock() - - reply.Chains = s.ipcs.GetPublishedBlockchains() - return nil -} diff --git a/api/keystore/codec.go b/api/keystore/codec.go index b925747c44ec..3f6df0cf765f 100644 --- a/api/keystore/codec.go +++ b/api/keystore/codec.go @@ -4,8 +4,6 @@ package keystore import ( - 
"time" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils/units" @@ -20,7 +18,7 @@ const ( var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewManager(maxPackerSize) if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) diff --git a/api/keystore/service.md b/api/keystore/service.md new file mode 100644 index 000000000000..76cc2ce69a56 --- /dev/null +++ b/api/keystore/service.md @@ -0,0 +1,290 @@ +--- +tags: [AvalancheGo APIs] +description: This page is an overview of the Keystore API associated with AvalancheGo. +sidebar_label: Keystore API +pagination_label: Keystore API +--- + +# Keystore API + +:::warning +Because the node operator has access to your plain-text password, you should only create a +keystore user on a node that you operate. If that node is breached, you could lose all your tokens. +Keystore APIs are not recommended for use on Mainnet. +::: + +Every node has a built-in keystore. Clients create users on the keystore, which act as identities to +be used when interacting with blockchains. A keystore exists at the node level, so if you create a +user on a node it exists _only_ on that node. However, users may be imported and exported using this +API. + +For validation and cross-chain transfer on the Mainnet, you should issue transactions through +[AvalancheJS](/tooling/avalanchejs-overview). That way control keys for your funds won't be stored on +the node, which significantly lowers the risk should a computer running a node be compromised. 
See +following docs for details: + +- Transfer AVAX Tokens Between Chains: + + - C-Chain: [export](https://github.com/ava-labs/avalanchejs/blob/master/examples/c-chain/export.ts) and + [import](https://github.com/ava-labs/avalanchejs/blob/master/examples/c-chain/import.ts) + - P-Chain: [export](https://github.com/ava-labs/avalanchejs/blob/master/examples/p-chain/export.ts) and + [import](https://github.com/ava-labs/avalanchejs/blob/master/examples/p-chain/import.ts) + - X-Chain: [export](https://github.com/ava-labs/avalanchejs/blob/master/examples/x-chain/export.ts) and + [import](https://github.com/ava-labs/avalanchejs/blob/master/examples/x-chain/import.ts) + +- [Add a Node to the Validator Set](/nodes/validate/add-a-validator) + +:::info + +This API set is for a specific node, it is unavailable on the [public server](/tooling/rpc-providers.md). + +::: + +## Format + +This API uses the `json 2.0` API format. For more information on making JSON RPC calls, see +[here](/reference/standards/guides/issuing-api-calls.md). + +## Endpoint + +```text +/ext/keystore +``` + +## Methods + +### keystore.createUser + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Create a new user with the specified username and password. + +**Signature:** + +```sh +keystore.createUser( + { + username:string, + password:string + } +) -> {} +``` + +- `username` and `password` can be at most 1024 characters. +- Your request will be rejected if `password` is too weak. `password` should be at least 8 + characters and contain upper and lower case letters as well as numbers and symbols. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"keystore.createUser", + "params" :{ + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/keystore +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### keystore.deleteUser + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Delete a user. + +**Signature:** + +```sh +keystore.deleteUser({username: string, password:string}) -> {} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"keystore.deleteUser", + "params" : { + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/keystore +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### keystore.exportUser + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Export a user. The user can be imported to another node with +[`keystore.importUser`](/reference/avalanchego/keystore-api.md#keystoreimportuser). The user’s password +remains encrypted. + +**Signature:** + +```sh +keystore.exportUser( + { + username:string, + password:string, + encoding:string //optional + } +) -> { + user:string, + encoding:string +} +``` + +`encoding` specifies the format of the string encoding user data. Can only be `hex` when a value is +provided. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"keystore.exportUser", + "params" :{ + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/keystore +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "user": "7655a29df6fc2747b0874e1148b423b954a25fcdb1f170d0ec8eb196430f7001942ce55b02a83b1faf50a674b1e55bfc00000000", + "encoding": "hex" + } +} +``` + +### keystore.importUser + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Import a user. `password` must match the user’s password. `username` doesn’t have to match the +username `user` had when it was exported. + +**Signature:** + +```sh +keystore.importUser( + { + username:string, + password:string, + user:string, + encoding:string //optional + } +) -> {} +``` + +`encoding` specifies the format of the string encoding user data. Can only be `hex` when a value is +provided. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"keystore.importUser", + "params" :{ + "username":"myUsername", + "password":"myPassword", + "user" :"0x7655a29df6fc2747b0874e1148b423b954a25fcdb1f170d0ec8eb196430f7001942ce55b02a83b1faf50a674b1e55bfc000000008cf2d869" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/keystore +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": {} +} +``` + +### keystore.listUsers + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +List the users in this keystore. 
+ +**Signature:** + +```sh +keystore.ListUsers() -> {users:[]string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"keystore.listUsers" +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/keystore +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "users": ["myUsername"] + } +} +``` diff --git a/api/metrics/client.go b/api/metrics/client.go new file mode 100644 index 000000000000..0b402622cff5 --- /dev/null +++ b/api/metrics/client.go @@ -0,0 +1,68 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metrics + +import ( + "bytes" + "context" + "fmt" + "net/http" + "net/url" + + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" +) + +// Client for requesting metrics from a remote AvalancheGo instance +type Client struct { + uri string +} + +// NewClient returns a new Metrics API Client +func NewClient(uri string) *Client { + return &Client{ + uri: uri + "/ext/metrics", + } +} + +// GetMetrics returns the metrics from the connected node. The metrics are +// returned as a map of metric family name to the metric family. 
+func (c *Client) GetMetrics(ctx context.Context) (map[string]*dto.MetricFamily, error) { + uri, err := url.Parse(c.uri) + if err != nil { + return nil, err + } + + request, err := http.NewRequestWithContext( + ctx, + http.MethodGet, + uri.String(), + bytes.NewReader(nil), + ) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := http.DefaultClient.Do(request) + if err != nil { + return nil, fmt.Errorf("failed to issue request: %w", err) + } + + // Return an error for any non successful status code + if resp.StatusCode < 200 || resp.StatusCode > 299 { + // Drop any error during close to report the original error + _ = resp.Body.Close() + return nil, fmt.Errorf("received status code: %d", resp.StatusCode) + } + + var parser expfmt.TextParser + metrics, err := parser.TextToMetricFamilies(resp.Body) + if err != nil { + // Drop any error during close to report the original error + _ = resp.Body.Close() + return nil, err + } + return metrics, resp.Body.Close() +} diff --git a/api/metrics/gatherer_test.go b/api/metrics/gatherer_test.go index 334c361ebcc0..83a438867fb9 100644 --- a/api/metrics/gatherer_test.go +++ b/api/metrics/gatherer_test.go @@ -4,14 +4,15 @@ package metrics import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" ) -var ( - hello = "hello" - world = "world" - helloWorld = "hello_world" -) +var counterOpts = prometheus.CounterOpts{ + Name: "counter", + Help: "help", +} type testGatherer struct { mfs []*dto.MetricFamily diff --git a/api/metrics/label_gatherer.go b/api/metrics/label_gatherer.go new file mode 100644 index 000000000000..3b8951a75b77 --- /dev/null +++ b/api/metrics/label_gatherer.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "errors" + "fmt" + "slices" + + "github.com/prometheus/client_golang/prometheus" + + dto "github.com/prometheus/client_model/go" +) + +var ( + _ MultiGatherer = (*prefixGatherer)(nil) + + errDuplicateGatherer = errors.New("attempt to register duplicate gatherer") +) + +// NewLabelGatherer returns a new MultiGatherer that merges metrics by adding a +// new label. +func NewLabelGatherer(labelName string) MultiGatherer { + return &labelGatherer{ + labelName: labelName, + } +} + +type labelGatherer struct { + multiGatherer + + labelName string +} + +func (g *labelGatherer) Register(labelValue string, gatherer prometheus.Gatherer) error { + g.lock.Lock() + defer g.lock.Unlock() + + if slices.Contains(g.names, labelValue) { + return fmt.Errorf("%w: for %q with label %q", + errDuplicateGatherer, + g.labelName, + labelValue, + ) + } + + g.names = append(g.names, labelValue) + g.gatherers = append(g.gatherers, &labeledGatherer{ + labelName: g.labelName, + labelValue: labelValue, + gatherer: gatherer, + }) + return nil +} + +type labeledGatherer struct { + labelName string + labelValue string + gatherer prometheus.Gatherer +} + +func (g *labeledGatherer) Gather() ([]*dto.MetricFamily, error) { + // Gather returns partially filled metrics in the case of an error. So, it + // is expected to still return the metrics in the case an error is returned. + metricFamilies, err := g.gatherer.Gather() + for _, metricFamily := range metricFamilies { + for _, metric := range metricFamily.Metric { + metric.Label = append(metric.Label, &dto.LabelPair{ + Name: &g.labelName, + Value: &g.labelValue, + }) + } + } + return metricFamilies, err +} diff --git a/api/metrics/label_gatherer_test.go b/api/metrics/label_gatherer_test.go new file mode 100644 index 000000000000..d5f30fd6529b --- /dev/null +++ b/api/metrics/label_gatherer_test.go @@ -0,0 +1,217 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func TestLabelGatherer_Gather(t *testing.T) { + const ( + labelName = "smith" + labelValueA = "rick" + labelValueB = "morty" + customLabelName = "tag" + customLabelValueA = "a" + customLabelValueB = "b" + ) + tests := []struct { + name string + labelName string + expectedMetrics []*dto.Metric + expectErr bool + }{ + { + name: "no overlap", + labelName: customLabelName, + expectedMetrics: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String(labelName), + Value: proto.String(labelValueB), + }, + { + Name: proto.String(customLabelName), + Value: proto.String(customLabelValueB), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String(labelName), + Value: proto.String(labelValueA), + }, + { + Name: proto.String(customLabelName), + Value: proto.String(customLabelValueA), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(0), + }, + }, + }, + expectErr: false, + }, + { + name: "has overlap", + labelName: labelName, + expectedMetrics: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String(labelName), + Value: proto.String(labelValueB), + }, + { + Name: proto.String(customLabelName), + Value: proto.String(customLabelValueB), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + expectErr: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + gatherer := NewLabelGatherer(labelName) + require.NotNil(gatherer) + + registerA := prometheus.NewRegistry() + require.NoError(gatherer.Register(labelValueA, registerA)) + { + counterA := prometheus.NewCounterVec( + counterOpts, + []string{test.labelName}, + ) + counterA.With(prometheus.Labels{test.labelName: 
customLabelValueA}) + require.NoError(registerA.Register(counterA)) + } + + registerB := prometheus.NewRegistry() + require.NoError(gatherer.Register(labelValueB, registerB)) + { + counterB := prometheus.NewCounterVec( + counterOpts, + []string{customLabelName}, + ) + counterB.With(prometheus.Labels{customLabelName: customLabelValueB}).Inc() + require.NoError(registerB.Register(counterB)) + } + + metrics, err := gatherer.Gather() + if test.expectErr { + require.Error(err) //nolint:forbidigo // the error is not exported + } else { + require.NoError(err) + } + require.Equal( + []*dto.MetricFamily{ + { + Name: proto.String(counterOpts.Name), + Help: proto.String(counterOpts.Help), + Type: dto.MetricType_COUNTER.Enum(), + Metric: test.expectedMetrics, + }, + }, + metrics, + ) + }) + } +} + +func TestLabelGatherer_Register(t *testing.T) { + firstLabeledGatherer := &labeledGatherer{ + labelValue: "first", + gatherer: &testGatherer{}, + } + firstLabelGatherer := func() *labelGatherer { + return &labelGatherer{ + multiGatherer: multiGatherer{ + names: []string{firstLabeledGatherer.labelValue}, + gatherers: prometheus.Gatherers{ + firstLabeledGatherer, + }, + }, + } + } + secondLabeledGatherer := &labeledGatherer{ + labelValue: "second", + gatherer: &testGatherer{ + mfs: []*dto.MetricFamily{{}}, + }, + } + secondLabelGatherer := &labelGatherer{ + multiGatherer: multiGatherer{ + names: []string{ + firstLabeledGatherer.labelValue, + secondLabeledGatherer.labelValue, + }, + gatherers: prometheus.Gatherers{ + firstLabeledGatherer, + secondLabeledGatherer, + }, + }, + } + + tests := []struct { + name string + labelGatherer *labelGatherer + labelValue string + gatherer prometheus.Gatherer + expectedErr error + expectedLabelGatherer *labelGatherer + }{ + { + name: "first registration", + labelGatherer: &labelGatherer{}, + labelValue: "first", + gatherer: firstLabeledGatherer.gatherer, + expectedErr: nil, + expectedLabelGatherer: firstLabelGatherer(), + }, + { + name: "second 
registration", + labelGatherer: firstLabelGatherer(), + labelValue: "second", + gatherer: secondLabeledGatherer.gatherer, + expectedErr: nil, + expectedLabelGatherer: secondLabelGatherer, + }, + { + name: "conflicts with previous registration", + labelGatherer: firstLabelGatherer(), + labelValue: "first", + gatherer: secondLabeledGatherer.gatherer, + expectedErr: errDuplicateGatherer, + expectedLabelGatherer: firstLabelGatherer(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.labelGatherer.Register(test.labelValue, test.gatherer) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedLabelGatherer, test.labelGatherer) + }) + } +} diff --git a/api/metrics/multi_gatherer.go b/api/metrics/multi_gatherer.go index 4bd0900a0227..b2fede55643c 100644 --- a/api/metrics/multi_gatherer.go +++ b/api/metrics/multi_gatherer.go @@ -4,92 +4,48 @@ package metrics import ( - "cmp" - "errors" "fmt" - "slices" "sync" "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/metric" - dto "github.com/prometheus/client_model/go" ) -var ( - _ MultiGatherer = (*multiGatherer)(nil) - - errReregisterGatherer = errors.New("attempt to register existing gatherer") -) - // MultiGatherer extends the Gatherer interface by allowing additional gatherers // to be registered. type MultiGatherer interface { prometheus.Gatherer // Register adds the outputs of [gatherer] to the results of future calls to - // Gather with the provided [namespace] added to the metrics. - Register(namespace string, gatherer prometheus.Gatherer) error + // Gather with the provided [name] added to the metrics. + Register(name string, gatherer prometheus.Gatherer) error } -type multiGatherer struct { - lock sync.RWMutex - gatherers map[string]prometheus.Gatherer +// Deprecated: Use NewPrefixGatherer instead. +// +// TODO: Remove once coreth is updated. 
+func NewMultiGatherer() MultiGatherer { + return NewPrefixGatherer() } -func NewMultiGatherer() MultiGatherer { - return &multiGatherer{ - gatherers: make(map[string]prometheus.Gatherer), - } +type multiGatherer struct { + lock sync.RWMutex + names []string + gatherers prometheus.Gatherers } func (g *multiGatherer) Gather() ([]*dto.MetricFamily, error) { g.lock.RLock() defer g.lock.RUnlock() - var results []*dto.MetricFamily - for namespace, gatherer := range g.gatherers { - gatheredMetrics, err := gatherer.Gather() - if err != nil { - return nil, err - } - for _, gatheredMetric := range gatheredMetrics { - var name string - if gatheredMetric.Name != nil { - name = metric.AppendNamespace(namespace, *gatheredMetric.Name) - } else { - name = namespace - } - gatheredMetric.Name = &name - results = append(results, gatheredMetric) - } - } - // Because we overwrite every metric's name, we are guaranteed that there - // are no metrics with nil names. - sortMetrics(results) - return results, nil + return g.gatherers.Gather() } -func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) error { - g.lock.Lock() - defer g.lock.Unlock() - - if existingGatherer, exists := g.gatherers[namespace]; exists { - return fmt.Errorf("%w for namespace %q; existing: %#v; new: %#v", - errReregisterGatherer, - namespace, - existingGatherer, - gatherer, - ) +func MakeAndRegister(gatherer MultiGatherer, name string) (*prometheus.Registry, error) { + reg := prometheus.NewRegistry() + if err := gatherer.Register(name, reg); err != nil { + return nil, fmt.Errorf("couldn't register %q metrics: %w", name, err) } - - g.gatherers[namespace] = gatherer - return nil -} - -func sortMetrics(m []*dto.MetricFamily) { - slices.SortFunc(m, func(i, j *dto.MetricFamily) int { - return cmp.Compare(*i.Name, *j.Name) - }) + return reg, nil } diff --git a/api/metrics/multi_gatherer_test.go b/api/metrics/multi_gatherer_test.go deleted file mode 100644 index 033e3e88b1e6..000000000000 --- 
a/api/metrics/multi_gatherer_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "testing" - - "github.com/stretchr/testify/require" - - dto "github.com/prometheus/client_model/go" -) - -func TestMultiGathererEmptyGather(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - mfs, err := g.Gather() - require.NoError(err) - require.Empty(mfs) -} - -func TestMultiGathererDuplicatedPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - og := NewOptionalGatherer() - - require.NoError(g.Register("", og)) - - err := g.Register("", og) - require.ErrorIs(err, errReregisterGatherer) - - require.NoError(g.Register("lol", og)) -} - -func TestMultiGathererAddedError(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - err: errTest, - } - - require.NoError(g.Register("", tg)) - - mfs, err := g.Gather() - require.ErrorIs(err, errTest) - require.Empty(mfs) -} - -func TestMultiGathererNoAddedPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{ - Name: &hello, - }}, - } - - require.NoError(g.Register("", tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&hello, mfs[0].Name) -} - -func TestMultiGathererAddedPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{ - Name: &world, - }}, - } - - require.NoError(g.Register(hello, tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&helloWorld, mfs[0].Name) -} - -func TestMultiGathererJustPrefix(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{}}, - } - - require.NoError(g.Register(hello, tg)) - - mfs, err 
:= g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&hello, mfs[0].Name) -} - -func TestMultiGathererSorted(t *testing.T) { - require := require.New(t) - - g := NewMultiGatherer() - - name0 := "a" - name1 := "z" - tg := &testGatherer{ - mfs: []*dto.MetricFamily{ - { - Name: &name1, - }, - { - Name: &name0, - }, - }, - } - - require.NoError(g.Register("", tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 2) - require.Equal(&name0, mfs[0].Name) - require.Equal(&name1, mfs[1].Name) -} diff --git a/api/metrics/optional_gatherer.go b/api/metrics/optional_gatherer.go deleted file mode 100644 index 686856efcc86..000000000000 --- a/api/metrics/optional_gatherer.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package metrics - -import ( - "fmt" - "sync" - - "github.com/prometheus/client_golang/prometheus" - - dto "github.com/prometheus/client_model/go" -) - -var _ OptionalGatherer = (*optionalGatherer)(nil) - -// OptionalGatherer extends the Gatherer interface by allowing the optional -// registration of a single gatherer. If no gatherer is registered, Gather will -// return no metrics and no error. If a gatherer is registered, Gather will -// return the results of calling Gather on the provided gatherer. -type OptionalGatherer interface { - prometheus.Gatherer - - // Register the provided gatherer. If a gatherer was previously registered, - // an error will be returned. 
- Register(gatherer prometheus.Gatherer) error -} - -type optionalGatherer struct { - lock sync.RWMutex - gatherer prometheus.Gatherer -} - -func NewOptionalGatherer() OptionalGatherer { - return &optionalGatherer{} -} - -func (g *optionalGatherer) Gather() ([]*dto.MetricFamily, error) { - g.lock.RLock() - defer g.lock.RUnlock() - - if g.gatherer == nil { - return nil, nil - } - return g.gatherer.Gather() -} - -func (g *optionalGatherer) Register(gatherer prometheus.Gatherer) error { - g.lock.Lock() - defer g.lock.Unlock() - - if g.gatherer != nil { - return fmt.Errorf("%w; existing: %#v; new: %#v", - errReregisterGatherer, - g.gatherer, - gatherer, - ) - } - g.gatherer = gatherer - return nil -} diff --git a/api/metrics/optional_gatherer_test.go b/api/metrics/optional_gatherer_test.go deleted file mode 100644 index 201750701313..000000000000 --- a/api/metrics/optional_gatherer_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package metrics - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - dto "github.com/prometheus/client_model/go" -) - -var errTest = errors.New("non-nil error") - -func TestOptionalGathererEmptyGather(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - - mfs, err := g.Gather() - require.NoError(err) - require.Empty(mfs) -} - -func TestOptionalGathererDuplicated(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - og := NewOptionalGatherer() - - require.NoError(g.Register(og)) - err := g.Register(og) - require.ErrorIs(err, errReregisterGatherer) -} - -func TestOptionalGathererAddedError(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - - tg := &testGatherer{ - err: errTest, - } - - require.NoError(g.Register(tg)) - - mfs, err := g.Gather() - require.ErrorIs(err, errTest) - require.Empty(mfs) -} - -func TestMultiGathererAdded(t *testing.T) { - require := require.New(t) - - g := NewOptionalGatherer() - - tg := &testGatherer{ - mfs: []*dto.MetricFamily{{ - Name: &hello, - }}, - } - - require.NoError(g.Register(tg)) - - mfs, err := g.Gather() - require.NoError(err) - require.Len(mfs, 1) - require.Equal(&hello, mfs[0].Name) -} diff --git a/api/metrics/prefix_gatherer.go b/api/metrics/prefix_gatherer.go new file mode 100644 index 000000000000..fae7adb26e84 --- /dev/null +++ b/api/metrics/prefix_gatherer.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "errors" + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/utils/metric" + + dto "github.com/prometheus/client_model/go" +) + +var ( + _ MultiGatherer = (*prefixGatherer)(nil) + + errOverlappingNamespaces = errors.New("prefix could create overlapping namespaces") +) + +// NewPrefixGatherer returns a new MultiGatherer that merges metrics by adding a +// prefix to their names. +func NewPrefixGatherer() MultiGatherer { + return &prefixGatherer{} +} + +type prefixGatherer struct { + multiGatherer +} + +func (g *prefixGatherer) Register(prefix string, gatherer prometheus.Gatherer) error { + g.lock.Lock() + defer g.lock.Unlock() + + for _, existingPrefix := range g.names { + if eitherIsPrefix(prefix, existingPrefix) { + return fmt.Errorf("%w: %q conflicts with %q", + errOverlappingNamespaces, + prefix, + existingPrefix, + ) + } + } + + g.names = append(g.names, prefix) + g.gatherers = append(g.gatherers, &prefixedGatherer{ + prefix: prefix, + gatherer: gatherer, + }) + return nil +} + +type prefixedGatherer struct { + prefix string + gatherer prometheus.Gatherer +} + +func (g *prefixedGatherer) Gather() ([]*dto.MetricFamily, error) { + // Gather returns partially filled metrics in the case of an error. So, it + // is expected to still return the metrics in the case an error is returned. + metricFamilies, err := g.gatherer.Gather() + for _, metricFamily := range metricFamilies { + metricFamily.Name = proto.String(metric.AppendNamespace( + g.prefix, + metricFamily.GetName(), + )) + } + return metricFamilies, err +} + +// eitherIsPrefix returns true if either [a] is a prefix of [b] or [b] is a +// prefix of [a]. +// +// This function accounts for the usage of the namespace boundary, so "hello" is +// not considered a prefix of "helloworld". However, "hello" is considered a +// prefix of "hello_world". 
+func eitherIsPrefix(a, b string) bool { + if len(a) > len(b) { + a, b = b, a + } + return a == b[:len(a)] && // a is a prefix of b + (len(a) == 0 || // a is empty + len(a) == len(b) || // a is equal to b + b[len(a)] == metric.NamespaceSeparatorByte) // a ends at a namespace boundary of b +} diff --git a/api/metrics/prefix_gatherer_test.go b/api/metrics/prefix_gatherer_test.go new file mode 100644 index 000000000000..ff2526e6742e --- /dev/null +++ b/api/metrics/prefix_gatherer_test.go @@ -0,0 +1,204 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metrics + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func TestPrefixGatherer_Gather(t *testing.T) { + require := require.New(t) + + gatherer := NewPrefixGatherer() + require.NotNil(gatherer) + + registerA := prometheus.NewRegistry() + require.NoError(gatherer.Register("a", registerA)) + { + counterA := prometheus.NewCounter(counterOpts) + require.NoError(registerA.Register(counterA)) + } + + registerB := prometheus.NewRegistry() + require.NoError(gatherer.Register("b", registerB)) + { + counterB := prometheus.NewCounter(counterOpts) + counterB.Inc() + require.NoError(registerB.Register(counterB)) + } + + metrics, err := gatherer.Gather() + require.NoError(err) + require.Equal( + []*dto.MetricFamily{ + { + Name: proto.String("a_counter"), + Help: proto.String(counterOpts.Help), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Counter: &dto.Counter{ + Value: proto.Float64(0), + }, + }, + }, + }, + { + Name: proto.String("b_counter"), + Help: proto.String(counterOpts.Help), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + }, + 
}, + metrics, + ) +} + +func TestPrefixGatherer_Register(t *testing.T) { + firstPrefixedGatherer := &prefixedGatherer{ + prefix: "first", + gatherer: &testGatherer{}, + } + firstPrefixGatherer := func() *prefixGatherer { + return &prefixGatherer{ + multiGatherer: multiGatherer{ + names: []string{ + firstPrefixedGatherer.prefix, + }, + gatherers: prometheus.Gatherers{ + firstPrefixedGatherer, + }, + }, + } + } + secondPrefixedGatherer := &prefixedGatherer{ + prefix: "second", + gatherer: &testGatherer{ + mfs: []*dto.MetricFamily{{}}, + }, + } + secondPrefixGatherer := &prefixGatherer{ + multiGatherer: multiGatherer{ + names: []string{ + firstPrefixedGatherer.prefix, + secondPrefixedGatherer.prefix, + }, + gatherers: prometheus.Gatherers{ + firstPrefixedGatherer, + secondPrefixedGatherer, + }, + }, + } + + tests := []struct { + name string + prefixGatherer *prefixGatherer + prefix string + gatherer prometheus.Gatherer + expectedErr error + expectedPrefixGatherer *prefixGatherer + }{ + { + name: "first registration", + prefixGatherer: &prefixGatherer{}, + prefix: firstPrefixedGatherer.prefix, + gatherer: firstPrefixedGatherer.gatherer, + expectedErr: nil, + expectedPrefixGatherer: firstPrefixGatherer(), + }, + { + name: "second registration", + prefixGatherer: firstPrefixGatherer(), + prefix: secondPrefixedGatherer.prefix, + gatherer: secondPrefixedGatherer.gatherer, + expectedErr: nil, + expectedPrefixGatherer: secondPrefixGatherer, + }, + { + name: "conflicts with previous registration", + prefixGatherer: firstPrefixGatherer(), + prefix: firstPrefixedGatherer.prefix, + gatherer: secondPrefixedGatherer.gatherer, + expectedErr: errOverlappingNamespaces, + expectedPrefixGatherer: firstPrefixGatherer(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + err := test.prefixGatherer.Register(test.prefix, test.gatherer) + require.ErrorIs(err, test.expectedErr) + require.Equal(test.expectedPrefixGatherer, 
test.prefixGatherer) + }) + } +} + +func TestEitherIsPrefix(t *testing.T) { + tests := []struct { + name string + a string + b string + expected bool + }{ + { + name: "empty strings", + a: "", + b: "", + expected: true, + }, + { + name: "an empty string", + a: "", + b: "hello", + expected: true, + }, + { + name: "same strings", + a: "x", + b: "x", + expected: true, + }, + { + name: "different strings", + a: "x", + b: "y", + expected: false, + }, + { + name: "splits namespace", + a: "hello", + b: "hello_world", + expected: true, + }, + { + name: "is prefix before separator", + a: "hello", + b: "helloworld", + expected: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, eitherIsPrefix(test.a, test.b)) + require.Equal(test.expected, eitherIsPrefix(test.b, test.a)) + }) + } +} diff --git a/api/metrics/service.md b/api/metrics/service.md new file mode 100644 index 000000000000..08b211d33dde --- /dev/null +++ b/api/metrics/service.md @@ -0,0 +1,40 @@ +--- +tags: [AvalancheGo APIs] +description: This page is an overview of the Metrics API associated with AvalancheGo. +sidebar_label: Metrics API +pagination_label: Metrics API +--- + +# Metrics API + +The API allows clients to get statistics about a node’s health and performance. + +:::info + +This API set is for a specific node, it is unavailable on the [public server](/tooling/rpc-providers.md). + +::: + +## Endpoint + +```text +/ext/metrics +``` + +## Usage + +To get the node metrics: + +```sh +curl -X POST 127.0.0.1:9650/ext/metrics +``` + +## Format + +This API produces Prometheus compatible metrics. See +[here](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md) +for information on Prometheus’ formatting. + +[Here](/nodes/maintain/setting-up-node-monitoring) is a tutorial that +shows how to set up Prometheus and Grafana to monitor AvalancheGo node using the +Metrics API. 
diff --git a/api/server/metrics.go b/api/server/metrics.go index e3b2d76c83ea..9734f36eeaa1 100644 --- a/api/server/metrics.go +++ b/api/server/metrics.go @@ -18,29 +18,26 @@ type metrics struct { totalDuration *prometheus.GaugeVec } -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { +func newMetrics(registerer prometheus.Registerer) (*metrics, error) { m := &metrics{ numProcessing: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "calls_processing", - Help: "The number of calls this API is currently processing", + Name: "calls_processing", + Help: "The number of calls this API is currently processing", }, []string{"base"}, ), numCalls: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "calls", - Help: "The number of calls this API has processed", + Name: "calls", + Help: "The number of calls this API has processed", }, []string{"base"}, ), totalDuration: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "calls_duration", - Help: "The total amount of time, in nanoseconds, spent handling API calls", + Name: "calls_duration", + Help: "The total amount of time, in nanoseconds, spent handling API calls", }, []string{"base"}, ), diff --git a/api/server/server.go b/api/server/server.go index a761468c62ca..8af570d09bdd 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -108,13 +108,11 @@ func New( nodeID ids.NodeID, tracingEnabled bool, tracer trace.Tracer, - namespace string, registerer prometheus.Registerer, httpConfig HTTPConfig, allowedHosts []string, - wrappers ...Wrapper, ) (Server, error) { - m, err := newMetrics(namespace, registerer) + m, err := newMetrics(registerer) if err != nil { return nil, err } @@ -134,10 +132,6 @@ func New( }, ) - for _, wrapper := range wrappers { - handler = wrapper.WrapHandler(handler) - } - httpServer := &http.Server{ Handler: handler, ReadTimeout: httpConfig.ReadTimeout, diff --git 
a/api/server/wrapper.go b/api/server/wrapper.go deleted file mode 100644 index b6cca85c731e..000000000000 --- a/api/server/wrapper.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package server - -import "net/http" - -type Wrapper interface { - // WrapHandler wraps an http.Handler. - WrapHandler(h http.Handler) http.Handler -} diff --git a/cache/lru_cache.go b/cache/lru_cache.go index 2a8a7ebe6d80..bab5e9549a58 100644 --- a/cache/lru_cache.go +++ b/cache/lru_cache.go @@ -7,7 +7,7 @@ import ( "sync" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" ) var _ Cacher[struct{}, struct{}] = (*LRU[struct{}, struct{}])(nil) @@ -17,7 +17,7 @@ var _ Cacher[struct{}, struct{}] = (*LRU[struct{}, struct{}])(nil) // done, based on evicting the least recently used value. type LRU[K comparable, V any] struct { lock sync.Mutex - elements linkedhashmap.LinkedHashmap[K, V] + elements *linked.Hashmap[K, V] // If set to <= 0, will be set internally to 1. Size int } @@ -92,7 +92,9 @@ func (c *LRU[K, _]) evict(key K) { } func (c *LRU[K, V]) flush() { - c.elements = linkedhashmap.New[K, V]() + if c.elements != nil { + c.elements.Clear() + } } func (c *LRU[_, _]) len() int { @@ -112,7 +114,7 @@ func (c *LRU[_, _]) portionFilled() float64 { // in the cache == [c.size] if necessary. 
func (c *LRU[K, V]) resize() { if c.elements == nil { - c.elements = linkedhashmap.New[K, V]() + c.elements = linked.NewHashmap[K, V]() } if c.Size <= 0 { c.Size = 1 diff --git a/cache/lru_sized_cache.go b/cache/lru_sized_cache.go index 5dc9b5fdec01..e8c8b0c76e7b 100644 --- a/cache/lru_sized_cache.go +++ b/cache/lru_sized_cache.go @@ -7,7 +7,7 @@ import ( "sync" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" ) var _ Cacher[struct{}, any] = (*sizedLRU[struct{}, any])(nil) @@ -17,7 +17,7 @@ var _ Cacher[struct{}, any] = (*sizedLRU[struct{}, any])(nil) // honored, based on evicting the least recently used value. type sizedLRU[K comparable, V any] struct { lock sync.Mutex - elements linkedhashmap.LinkedHashmap[K, V] + elements *linked.Hashmap[K, V] maxSize int currentSize int size func(K, V) int @@ -25,7 +25,7 @@ type sizedLRU[K comparable, V any] struct { func NewSizedLRU[K comparable, V any](maxSize int, size func(K, V) int) Cacher[K, V] { return &sizedLRU[K, V]{ - elements: linkedhashmap.New[K, V](), + elements: linked.NewHashmap[K, V](), maxSize: maxSize, size: size, } @@ -113,7 +113,7 @@ func (c *sizedLRU[K, _]) evict(key K) { } func (c *sizedLRU[K, V]) flush() { - c.elements = linkedhashmap.New[K, V]() + c.elements.Clear() c.currentSize = 0 } diff --git a/cache/metercacher/cache.go b/cache/metercacher/cache.go index c2ff666f25e7..f6f9a81abcff 100644 --- a/cache/metercacher/cache.go +++ b/cache/metercacher/cache.go @@ -4,48 +4,55 @@ package metercacher import ( + "time" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/utils/timer/mockable" ) var _ cache.Cacher[struct{}, struct{}] = (*Cache[struct{}, struct{}])(nil) type Cache[K comparable, V any] struct { - metrics cache.Cacher[K, V] - clock mockable.Clock + metrics *metrics } func New[K comparable, V any]( namespace string, registerer 
prometheus.Registerer, cache cache.Cacher[K, V], -) (cache.Cacher[K, V], error) { - meterCache := &Cache[K, V]{Cacher: cache} - return meterCache, meterCache.metrics.Initialize(namespace, registerer) +) (*Cache[K, V], error) { + metrics, err := newMetrics(namespace, registerer) + return &Cache[K, V]{ + Cacher: cache, + metrics: metrics, + }, err } func (c *Cache[K, V]) Put(key K, value V) { - start := c.clock.Time() + start := time.Now() c.Cacher.Put(key, value) - end := c.clock.Time() - c.put.Observe(float64(end.Sub(start))) - c.len.Set(float64(c.Cacher.Len())) - c.portionFilled.Set(c.Cacher.PortionFilled()) + putDuration := time.Since(start) + + c.metrics.putCount.Inc() + c.metrics.putTime.Add(float64(putDuration)) + c.metrics.len.Set(float64(c.Cacher.Len())) + c.metrics.portionFilled.Set(c.Cacher.PortionFilled()) } func (c *Cache[K, V]) Get(key K) (V, bool) { - start := c.clock.Time() + start := time.Now() value, has := c.Cacher.Get(key) - end := c.clock.Time() - c.get.Observe(float64(end.Sub(start))) + getDuration := time.Since(start) + if has { - c.hit.Inc() + c.metrics.getCount.With(hitLabels).Inc() + c.metrics.getTime.With(hitLabels).Add(float64(getDuration)) } else { - c.miss.Inc() + c.metrics.getCount.With(missLabels).Inc() + c.metrics.getTime.With(missLabels).Add(float64(getDuration)) } return value, has @@ -53,12 +60,14 @@ func (c *Cache[K, V]) Get(key K) (V, bool) { func (c *Cache[K, _]) Evict(key K) { c.Cacher.Evict(key) - c.len.Set(float64(c.Cacher.Len())) - c.portionFilled.Set(c.Cacher.PortionFilled()) + + c.metrics.len.Set(float64(c.Cacher.Len())) + c.metrics.portionFilled.Set(c.Cacher.PortionFilled()) } func (c *Cache[_, _]) Flush() { c.Cacher.Flush() - c.len.Set(float64(c.Cacher.Len())) - c.portionFilled.Set(c.Cacher.PortionFilled()) + + c.metrics.len.Set(float64(c.Cacher.Len())) + c.metrics.portionFilled.Set(c.Cacher.PortionFilled()) } diff --git a/cache/metercacher/metrics.go b/cache/metercacher/metrics.go index 39e0d8066574..c7587f62c979 100644 
--- a/cache/metercacher/metrics.go +++ b/cache/metercacher/metrics.go @@ -4,67 +4,86 @@ package metercacher import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) -func newAveragerMetric(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { - return metric.NewAveragerWithErrs( - namespace, - name, - "time (in ns) of a "+name, - reg, - errs, - ) -} +const ( + resultLabel = "result" + hitResult = "hit" + missResult = "miss" +) -func newCounterMetric(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) prometheus.Counter { - c := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: name, - Help: fmt.Sprintf("# of times a %s occurred", name), - }) - errs.Add(reg.Register(c)) - return c -} +var ( + resultLabels = []string{resultLabel} + hitLabels = prometheus.Labels{ + resultLabel: hitResult, + } + missLabels = prometheus.Labels{ + resultLabel: missResult, + } +) type metrics struct { - get metric.Averager - put metric.Averager + getCount *prometheus.CounterVec + getTime *prometheus.GaugeVec + + putCount prometheus.Counter + putTime prometheus.Gauge + len prometheus.Gauge portionFilled prometheus.Gauge - hit prometheus.Counter - miss prometheus.Counter } -func (m *metrics) Initialize( +func newMetrics( namespace string, reg prometheus.Registerer, -) error { - errs := wrappers.Errs{} - m.get = newAveragerMetric(namespace, "get", reg, &errs) - m.put = newAveragerMetric(namespace, "put", reg, &errs) - m.len = prometheus.NewGauge( - prometheus.GaugeOpts{ +) (*metrics, error) { + m := &metrics{ + getCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "get_count", + Help: "number of get calls", + }, + resultLabels, + ), + getTime: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + 
Name: "get_time", + Help: "time spent (ns) in get calls", + }, + resultLabels, + ), + putCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "put_count", + Help: "number of put calls", + }), + putTime: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "put_time", + Help: "time spent (ns) in put calls", + }), + len: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "len", Help: "number of entries", - }, - ) - errs.Add(reg.Register(m.len)) - m.portionFilled = prometheus.NewGauge( - prometheus.GaugeOpts{ + }), + portionFilled: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "portion_filled", Help: "fraction of cache filled", - }, + }), + } + return m, utils.Err( + reg.Register(m.getCount), + reg.Register(m.getTime), + reg.Register(m.putCount), + reg.Register(m.putTime), + reg.Register(m.len), + reg.Register(m.portionFilled), ) - errs.Add(reg.Register(m.portionFilled)) - m.hit = newCounterMetric(namespace, "hit", reg, &errs) - m.miss = newCounterMetric(namespace, "miss", reg, &errs) - return errs.Err } diff --git a/cache/unique_cache.go b/cache/unique_cache.go index b958b1f3a870..6a4d93c5b6c0 100644 --- a/cache/unique_cache.go +++ b/cache/unique_cache.go @@ -4,17 +4,18 @@ package cache import ( - "container/list" "sync" + + "github.com/ava-labs/avalanchego/utils/linked" ) var _ Deduplicator[struct{}, Evictable[struct{}]] = (*EvictableLRU[struct{}, Evictable[struct{}]])(nil) // EvictableLRU is an LRU cache that notifies the objects when they are evicted. 
-type EvictableLRU[K comparable, _ Evictable[K]] struct { +type EvictableLRU[K comparable, V Evictable[K]] struct { lock sync.Mutex - entryMap map[K]*list.Element - entryList *list.List + entryMap map[K]*linked.ListElement[V] + entryList *linked.List[V] Size int } @@ -32,12 +33,12 @@ func (c *EvictableLRU[_, _]) Flush() { c.flush() } -func (c *EvictableLRU[K, _]) init() { +func (c *EvictableLRU[K, V]) init() { if c.entryMap == nil { - c.entryMap = make(map[K]*list.Element) + c.entryMap = make(map[K]*linked.ListElement[V]) } if c.entryList == nil { - c.entryList = list.New() + c.entryList = linked.NewList[V]() } if c.Size <= 0 { c.Size = 1 @@ -49,9 +50,8 @@ func (c *EvictableLRU[_, V]) resize() { e := c.entryList.Front() c.entryList.Remove(e) - val := e.Value.(V) - delete(c.entryMap, val.Key()) - val.Evict() + delete(c.entryMap, e.Value.Key()) + e.Value.Evict() } } @@ -65,20 +65,21 @@ func (c *EvictableLRU[_, V]) deduplicate(value V) V { e = c.entryList.Front() c.entryList.MoveToBack(e) - val := e.Value.(V) - delete(c.entryMap, val.Key()) - val.Evict() + delete(c.entryMap, e.Value.Key()) + e.Value.Evict() e.Value = value } else { - e = c.entryList.PushBack(value) + e = &linked.ListElement[V]{ + Value: value, + } + c.entryList.PushBack(e) } c.entryMap[key] = e } else { c.entryList.MoveToBack(e) - val := e.Value.(V) - value = val + value = e.Value } return value } diff --git a/chains/atomic/codec.go b/chains/atomic/codec.go index 290713b3c258..f53fac8c3f80 100644 --- a/chains/atomic/codec.go +++ b/chains/atomic/codec.go @@ -5,7 +5,6 @@ package atomic import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -17,7 +16,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt) if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) diff --git a/chains/atomic/state.go 
b/chains/atomic/state.go index 1eed23803fb7..b134d2f7a5d8 100644 --- a/chains/atomic/state.go +++ b/chains/atomic/state.go @@ -147,12 +147,7 @@ func (s *state) SetValue(e *Element) error { // current engine state. func (s *state) RemoveValue(key []byte) error { value, err := s.loadValue(key) - if err != nil { - if err != database.ErrNotFound { - // An unexpected error occurred, so we should propagate that error - return err - } - + if err == database.ErrNotFound { // The value doesn't exist, so we should optimistically delete it dbElem := dbElement{Present: false} valueBytes, err := Codec.Marshal(CodecVersion, &dbElem) @@ -161,6 +156,9 @@ func (s *state) RemoveValue(key []byte) error { } return s.valueDB.Put(key, valueBytes) } + if err != nil { + return err + } // Don't allow the removal of something that was already removed. if !value.Present { diff --git a/chains/linearizable_vm.go b/chains/linearizable_vm.go index 97fe9eb4d1f4..e7e99b77cb93 100644 --- a/chains/linearizable_vm.go +++ b/chains/linearizable_vm.go @@ -6,7 +6,6 @@ package chains import ( "context" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -29,7 +28,6 @@ type initializeOnLinearizeVM struct { vmToInitialize common.VM vmToLinearize *linearizeOnInitializeVM - registerer metrics.OptionalGatherer ctx *snow.Context db database.Database genesisBytes []byte @@ -42,7 +40,6 @@ type initializeOnLinearizeVM struct { func (vm *initializeOnLinearizeVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { vm.vmToLinearize.stopVertexID = stopVertexID - vm.ctx.Metrics = vm.registerer return vm.vmToInitialize.Initialize( ctx, vm.ctx, diff --git a/chains/manager.go b/chains/manager.go index 8d8ce2a8f150..bdc6d0ef0180 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -6,7 +6,6 @@ package chains import ( "context" "crypto" - "crypto/tls" "errors" "fmt" "os" @@ -14,7 +13,6 @@ 
import ( "sync" "time" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/health" @@ -28,12 +26,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap/queue" "github.com/ava-labs/avalanchego/snow/engine/avalanche/state" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/syncer" @@ -63,6 +61,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/vms/tracedvm" + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" smcon "github.com/ava-labs/avalanchego/snow/consensus/snowman" aveng "github.com/ava-labs/avalanchego/snow/engine/avalanche" avbootstrap "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap" @@ -74,8 +73,19 @@ import ( ) const ( + ChainLabel = "chain" + defaultChannelSize = 1 initialQueueSize = 3 + + avalancheNamespace = constants.PlatformName + metric.NamespaceSeparator + "avalanche" + handlerNamespace = constants.PlatformName + metric.NamespaceSeparator + "handler" + meterchainvmNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterchainvm" + meterdagvmNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterdagvm" + proposervmNamespace = constants.PlatformName + metric.NamespaceSeparator + "proposervm" + p2pNamespace = constants.PlatformName + metric.NamespaceSeparator + "p2p" + snowmanNamespace = constants.PlatformName + 
metric.NamespaceSeparator + "snowman" + stakeNamespace = constants.PlatformName + metric.NamespaceSeparator + "stake" ) var ( @@ -86,10 +96,10 @@ var ( VertexDBPrefix = []byte("vertex") VertexBootstrappingDBPrefix = []byte("vertex_bs") TxBootstrappingDBPrefix = []byte("tx_bs") - BlockBootstrappingDBPrefix = []byte("block_bs") + BlockBootstrappingDBPrefix = []byte("interval_block_bs") // Bootstrapping prefixes for ChainVMs - ChainBootstrappingDBPrefix = []byte("bs") + ChainBootstrappingDBPrefix = []byte("interval_bs") errUnknownVMType = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM") errCreatePlatformVM = errors.New("attempted to create a chain running the PlatformVM") @@ -173,7 +183,8 @@ type ChainConfig struct { type ManagerConfig struct { SybilProtectionEnabled bool - StakingTLSCert tls.Certificate // needed to sign snowman++ blocks + StakingTLSSigner crypto.Signer + StakingTLSCert *staking.Certificate StakingBLSKey *bls.SecretKey TracingEnabled bool // Must not be used unless [TracingEnabled] is true as this may be nil. 
@@ -206,7 +217,9 @@ type ManagerConfig struct { // ShutdownNodeFunc allows the chain manager to issue a request to shutdown the node ShutdownNodeFunc func(exitCode int) MeterVMEnabled bool // Should each VM be wrapped with a MeterVM - Metrics metrics.MultiGatherer + + Metrics metrics.MultiGatherer + MeterDBMetrics metrics.MultiGatherer FrontierPollFrequency time.Duration ConsensusAppConcurrency int @@ -239,9 +252,6 @@ type manager struct { ids.Aliaser ManagerConfig - stakingSigner crypto.Signer - stakingCert *staking.Certificate - // Those notified when a chain is created registrants []Registrant @@ -261,20 +271,78 @@ type manager struct { // snowman++ related interface to allow validators retrieval validatorState validators.State + + avalancheGatherer metrics.MultiGatherer // chainID + handlerGatherer metrics.MultiGatherer // chainID + meterChainVMGatherer metrics.MultiGatherer // chainID + meterDAGVMGatherer metrics.MultiGatherer // chainID + proposervmGatherer metrics.MultiGatherer // chainID + p2pGatherer metrics.MultiGatherer // chainID + snowmanGatherer metrics.MultiGatherer // chainID + stakeGatherer metrics.MultiGatherer // chainID + vmGatherer map[ids.ID]metrics.MultiGatherer // vmID -> chainID } // New returns a new Manager -func New(config *ManagerConfig) Manager { +func New(config *ManagerConfig) (Manager, error) { + avalancheGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(avalancheNamespace, avalancheGatherer); err != nil { + return nil, err + } + + handlerGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(handlerNamespace, handlerGatherer); err != nil { + return nil, err + } + + meterChainVMGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(meterchainvmNamespace, meterChainVMGatherer); err != nil { + return nil, err + } + + meterDAGVMGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(meterdagvmNamespace, 
meterDAGVMGatherer); err != nil { + return nil, err + } + + proposervmGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(proposervmNamespace, proposervmGatherer); err != nil { + return nil, err + } + + p2pGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(p2pNamespace, p2pGatherer); err != nil { + return nil, err + } + + snowmanGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(snowmanNamespace, snowmanGatherer); err != nil { + return nil, err + } + + stakeGatherer := metrics.NewLabelGatherer(ChainLabel) + if err := config.Metrics.Register(stakeNamespace, stakeGatherer); err != nil { + return nil, err + } + return &manager{ Aliaser: ids.NewAliaser(), ManagerConfig: *config, - stakingSigner: config.StakingTLSCert.PrivateKey.(crypto.Signer), - stakingCert: staking.CertificateFromX509(config.StakingTLSCert.Leaf), chains: make(map[ids.ID]handler.Handler), chainsQueue: buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize), unblockChainCreatorCh: make(chan struct{}), chainCreatorShutdownCh: make(chan struct{}), - } + + avalancheGatherer: avalancheGatherer, + handlerGatherer: handlerGatherer, + meterChainVMGatherer: meterChainVMGatherer, + meterDAGVMGatherer: meterDAGVMGatherer, + proposervmGatherer: proposervmGatherer, + p2pGatherer: p2pGatherer, + snowmanGatherer: snowmanGatherer, + stakeGatherer: stakeGatherer, + vmGatherer: make(map[ids.ID]metrics.MultiGatherer), + }, nil } // QueueChainCreation queues a chain creation request @@ -423,25 +491,17 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c return nil, fmt.Errorf("error while creating chain's log %w", err) } - consensusMetrics := prometheus.NewRegistry() - chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias) - if err := m.Metrics.Register(chainNamespace, consensusMetrics); err != nil { - return nil, fmt.Errorf("error while registering 
chain's metrics %w", err) - } - - // This converts the prefix for all the Avalanche consensus metrics from - // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that - // there are no conflicts when registering the Snowman consensus metrics. - avalancheConsensusMetrics := prometheus.NewRegistry() - avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche") - if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { - return nil, fmt.Errorf("error while registering DAG metrics %w", err) + snowmanMetrics, err := metrics.MakeAndRegister( + m.snowmanGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } - vmMetrics := metrics.NewOptionalGatherer() - vmNamespace := metric.AppendNamespace(chainNamespace, "vm") - if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { - return nil, fmt.Errorf("error while registering vm's metrics %w", err) + vmMetrics, err := m.getOrMakeVMRegisterer(chainParams.VMID, primaryAlias) + if err != nil { + return nil, err } ctx := &snow.ConsensusContext{ @@ -467,11 +527,11 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c ValidatorState: m.validatorState, ChainDataDir: chainDataDir, }, - BlockAcceptor: m.BlockAcceptorGroup, - TxAcceptor: m.TxAcceptorGroup, - VertexAcceptor: m.VertexAcceptorGroup, - Registerer: consensusMetrics, - AvalancheRegisterer: avalancheConsensusMetrics, + PrimaryAlias: primaryAlias, + Registerer: snowmanMetrics, + BlockAcceptor: m.BlockAcceptorGroup, + TxAcceptor: m.TxAcceptorGroup, + VertexAcceptor: m.VertexAcceptorGroup, } // Get a factory for the vm we want to use on our chain @@ -561,14 +621,24 @@ func (m *manager) createAvalancheChain( defer ctx.Lock.Unlock() ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + Type: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, State: snow.Initializing, }) - meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) + primaryAlias := 
m.PrimaryAliasOrDefault(ctx.ChainID) + meterDBReg, err := metrics.MakeAndRegister( + m.MeterDBMetrics, + primaryAlias, + ) + if err != nil { + return nil, err + } + + meterDB, err := meterdb.New(meterDBReg, m.DB) if err != nil { return nil, err } + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) vmDB := prefixdb.New(VMDBPrefix, prefixDB) vertexDB := prefixdb.New(VertexDBPrefix, prefixDB) @@ -576,15 +646,19 @@ func (m *manager) createAvalancheChain( txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB) blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB) - vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) + avalancheMetrics, err := metrics.MakeAndRegister( + m.avalancheGatherer, + primaryAlias, + ) if err != nil { return nil, err } - txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.AvalancheRegisterer) + + vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", avalancheMetrics) if err != nil { return nil, err } - blockBlocker, err := queue.NewWithMissing(blockBootstrappingDB, "block", ctx.Registerer) + txBlocker, err := queue.New(txBootstrappingDB, "tx", avalancheMetrics) if err != nil { return nil, err } @@ -596,8 +670,9 @@ func (m *manager) createAvalancheChain( m.Net, m.ManagerConfig.Router, m.TimeoutManager, - p2p.EngineType_ENGINE_TYPE_AVALANCHE, + p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, sb, + avalancheMetrics, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) @@ -607,16 +682,6 @@ func (m *manager) createAvalancheChain( avalancheMessageSender = sender.Trace(avalancheMessageSender, m.Tracer) } - err = m.VertexAcceptorGroup.RegisterAcceptor( - ctx.ChainID, - "gossip", - avalancheMessageSender, - false, - ) - if err != nil { // Set up the event dispatcher - return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) - } - // Passes messages from the snowman engines to the network snowmanMessageSender, err 
:= sender.New( ctx, @@ -624,8 +689,9 @@ func (m *manager) createAvalancheChain( m.Net, m.ManagerConfig.Router, m.TimeoutManager, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, sb, + ctx.Registerer, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) @@ -635,16 +701,6 @@ func (m *manager) createAvalancheChain( snowmanMessageSender = sender.Trace(snowmanMessageSender, m.Tracer) } - err = m.BlockAcceptorGroup.RegisterAcceptor( - ctx.ChainID, - "gossip", - snowmanMessageSender, - false, - ) - if err != nil { // Set up the event dispatcher - return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) - } - chainConfig, err := m.getChainConfig(ctx.ChainID) if err != nil { return nil, fmt.Errorf("error while fetching chain config: %w", err) @@ -652,7 +708,15 @@ func (m *manager) createAvalancheChain( dagVM := vm if m.MeterVMEnabled { - dagVM = metervm.NewVertexVM(dagVM) + meterdagvmReg, err := metrics.MakeAndRegister( + m.meterDAGVMGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + dagVM = metervm.NewVertexVM(dagVM, meterdagvmReg) } if m.TracingEnabled { dagVM = tracedvm.NewVertexVM(dagVM, m.Tracer) @@ -670,22 +734,6 @@ func (m *manager) createAvalancheChain( }, ) - avalancheRegisterer := metrics.NewOptionalGatherer() - snowmanRegisterer := metrics.NewOptionalGatherer() - - registerer := metrics.NewMultiGatherer() - if err := registerer.Register("avalanche", avalancheRegisterer); err != nil { - return nil, err - } - if err := registerer.Register("", snowmanRegisterer); err != nil { - return nil, err - } - if err := ctx.Context.Metrics.Register(registerer); err != nil { - return nil, err - } - - ctx.Context.Metrics = avalancheRegisterer - // The channel through which a VM may send messages to the consensus engine // VM uses this channel to notify engine that a block is ready to be made msgChan := make(chan common.Message, defaultChannelSize) @@ -725,14 +773,20 @@ func 
(m *manager) createAvalancheChain( zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) - // Note: this does not use [dagVM] to ensure we use the [vm]'s height index. untracedVMWrappedInsideProposerVM := NewLinearizeOnInitializeVM(vm) var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM if m.TracingEnabled { - vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, chainAlias, m.Tracer) + vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, primaryAlias, m.Tracer) + } + + proposervmReg, err := metrics.MakeAndRegister( + m.proposervmGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } // Note: vmWrappingProposerVM is the VM that the Snowman engines should be @@ -745,13 +799,22 @@ func (m *manager) createAvalancheChain( MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, MinBlkDelay: minBlockDelay, NumHistoricalBlocks: numHistoricalBlocks, - StakingLeafSigner: m.stakingSigner, - StakingCertLeaf: m.stakingCert, + StakingLeafSigner: m.StakingTLSSigner, + StakingCertLeaf: m.StakingTLSCert, + Registerer: proposervmReg, }, ) if m.MeterVMEnabled { - vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM) + meterchainvmReg, err := metrics.MakeAndRegister( + m.meterChainVMGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM, meterchainvmReg) } if m.TracingEnabled { vmWrappingProposerVM = tracedvm.NewBlockVM(vmWrappingProposerVM, "proposervm", m.Tracer) @@ -764,7 +827,6 @@ func (m *manager) createAvalancheChain( vmToInitialize: vmWrappingProposerVM, vmToLinearize: untracedVMWrappedInsideProposerVM, - registerer: snowmanRegisterer, ctx: ctx.Context, db: vmDB, genesisBytes: genesisData, @@ -786,11 +848,46 @@ func (m *manager) createAvalancheChain( sampleK = int(bootstrapWeight) } - connectedValidators, err := tracker.NewMeteredPeers("", 
ctx.Registerer) + stakeReg, err := metrics.MakeAndRegister( + m.stakeGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + connectedValidators, err := tracker.NewMeteredPeers(stakeReg) + if err != nil { + return nil, fmt.Errorf("error creating peer tracker: %w", err) + } + vdrs.RegisterSetCallbackListener(ctx.SubnetID, connectedValidators) + + p2pReg, err := metrics.MakeAndRegister( + m.p2pGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + peerTracker, err := p2p.NewPeerTracker( + ctx.Log, + "peer_tracker", + p2pReg, + set.Of(ctx.NodeID), + nil, + ) if err != nil { return nil, fmt.Errorf("error creating peer tracker: %w", err) } - vdrs.RegisterCallbackListener(ctx.SubnetID, connectedValidators) + + handlerReg, err := metrics.MakeAndRegister( + m.handlerGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } // Asynchronously passes messages from the network to the consensus engine h, err := handler.New( @@ -803,6 +900,8 @@ func (m *manager) createAvalancheChain( validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector sb, connectedValidators, + peerTracker, + handlerReg, ) if err != nil { return nil, fmt.Errorf("error initializing network handler: %w", err) @@ -810,7 +909,7 @@ func (m *manager) createAvalancheChain( connectedBeacons := tracker.NewPeers() startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) - vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) + vdrs.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) snowGetHandler, err := snowgetter.New( vmWrappingProposerVM, @@ -841,13 +940,14 @@ func (m *manager) createAvalancheChain( Params: consensusParams, Consensus: snowmanConsensus, } - snowmanEngine, err := smeng.New(snowmanEngineConfig) + var snowmanEngine common.Engine + snowmanEngine, err = smeng.New(snowmanEngineConfig) if err != nil { return nil, fmt.Errorf("error initializing snowman engine: %w", err) } if m.TracingEnabled { - 
snowmanEngine = smeng.TraceEngine(snowmanEngine, m.Tracer) + snowmanEngine = common.TraceEngine(snowmanEngine, m.Tracer) } // create bootstrap gear @@ -860,8 +960,9 @@ func (m *manager) createAvalancheChain( Sender: snowmanMessageSender, BootstrapTracker: sb, Timer: h, + PeerTracker: peerTracker, AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - Blocked: blockBlocker, + DB: blockBootstrappingDB, VM: vmWrappingProposerVM, } var snowmanBootstrapper common.BootstrapableEngine @@ -883,7 +984,7 @@ func (m *manager) createAvalancheChain( ctx.Log, m.BootstrapMaxTimeGetAncestors, m.BootstrapAncestorsMaxContainersSent, - ctx.AvalancheRegisterer, + avalancheMetrics, ) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) @@ -899,9 +1000,9 @@ func (m *manager) createAvalancheChain( avalancheBootstrapperConfig := avbootstrap.Config{ AllGetsServer: avaGetHandler, Ctx: ctx, - Beacons: vdrs, StartupTracker: startupTracker, Sender: avalancheMessageSender, + PeerTracker: peerTracker, AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, VtxBlocked: vtxBlocker, TxBlocked: txBlocker, @@ -915,6 +1016,7 @@ func (m *manager) createAvalancheChain( avalancheBootstrapper, err := avbootstrap.New( avalancheBootstrapperConfig, snowmanBootstrapper.Start, + avalancheMetrics, ) if err != nil { return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) @@ -938,12 +1040,12 @@ func (m *manager) createAvalancheChain( }) // Register health check for this chain - if err := m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { - return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) + if err := m.Health.RegisterHealthCheck(primaryAlias, h, ctx.SubnetID.String()); err != nil { + return nil, fmt.Errorf("couldn't add health check for chain %s: %w", primaryAlias, err) } return &chain{ - Name: chainAlias, + Name: primaryAlias, Context: 
ctx, VM: dagVM, Handler: h, @@ -964,23 +1066,28 @@ func (m *manager) createSnowmanChain( defer ctx.Lock.Unlock() ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Initializing, }) - meterDB, err := meterdb.New("db", ctx.Registerer, m.DB) + primaryAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + meterDBReg, err := metrics.MakeAndRegister( + m.MeterDBMetrics, + primaryAlias, + ) if err != nil { return nil, err } - prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) - vmDB := prefixdb.New(VMDBPrefix, prefixDB) - bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB) - blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) + meterDB, err := meterdb.New(meterDBReg, m.DB) if err != nil { return nil, err } + prefixDB := prefixdb.New(ctx.ChainID[:], meterDB) + vmDB := prefixdb.New(VMDBPrefix, prefixDB) + bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB) + // Passes messages from the consensus engine to the network messageSender, err := sender.New( ctx, @@ -988,8 +1095,9 @@ func (m *manager) createSnowmanChain( m.Net, m.ManagerConfig.Router, m.TimeoutManager, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, sb, + ctx.Registerer, ) if err != nil { return nil, fmt.Errorf("couldn't initialize sender: %w", err) @@ -999,16 +1107,6 @@ func (m *manager) createSnowmanChain( messageSender = sender.Trace(messageSender, m.Tracer) } - err = m.BlockAcceptorGroup.RegisterAcceptor( - ctx.ChainID, - "gossip", - messageSender, - false, - ) - if err != nil { // Set up the event dispatcher - return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) - } - var ( bootstrapFunc func() subnetConnector = validators.UnhandledSubnetConnector @@ -1078,9 +1176,16 @@ func (m *manager) createSnowmanChain( zap.Uint64("numHistoricalBlocks", numHistoricalBlocks), ) - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) if 
m.TracingEnabled { - vm = tracedvm.NewBlockVM(vm, chainAlias, m.Tracer) + vm = tracedvm.NewBlockVM(vm, primaryAlias, m.Tracer) + } + + proposervmReg, err := metrics.MakeAndRegister( + m.proposervmGatherer, + primaryAlias, + ) + if err != nil { + return nil, err } vm = proposervm.New( @@ -1091,13 +1196,22 @@ func (m *manager) createSnowmanChain( MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, MinBlkDelay: minBlockDelay, NumHistoricalBlocks: numHistoricalBlocks, - StakingLeafSigner: m.stakingSigner, - StakingCertLeaf: m.stakingCert, + StakingLeafSigner: m.StakingTLSSigner, + StakingCertLeaf: m.StakingTLSCert, + Registerer: proposervmReg, }, ) if m.MeterVMEnabled { - vm = metervm.NewBlockVM(vm) + meterchainvmReg, err := metrics.MakeAndRegister( + m.meterChainVMGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + vm = metervm.NewBlockVM(vm, meterchainvmReg) } if m.TracingEnabled { vm = tracedvm.NewBlockVM(vm, "proposervm", m.Tracer) @@ -1132,11 +1246,46 @@ func (m *manager) createSnowmanChain( sampleK = int(bootstrapWeight) } - connectedValidators, err := tracker.NewMeteredPeers("", ctx.Registerer) + stakeReg, err := metrics.MakeAndRegister( + m.stakeGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + connectedValidators, err := tracker.NewMeteredPeers(stakeReg) + if err != nil { + return nil, fmt.Errorf("error creating peer tracker: %w", err) + } + vdrs.RegisterSetCallbackListener(ctx.SubnetID, connectedValidators) + + p2pReg, err := metrics.MakeAndRegister( + m.p2pGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } + + peerTracker, err := p2p.NewPeerTracker( + ctx.Log, + "peer_tracker", + p2pReg, + set.Of(ctx.NodeID), + nil, + ) if err != nil { return nil, fmt.Errorf("error creating peer tracker: %w", err) } - vdrs.RegisterCallbackListener(ctx.SubnetID, connectedValidators) + + handlerReg, err := metrics.MakeAndRegister( + m.handlerGatherer, + primaryAlias, + ) + if err != nil { + return nil, err + } 
// Asynchronously passes messages from the network to the consensus engine h, err := handler.New( @@ -1149,6 +1298,8 @@ func (m *manager) createSnowmanChain( subnetConnector, sb, connectedValidators, + peerTracker, + handlerReg, ) if err != nil { return nil, fmt.Errorf("couldn't initialize message handler: %w", err) @@ -1156,7 +1307,7 @@ func (m *manager) createSnowmanChain( connectedBeacons := tracker.NewPeers() startupTracker := tracker.NewStartup(connectedBeacons, (3*bootstrapWeight+3)/4) - beacons.RegisterCallbackListener(ctx.SubnetID, startupTracker) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) snowGetHandler, err := snowgetter.New( vm, @@ -1188,13 +1339,14 @@ func (m *manager) createSnowmanChain( Consensus: consensus, PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, } - engine, err := smeng.New(engineConfig) + var engine common.Engine + engine, err = smeng.New(engineConfig) if err != nil { return nil, fmt.Errorf("error initializing snowman engine: %w", err) } if m.TracingEnabled { - engine = smeng.TraceEngine(engine, m.Tracer) + engine = common.TraceEngine(engine, m.Tracer) } // create bootstrap gear @@ -1207,8 +1359,9 @@ func (m *manager) createSnowmanChain( Sender: messageSender, BootstrapTracker: sb, Timer: h, + PeerTracker: peerTracker, AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, - Blocked: blocked, + DB: bootstrappingDB, VM: vm, Bootstrapped: bootstrapFunc, } @@ -1259,12 +1412,12 @@ func (m *manager) createSnowmanChain( }) // Register health checks - if err := m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { - return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) + if err := m.Health.RegisterHealthCheck(primaryAlias, h, ctx.SubnetID.String()); err != nil { + return nil, fmt.Errorf("couldn't add health check for chain %s: %w", primaryAlias, err) } return &chain{ - Name: chainAlias, + Name: primaryAlias, 
Context: ctx, VM: vm, Handler: h, @@ -1405,3 +1558,27 @@ func (m *manager) getChainConfig(id ids.ID) (ChainConfig, error) { return ChainConfig{}, nil } + +func (m *manager) getOrMakeVMRegisterer(vmID ids.ID, chainAlias string) (metrics.MultiGatherer, error) { + vmGatherer, ok := m.vmGatherer[vmID] + if !ok { + vmName := constants.VMName(vmID) + vmNamespace := metric.AppendNamespace(constants.PlatformName, vmName) + vmGatherer = metrics.NewLabelGatherer(ChainLabel) + err := m.Metrics.Register( + vmNamespace, + vmGatherer, + ) + if err != nil { + return nil, err + } + m.vmGatherer[vmID] = vmGatherer + } + + chainReg := metrics.NewPrefixGatherer() + err := vmGatherer.Register( + chainAlias, + chainReg, + ) + return chainReg, err +} diff --git a/chains/subnets_test.go b/chains/subnets_test.go index 231a8f970a15..b11ad354099e 100644 --- a/chains/subnets_test.go +++ b/chains/subnets_test.go @@ -117,16 +117,12 @@ func TestSubnetConfigs(t *testing.T) { config: map[ids.ID]subnets.Config{ constants.PrimaryNetworkID: {}, testSubnetID: { - GossipConfig: subnets.GossipConfig{ - AcceptedFrontierValidatorSize: 123456789, - }, + ValidatorOnly: true, }, }, subnetID: testSubnetID, want: subnets.Config{ - GossipConfig: subnets.GossipConfig{ - AcceptedFrontierValidatorSize: 123456789, - }, + ValidatorOnly: true, }, }, } diff --git a/codec/codec.go b/codec/codec.go index 7aacb9085848..6ee799667182 100644 --- a/codec/codec.go +++ b/codec/codec.go @@ -15,6 +15,8 @@ var ( ErrDoesNotImplementInterface = errors.New("does not implement interface") ErrUnexportedField = errors.New("unexported field") ErrExtraSpace = errors.New("trailing buffer space") + ErrMarshalZeroLength = errors.New("can't marshal zero length value") + ErrUnmarshalZeroLength = errors.New("can't unmarshal zero length value") ) // Codec marshals and unmarshals diff --git a/codec/hierarchycodec/codec.go b/codec/hierarchycodec/codec.go index db2ffed0425d..b317a5a4006c 100644 --- a/codec/hierarchycodec/codec.go +++ 
b/codec/hierarchycodec/codec.go @@ -7,7 +7,6 @@ import ( "fmt" "reflect" "sync" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" @@ -15,11 +14,6 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -const ( - // default max length of a slice being marshalled by Marshal(). Should be <= math.MaxUint32. - defaultMaxSliceLength = 256 * 1024 -) - var ( _ Codec = (*hierarchyCodec)(nil) _ codec.Codec = (*hierarchyCodec)(nil) @@ -51,19 +45,19 @@ type hierarchyCodec struct { } // New returns a new, concurrency-safe codec -func New(durangoTime time.Time, tagNames []string, maxSliceLen uint32) Codec { +func New(tagNames []string) Codec { hCodec := &hierarchyCodec{ currentGroupID: 0, nextTypeID: 0, registeredTypes: bimap.New[typeID, reflect.Type](), } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames) return hCodec } // NewDefault returns a new codec with reasonable default values -func NewDefault(durangoTime time.Time) Codec { - return New(durangoTime, []string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) +func NewDefault() Codec { + return New([]string{reflectcodec.DefaultTagName}) } // SkipRegistrations some number of type IDs diff --git a/codec/hierarchycodec/codec_test.go b/codec/hierarchycodec/codec_test.go index 8149cdcc65e2..a5dd8b5fd546 100644 --- a/codec/hierarchycodec/codec_test.go +++ b/codec/hierarchycodec/codec_test.go @@ -5,41 +5,25 @@ package hierarchycodec import ( "testing" - "time" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectors(t *testing.T) { for _, test := range codec.Tests { - c := NewDefault(mockable.MaxTime) + c := NewDefault() test(c, t) } } func TestMultipleTags(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := New(mockable.MaxTime, []string{"tag1", "tag2"}, defaultMaxSliceLength) - test(c, t) - } -} - -func 
TestEnforceSliceLen(t *testing.T) { - for _, test := range codec.EnforceSliceLenTests { - c := NewDefault(mockable.MaxTime) - test(c, t) - } -} - -func TestIgnoreSliceLen(t *testing.T) { - for _, test := range codec.IgnoreSliceLenTests { - c := NewDefault(time.Time{}) + c := New([]string{"tag1", "tag2"}) test(c, t) } } func FuzzStructUnmarshalHierarchyCodec(f *testing.F) { - c := NewDefault(mockable.MaxTime) + c := NewDefault() codec.FuzzStructUnmarshal(c, f) } diff --git a/codec/linearcodec/codec.go b/codec/linearcodec/codec.go index 6ad36b8a197d..f690d1354fca 100644 --- a/codec/linearcodec/codec.go +++ b/codec/linearcodec/codec.go @@ -7,7 +7,6 @@ import ( "fmt" "reflect" "sync" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/reflectcodec" @@ -15,11 +14,6 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -const ( - // default max length of a slice being marshalled by Marshal(). Should be <= math.MaxUint32. - DefaultMaxSliceLength = 256 * 1024 -) - var ( _ Codec = (*linearCodec)(nil) _ codec.Codec = (*linearCodec)(nil) @@ -43,25 +37,20 @@ type linearCodec struct { registeredTypes *bimap.BiMap[uint32, reflect.Type] } -// New returns a new, concurrency-safe codec; it allow to specify -// both tagNames and maxSlicelenght -func New(durangoTime time.Time, tagNames []string, maxSliceLen uint32) Codec { +// New returns a new, concurrency-safe codec; it allow to specify tagNames. 
+func New(tagNames []string) Codec { hCodec := &linearCodec{ nextTypeID: 0, registeredTypes: bimap.New[uint32, reflect.Type](), } - hCodec.Codec = reflectcodec.New(hCodec, tagNames, durangoTime, maxSliceLen) + hCodec.Codec = reflectcodec.New(hCodec, tagNames) return hCodec } -// NewDefault is a convenience constructor; it returns a new codec with reasonable default values -func NewDefault(durangoTime time.Time) Codec { - return New(durangoTime, []string{reflectcodec.DefaultTagName}, DefaultMaxSliceLength) -} - -// NewCustomMaxLength is a convenience constructor; it returns a new codec with custom max length and default tags -func NewCustomMaxLength(durangoTime time.Time, maxSliceLen uint32) Codec { - return New(durangoTime, []string{reflectcodec.DefaultTagName}, maxSliceLen) +// NewDefault is a convenience constructor; it returns a new codec with default +// tagNames. +func NewDefault() Codec { + return New([]string{reflectcodec.DefaultTagName}) } // Skip some number of type IDs diff --git a/codec/linearcodec/codec_test.go b/codec/linearcodec/codec_test.go index 3d2f3efff68a..20b886f9cb62 100644 --- a/codec/linearcodec/codec_test.go +++ b/codec/linearcodec/codec_test.go @@ -5,41 +5,25 @@ package linearcodec import ( "testing" - "time" "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestVectors(t *testing.T) { for _, test := range codec.Tests { - c := NewDefault(mockable.MaxTime) + c := NewDefault() test(c, t) } } func TestMultipleTags(t *testing.T) { for _, test := range codec.MultipleTagsTests { - c := New(mockable.MaxTime, []string{"tag1", "tag2"}, DefaultMaxSliceLength) - test(c, t) - } -} - -func TestEnforceSliceLen(t *testing.T) { - for _, test := range codec.EnforceSliceLenTests { - c := NewDefault(mockable.MaxTime) - test(c, t) - } -} - -func TestIgnoreSliceLen(t *testing.T) { - for _, test := range codec.IgnoreSliceLenTests { - c := NewDefault(time.Time{}) + c := New([]string{"tag1", "tag2"}) test(c, 
t) } } func FuzzStructUnmarshalLinearCodec(f *testing.F) { - c := NewDefault(mockable.MaxTime) + c := NewDefault() codec.FuzzStructUnmarshal(c, f) } diff --git a/codec/manager.go b/codec/manager.go index 6fb48aaad9f8..608bbcabc0fe 100644 --- a/codec/manager.go +++ b/codec/manager.go @@ -13,6 +13,8 @@ import ( ) const ( + VersionSize = wrappers.ShortLen + // default max size, in bytes, of something being marshalled by Marshal() defaultMaxSize = 256 * units.KiB @@ -102,8 +104,8 @@ func (m *manager) Size(version uint16, value interface{}) (int, error) { res, err := c.Size(value) - // Add [wrappers.ShortLen] for the codec version - return wrappers.ShortLen + res, err + // Add [VersionSize] for the codec version + return VersionSize + res, err } // To marshal an interface, [value] must be a pointer to the interface. diff --git a/codec/reflectcodec/type_codec.go b/codec/reflectcodec/type_codec.go index 7f567d0bb7fa..c5ce970a8054 100644 --- a/codec/reflectcodec/type_codec.go +++ b/codec/reflectcodec/type_codec.go @@ -10,7 +10,6 @@ import ( "math" "reflect" "slices" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/utils/set" @@ -26,8 +25,6 @@ const ( var ( _ codec.Codec = (*genericCodec)(nil) - errMarshalNil = errors.New("can't marshal nil pointer or interface") - errUnmarshalNil = errors.New("can't unmarshal nil") errNeedPointer = errors.New("argument to unmarshal must be a pointer") errRecursiveInterfaceTypes = errors.New("recursive interface types") ) @@ -71,25 +68,21 @@ type TypeCodec interface { // 6. Serialized fields must be exported // 7. 
nil slices are marshaled as empty slices type genericCodec struct { - typer TypeCodec - durangoTime time.Time // Time after which [maxSliceLen] will be ignored - maxSliceLen uint32 - fielder StructFielder + typer TypeCodec + fielder StructFielder } // New returns a new, concurrency-safe codec -func New(typer TypeCodec, tagNames []string, durangoTime time.Time, maxSliceLen uint32) codec.Codec { +func New(typer TypeCodec, tagNames []string) codec.Codec { return &genericCodec{ - typer: typer, - durangoTime: durangoTime, - maxSliceLen: maxSliceLen, - fielder: NewStructFielder(tagNames), + typer: typer, + fielder: NewStructFielder(tagNames), } } func (c *genericCodec) Size(value interface{}) (int, error) { if value == nil { - return 0, errMarshalNil // can't marshal nil + return 0, codec.ErrMarshalNil } size, _, err := c.size(reflect.ValueOf(value), nil /*=typeStack*/) @@ -125,14 +118,14 @@ func (c *genericCodec) size( return wrappers.StringLen(value.String()), false, nil case reflect.Ptr: if value.IsNil() { - return 0, false, errMarshalNil + return 0, false, codec.ErrMarshalNil } return c.size(value.Elem(), typeStack) case reflect.Interface: if value.IsNil() { - return 0, false, errMarshalNil + return 0, false, codec.ErrMarshalNil } underlyingValue := value.Interface() @@ -159,6 +152,10 @@ func (c *genericCodec) size( return 0, false, err } + if size == 0 { + return 0, false, fmt.Errorf("can't marshal slice of zero length values: %w", codec.ErrMarshalZeroLength) + } + // For fixed-size types we manually calculate lengths rather than // processing each element separately to improve performance. 
if constSize { @@ -235,6 +232,10 @@ func (c *genericCodec) size( return 0, false, err } + if keySize == 0 && valueSize == 0 { + return 0, false, fmt.Errorf("can't marshal map with zero length entries: %w", codec.ErrMarshalZeroLength) + } + switch { case keyConstSize && valueConstSize: numElts := value.Len() @@ -291,7 +292,7 @@ func (c *genericCodec) size( // To marshal an interface, [value] must be a pointer to the interface func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error { if value == nil { - return errMarshalNil // can't marshal nil + return codec.ErrMarshalNil } return c.marshal(reflect.ValueOf(value), p, nil /*=typeStack*/) @@ -338,13 +339,13 @@ func (c *genericCodec) marshal( return p.Err case reflect.Ptr: if value.IsNil() { - return errMarshalNil + return codec.ErrMarshalNil } return c.marshal(value.Elem(), p, typeStack) case reflect.Interface: if value.IsNil() { - return errMarshalNil + return codec.ErrMarshalNil } underlyingValue := value.Interface() @@ -370,13 +371,6 @@ func (c *genericCodec) marshal( math.MaxInt32, ) } - if time.Now().Before(c.durangoTime) && uint32(numElts) > c.maxSliceLen { - return fmt.Errorf("%w; slice length, %d, exceeds maximum length, %d", - codec.ErrMaxSliceLenExceeded, - numElts, - c.maxSliceLen, - ) - } p.PackInt(uint32(numElts)) // pack # elements if p.Err != nil { return p.Err @@ -394,9 +388,13 @@ func (c *genericCodec) marshal( return p.Err } for i := 0; i < numElts; i++ { // Process each element in the slice + startOffset := p.Offset if err := c.marshal(value.Index(i), p, typeStack); err != nil { return err } + if startOffset == p.Offset { + return fmt.Errorf("couldn't marshal slice of zero length values: %w", codec.ErrMarshalZeroLength) + } } return nil case reflect.Array: @@ -433,13 +431,6 @@ func (c *genericCodec) marshal( math.MaxInt32, ) } - if time.Now().Before(c.durangoTime) && uint32(numElts) > c.maxSliceLen { - return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", - 
codec.ErrMaxSliceLenExceeded, - numElts, - c.maxSliceLen, - ) - } p.PackInt(uint32(numElts)) // pack # elements if p.Err != nil { return p.Err @@ -479,6 +470,8 @@ func (c *genericCodec) marshal( allKeyBytes := slices.Clone(p.Bytes[startOffset:p.Offset]) p.Offset = startOffset for _, key := range sortedKeys { + keyStartOffset := p.Offset + // pack key startIndex := key.startIndex - startOffset endIndex := key.endIndex - startOffset @@ -492,6 +485,9 @@ func (c *genericCodec) marshal( if err := c.marshal(value.MapIndex(key.key), p, typeStack); err != nil { return err } + if keyStartOffset == p.Offset { + return fmt.Errorf("couldn't marshal map with zero length entries: %w", codec.ErrMarshalZeroLength) + } } return nil @@ -504,7 +500,7 @@ func (c *genericCodec) marshal( // interface func (c *genericCodec) Unmarshal(bytes []byte, dest interface{}) error { if dest == nil { - return errUnmarshalNil + return codec.ErrUnmarshalNil } p := wrappers.Packer{ @@ -602,13 +598,6 @@ func (c *genericCodec) unmarshal( math.MaxInt32, ) } - if time.Now().Before(c.durangoTime) && numElts32 > c.maxSliceLen { - return fmt.Errorf("%w; array length, %d, exceeds maximum length, %d", - codec.ErrMaxSliceLenExceeded, - numElts32, - c.maxSliceLen, - ) - } numElts := int(numElts32) sliceType := value.Type() @@ -625,9 +614,14 @@ func (c *genericCodec) unmarshal( zeroValue := reflect.Zero(innerType) for i := 0; i < numElts; i++ { value.Set(reflect.Append(value, zeroValue)) + + startOffset := p.Offset if err := c.unmarshal(p, value.Index(i), typeStack); err != nil { return err } + if startOffset == p.Offset { + return fmt.Errorf("couldn't unmarshal slice of zero length values: %w", codec.ErrUnmarshalZeroLength) + } } return nil case reflect.Array: @@ -710,13 +704,6 @@ func (c *genericCodec) unmarshal( math.MaxInt32, ) } - if time.Now().Before(c.durangoTime) && numElts32 > c.maxSliceLen { - return fmt.Errorf("%w; map length, %d, exceeds maximum length, %d", - codec.ErrMaxSliceLenExceeded, - 
numElts32, - c.maxSliceLen, - ) - } var ( numElts = int(numElts32) @@ -755,6 +742,9 @@ func (c *genericCodec) unmarshal( if err := c.unmarshal(p, mapValue, typeStack); err != nil { return err } + if keyStartOffset == p.Offset { + return fmt.Errorf("couldn't unmarshal map with zero length entries: %w", codec.ErrUnmarshalZeroLength) + } // Assign the key-value pair in the map value.SetMapIndex(mapKey, mapValue) diff --git a/codec/test_codec.go b/codec/test_codec.go index d58e2d818f9e..2dc8b3e2add9 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -8,8 +8,6 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -36,7 +34,9 @@ var ( TestNilSliceSerialization, TestEmptySliceSerialization, TestSliceWithEmptySerialization, - TestSliceWithEmptySerializationOutOfMemory, + TestSliceWithEmptySerializationError, + TestMapWithEmptySerialization, + TestMapWithEmptySerializationError, TestSliceTooLarge, TestNegativeNumbers, TestTooLargeUnmarshal, @@ -44,20 +44,12 @@ var ( TestExtraSpace, TestSliceLengthOverflow, TestMap, + TestCanMarshalLargeSlices, } MultipleTagsTests = []func(c GeneralCodec, t testing.TB){ TestMultipleTags, } - - EnforceSliceLenTests = []func(c GeneralCodec, t testing.TB){ - TestCanNotMarshalLargeSlices, - TestCanNotUnmarshalLargeSlices, - } - - IgnoreSliceLenTests = []func(c GeneralCodec, t testing.TB){ - TestCanMarshalLargeSlices, - } ) // The below structs and interfaces exist @@ -731,7 +723,7 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { require.Equal(val, valUnmarshaled) } -// Test marshaling slice that is not nil and not empty +// Test marshaling empty slice of zero length structs func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { require := require.New(t) @@ -745,9 +737,9 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ - Arr: 
make([]emptyStruct, 1000), + Arr: make([]emptyStruct, 0), } - expected := []byte{0x00, 0x00, 0x00, 0x00, 0x03, 0xE8} // codec version (0x00, 0x00) then 1000 for numElts + expected := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00} // codec version (0x00, 0x00) then (0x00, 0x00, 0x00, 0x00) for numElts result, err := manager.Marshal(0, val) require.NoError(err) require.Equal(expected, result) @@ -760,10 +752,10 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { version, err := manager.Unmarshal(expected, &unmarshaled) require.NoError(err) require.Zero(version) - require.Len(unmarshaled.Arr, 1000) + require.Empty(unmarshaled.Arr) } -func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB) { +func TestSliceWithEmptySerializationError(codec GeneralCodec, t testing.TB) { require := require.New(t) type emptyStruct struct{} @@ -776,14 +768,69 @@ func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ - Arr: make([]emptyStruct, math.MaxInt32), + Arr: make([]emptyStruct, 1), } _, err := manager.Marshal(0, val) - require.ErrorIs(err, ErrMaxSliceLenExceeded) + require.ErrorIs(err, ErrMarshalZeroLength) + + _, err = manager.Size(0, val) + require.ErrorIs(err, ErrMarshalZeroLength) + + b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01} // codec version (0x00, 0x00) then (0x00, 0x00, 0x00, 0x01) for numElts + + unmarshaled := nestedSliceStruct{} + _, err = manager.Unmarshal(b, &unmarshaled) + require.ErrorIs(err, ErrUnmarshalZeroLength) +} + +// Test marshaling empty map of zero length structs +func TestMapWithEmptySerialization(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + type emptyStruct struct{} + + manager := NewDefaultManager() + require.NoError(manager.RegisterCodec(0, codec)) + + val := make(map[emptyStruct]emptyStruct) + expected := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00} // codec version (0x00, 0x00) 
then (0x00, 0x00, 0x00, 0x00) for numElts + result, err := manager.Marshal(0, val) + require.NoError(err) + require.Equal(expected, result) bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(6, bytesLen) // 2 byte codec version + 4 byte length prefix + require.Len(result, bytesLen) + + var unmarshaled map[emptyStruct]emptyStruct + version, err := manager.Unmarshal(expected, &unmarshaled) + require.NoError(err) + require.Zero(version) + require.Empty(unmarshaled) +} + +func TestMapWithEmptySerializationError(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + type emptyStruct struct{} + + manager := NewDefaultManager() + require.NoError(manager.RegisterCodec(0, codec)) + + val := map[emptyStruct]emptyStruct{ + {}: {}, + } + _, err := manager.Marshal(0, val) + require.ErrorIs(err, ErrMarshalZeroLength) + + _, err = manager.Size(0, val) + require.ErrorIs(err, ErrMarshalZeroLength) + + b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01} // codec version (0x00, 0x00) then (0x00, 0x00, 0x00, 0x01) for numElts + + var unmarshaled map[emptyStruct]emptyStruct + _, err = manager.Unmarshal(b, &unmarshaled) + require.ErrorIs(err, ErrUnmarshalZeroLength) } func TestSliceTooLarge(codec GeneralCodec, t testing.TB) { @@ -1031,35 +1078,6 @@ func TestMap(codec GeneralCodec, t testing.TB) { require.Len(outerArrayBytes, outerArraySize) } -func TestCanNotMarshalLargeSlices(codec GeneralCodec, t testing.TB) { - require := require.New(t) - - data := make([]uint16, 1_000_000) - - manager := NewManager(math.MaxInt) - require.NoError(manager.RegisterCodec(0, codec)) - - _, err := manager.Marshal(0, data) - require.ErrorIs(err, ErrMaxSliceLenExceeded) -} - -func TestCanNotUnmarshalLargeSlices(codec GeneralCodec, t testing.TB) { - require := require.New(t) - - writer := wrappers.Packer{ - Bytes: make([]byte, 2+4+2_000_000), - } - writer.PackShort(0) - writer.PackInt(1_000_000) - - manager := NewManager(math.MaxInt) - require.NoError(manager.RegisterCodec(0, 
codec)) - - var data []uint16 - _, err := manager.Unmarshal(writer.Bytes, &data) - require.ErrorIs(err, ErrMaxSliceLenExceeded) -} - func TestCanMarshalLargeSlices(codec GeneralCodec, t testing.TB) { require := require.New(t) diff --git a/config/config.go b/config/config.go index a9ffa17f646b..422eceedb6e9 100644 --- a/config/config.go +++ b/config/config.go @@ -22,7 +22,6 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/ipcs" "github.com/ava-labs/avalanchego/network" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/throttling" @@ -39,7 +38,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/password" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" @@ -47,6 +45,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/proposervm" ) @@ -55,48 +54,20 @@ const ( chainUpgradeFileName = "upgrade" subnetConfigFileExt = ".json" - authDeprecationMsg = "Auth API is deprecated" - ipcDeprecationMsg = "IPC API is deprecated" - keystoreDeprecationMsg = "keystore API is deprecated" - acceptedFrontierGossipDeprecationMsg = "push-based accepted frontier gossip is deprecated" - peerListPushGossipDeprecationMsg = "push-based peer list gossip is deprecated" + keystoreDeprecationMsg = "keystore API is deprecated" ) var ( // Deprecated key --> deprecation message (i.e. 
which key replaces it) // TODO: deprecate "BootstrapIDsKey" and "BootstrapIPsKey" - commitThresholdDeprecationMsg = fmt.Sprintf("use --%s instead", SnowCommitThresholdKey) - deprecatedKeys = map[string]string{ - APIAuthRequiredKey: authDeprecationMsg, - APIAuthPasswordKey: authDeprecationMsg, - APIAuthPasswordFileKey: authDeprecationMsg, - - IpcAPIEnabledKey: ipcDeprecationMsg, - IpcsChainIDsKey: ipcDeprecationMsg, - IpcsPathKey: ipcDeprecationMsg, - + deprecatedKeys = map[string]string{ KeystoreAPIEnabledKey: keystoreDeprecationMsg, - - ConsensusGossipAcceptedFrontierValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, - ConsensusGossipAcceptedFrontierNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, - ConsensusGossipAcceptedFrontierPeerSizeKey: acceptedFrontierGossipDeprecationMsg, - ConsensusGossipOnAcceptValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, - ConsensusGossipOnAcceptNonValidatorSizeKey: acceptedFrontierGossipDeprecationMsg, - ConsensusGossipOnAcceptPeerSizeKey: acceptedFrontierGossipDeprecationMsg, - - NetworkPeerListValidatorGossipSizeKey: peerListPushGossipDeprecationMsg, - NetworkPeerListNonValidatorGossipSizeKey: peerListPushGossipDeprecationMsg, - NetworkPeerListPeersGossipSizeKey: peerListPushGossipDeprecationMsg, - NetworkPeerListGossipFreqKey: peerListPushGossipDeprecationMsg, - - SnowRogueCommitThresholdKey: commitThresholdDeprecationMsg, - SnowVirtuousCommitThresholdKey: commitThresholdDeprecationMsg, } errConflictingACPOpinion = errors.New("supporting and objecting to the same ACP") + errConflictingImplicitACPOpinion = errors.New("objecting to enabled ACP") errSybilProtectionDisabledStakerWeights = errors.New("sybil protection disabled weights must be positive") errSybilProtectionDisabledOnPublicNetwork = errors.New("sybil protection disabled on public network") - errAuthPasswordTooWeak = errors.New("API auth password is not strong enough") errInvalidUptimeRequirement = errors.New("uptime requirement must be in the range 
[0, 1]") errMinValidatorStakeAboveMax = errors.New("minimum validator stake can't be greater than maximum validator stake") errInvalidDelegationFee = errors.New("delegation fee must be in the range [0, 1,000,000]") @@ -114,7 +85,6 @@ var ( errCannotReadDirectory = errors.New("cannot read directory") errUnmarshalling = errors.New("unmarshalling failed") errFileDoesNotExist = errors.New("file does not exist") - errGzipDeprecatedMsg = errors.New("gzip compression is not supported, use zstd or no compression") ) func getConsensusConfig(v *viper.Viper) snowball.Parameters { @@ -122,8 +92,7 @@ func getConsensusConfig(v *viper.Viper) snowball.Parameters { K: v.GetInt(SnowSampleSizeKey), AlphaPreference: v.GetInt(SnowPreferenceQuorumSizeKey), AlphaConfidence: v.GetInt(SnowConfidenceQuorumSizeKey), - BetaVirtuous: v.GetInt(SnowCommitThresholdKey), - BetaRogue: v.GetInt(SnowCommitThresholdKey), + Beta: v.GetInt(SnowCommitThresholdKey), ConcurrentRepolls: v.GetInt(SnowConcurrentRepollsKey), OptimalProcessing: v.GetInt(SnowOptimalProcessingKey), MaxOutstandingItems: v.GetInt(SnowMaxProcessingKey), @@ -133,10 +102,6 @@ func getConsensusConfig(v *viper.Viper) snowball.Parameters { p.AlphaPreference = v.GetInt(SnowQuorumSizeKey) p.AlphaConfidence = p.AlphaPreference } - if v.IsSet(SnowRogueCommitThresholdKey) { - p.BetaVirtuous = v.GetInt(SnowRogueCommitThresholdKey) - p.BetaRogue = v.GetInt(SnowRogueCommitThresholdKey) - } return p } @@ -166,45 +131,6 @@ func getLoggingConfig(v *viper.Viper) (logging.Config, error) { return loggingConfig, err } -func getAPIAuthConfig(v *viper.Viper) (node.APIAuthConfig, error) { - config := node.APIAuthConfig{ - APIRequireAuthToken: v.GetBool(APIAuthRequiredKey), - } - if !config.APIRequireAuthToken { - return config, nil - } - - if v.IsSet(APIAuthPasswordKey) { - config.APIAuthPassword = v.GetString(APIAuthPasswordKey) - } else { - passwordFilePath := v.GetString(APIAuthPasswordFileKey) // picks flag value or default - passwordBytes, err := 
os.ReadFile(passwordFilePath) - if err != nil { - return node.APIAuthConfig{}, fmt.Errorf("API auth password file %q failed to be read: %w", passwordFilePath, err) - } - config.APIAuthPassword = strings.TrimSpace(string(passwordBytes)) - } - - if !password.SufficientlyStrong(config.APIAuthPassword, password.OK) { - return node.APIAuthConfig{}, errAuthPasswordTooWeak - } - return config, nil -} - -func getIPCConfig(v *viper.Viper) node.IPCConfig { - config := node.IPCConfig{ - IPCAPIEnabled: v.GetBool(IpcAPIEnabledKey), - IPCPath: ipcs.DefaultBaseURL, - } - if v.IsSet(IpcsChainIDsKey) { - config.IPCDefaultChainIDs = strings.Split(v.GetString(IpcsChainIDsKey), ",") - } - if v.IsSet(IpcsPathKey) { - config.IPCPath = GetExpandedArg(v, IpcsPathKey) - } - return config -} - func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { var ( httpsKey []byte @@ -241,7 +167,7 @@ func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { } } - config := node.HTTPConfig{ + return node.HTTPConfig{ HTTPConfig: server.HTTPConfig{ ReadTimeout: v.GetDuration(HTTPReadTimeoutKey), ReadHeaderTimeout: v.GetDuration(HTTPReadHeaderTimeoutKey), @@ -268,14 +194,7 @@ func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { HTTPAllowedHosts: v.GetStringSlice(HTTPAllowedHostsKey), ShutdownTimeout: v.GetDuration(HTTPShutdownTimeoutKey), ShutdownWait: v.GetDuration(HTTPShutdownWaitKey), - } - - config.APIAuthConfig, err = getAPIAuthConfig(v) - if err != nil { - return node.HTTPConfig{}, err - } - config.IPCConfig = getIPCConfig(v) - return config, nil + }, nil } func getRouterHealthConfig(v *viper.Viper, halflife time.Duration) (router.HealthConfig, error) { @@ -321,20 +240,6 @@ func getAdaptiveTimeoutConfig(v *viper.Viper) (timer.AdaptiveTimeoutConfig, erro return config, nil } -func getGossipConfig(v *viper.Viper) subnets.GossipConfig { - return subnets.GossipConfig{ - AcceptedFrontierValidatorSize: uint(v.GetUint32(ConsensusGossipAcceptedFrontierValidatorSizeKey)), - 
AcceptedFrontierNonValidatorSize: uint(v.GetUint32(ConsensusGossipAcceptedFrontierNonValidatorSizeKey)), - AcceptedFrontierPeerSize: uint(v.GetUint32(ConsensusGossipAcceptedFrontierPeerSizeKey)), - OnAcceptValidatorSize: uint(v.GetUint32(ConsensusGossipOnAcceptValidatorSizeKey)), - OnAcceptNonValidatorSize: uint(v.GetUint32(ConsensusGossipOnAcceptNonValidatorSizeKey)), - OnAcceptPeerSize: uint(v.GetUint32(ConsensusGossipOnAcceptPeerSizeKey)), - AppGossipValidatorSize: uint(v.GetUint32(AppGossipValidatorSizeKey)), - AppGossipNonValidatorSize: uint(v.GetUint32(AppGossipNonValidatorSizeKey)), - AppGossipPeerSize: uint(v.GetUint32(AppGossipPeerSizeKey)), - } -} - func getNetworkConfig( v *viper.Viper, networkID uint32, @@ -352,9 +257,6 @@ func getNetworkConfig( if err != nil { return network.Config{}, err } - if compressionType == compression.TypeGzip { - return network.Config{}, errGzipDeprecatedMsg - } allowPrivateIPs := !constants.ProductionNetworkIDs.Contains(networkID) if v.IsSet(NetworkAllowPrivateIPsKey) { @@ -379,6 +281,18 @@ func getNetworkConfig( if supportedACPs.Overlaps(objectedACPs) { return network.Config{}, errConflictingACPOpinion } + if constants.ScheduledACPs.Overlaps(objectedACPs) { + return network.Config{}, errConflictingImplicitACPOpinion + } + + // Because this node version has scheduled these ACPs, we should notify + // peers that we support these upgrades. + supportedACPs.Union(constants.ScheduledACPs) + + // To decrease unnecessary network traffic, peers will not be notified of + // objection or support of activated ACPs. 
+ supportedACPs.Difference(constants.ActivatedACPs) + objectedACPs.Difference(constants.ActivatedACPs) config := network.Config{ ThrottlerConfig: network.ThrottlerConfig{ @@ -440,13 +354,9 @@ func getNetworkConfig( }, PeerListGossipConfig: network.PeerListGossipConfig{ - PeerListNumValidatorIPs: v.GetUint32(NetworkPeerListNumValidatorIPsKey), - PeerListValidatorGossipSize: v.GetUint32(NetworkPeerListValidatorGossipSizeKey), - PeerListNonValidatorGossipSize: v.GetUint32(NetworkPeerListNonValidatorGossipSizeKey), - PeerListPeersGossipSize: v.GetUint32(NetworkPeerListPeersGossipSizeKey), - PeerListGossipFreq: v.GetDuration(NetworkPeerListGossipFreqKey), - PeerListPullGossipFreq: v.GetDuration(NetworkPeerListPullGossipFreqKey), - PeerListBloomResetFreq: v.GetDuration(NetworkPeerListBloomResetFreqKey), + PeerListNumValidatorIPs: v.GetUint32(NetworkPeerListNumValidatorIPsKey), + PeerListPullGossipFreq: v.GetDuration(NetworkPeerListPullGossipFreqKey), + PeerListBloomResetFreq: v.GetDuration(NetworkPeerListBloomResetFreqKey), }, DelayConfig: network.DelayConfig{ @@ -480,8 +390,6 @@ func getNetworkConfig( return network.Config{}, fmt.Errorf("%s must be in [0,1]", NetworkHealthMaxPortionSendQueueFillKey) case config.DialerConfig.ConnectionTimeout < 0: return network.Config{}, fmt.Errorf("%q must be >= 0", NetworkOutboundConnectionTimeoutKey) - case config.PeerListGossipFreq < 0: - return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListGossipFreqKey) case config.PeerListPullGossipFreq < 0: return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListPullGossipFreqKey) case config.PeerListBloomResetFreq < 0: @@ -542,7 +450,7 @@ func getStateSyncConfig(v *viper.Viper) (node.StateSyncConfig, error) { if ip == "" { continue } - addr, err := ips.ToIPPort(ip) + addr, err := ips.ParseAddrPort(ip) if err != nil { return node.StateSyncConfig{}, fmt.Errorf("couldn't parse state sync ip %s: %w", ip, err) } @@ -599,14 +507,13 @@ func getBootstrapConfig(v 
*viper.Viper, networkID uint32) (node.BootstrapConfig, if ip == "" { continue } - - addr, err := ips.ToIPPort(ip) + addr, err := ips.ParseAddrPort(ip) if err != nil { return node.BootstrapConfig{}, fmt.Errorf("couldn't parse bootstrap ip %s: %w", ip, err) } config.Bootstrappers = append(config.Bootstrappers, genesis.Bootstrapper{ // ID is populated below - IP: ips.IPDesc(addr), + IP: addr, }) } @@ -617,7 +524,6 @@ func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, if id == "" { continue } - nodeID, err := ids.NodeIDFromString(id) if err != nil { return node.BootstrapConfig{}, fmt.Errorf("couldn't parse bootstrap peer id %s: %w", id, err) @@ -856,9 +762,9 @@ func getStakingConfig(v *viper.Viper, networkID uint32) (node.StakingConfig, err return config, nil } -func getTxFeeConfig(v *viper.Viper, networkID uint32) genesis.TxFeeConfig { +func getTxFeeConfig(v *viper.Viper, networkID uint32) fee.StaticConfig { if networkID != constants.MainnetID && networkID != constants.FujiID { - return genesis.TxFeeConfig{ + return fee.StaticConfig{ TxFee: v.GetUint64(TxFeeKey), CreateAssetTxFee: v.GetUint64(CreateAssetTxFeeKey), CreateSubnetTxFee: v.GetUint64(CreateSubnetTxFeeKey), @@ -1038,7 +944,7 @@ func getChainConfigs(v *viper.Viper) (map[string]chains.ChainConfig, error) { return getChainConfigsFromDir(v) } -// ReadsChainConfigs reads chain config files from static directories and returns map with contents, +// readChainConfigPath reads chain config files from static directories and returns map with contents, // if successful. 
func readChainConfigPath(chainConfigPath string) (map[string]chains.ChainConfig, error) { chainDirs, err := filepath.Glob(filepath.Join(chainConfigPath, "*")) @@ -1076,7 +982,7 @@ func readChainConfigPath(chainConfigPath string) (map[string]chains.ChainConfig, return chainConfigMap, nil } -// getSubnetConfigsFromFlags reads subnet configs from the correct place +// getSubnetConfigs reads subnet configs from the correct place // (flag or file) and returns a non-nil map. func getSubnetConfigs(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]subnets.Config, error) { if v.IsSet(SubnetConfigContentKey) { @@ -1178,7 +1084,6 @@ func getDefaultSubnetConfig(v *viper.Viper) subnets.Config { return subnets.Config{ ConsensusParameters: getConsensusConfig(v), ValidatorOnly: false, - GossipConfig: getGossipConfig(v), ProposerMinBlockDelay: proposervm.DefaultMinBlockDelay, ProposerNumHistoricalBlocks: proposervm.DefaultNumHistoricalBlocks, } @@ -1424,7 +1329,7 @@ func GetNodeConfig(v *viper.Viper) (node.Config, error) { nodeConfig.FdLimit = v.GetUint64(FdLimitKey) // Tx Fee - nodeConfig.TxFeeConfig = getTxFeeConfig(v, nodeConfig.NetworkID) + nodeConfig.StaticConfig = getTxFeeConfig(v, nodeConfig.NetworkID) // Genesis Data genesisStakingCfg := nodeConfig.StakingConfig.StakingConfig diff --git a/config/config.md b/config/config.md new file mode 100644 index 000000000000..8eb419024292 --- /dev/null +++ b/config/config.md @@ -0,0 +1,1410 @@ +--- +tags: [Nodes] +description: This document lists all available configuration and flags for AvalancheGo. +sidebar_label: AvalancheGo Configs + Flags +pagination_label: AvalancheGo Configs and Flags +sidebar_position: 0 +--- + +# AvalancheGo Configs and Flags + + + +You can specify the configuration of a node with the arguments below. + +## Data Directory + +#### `--data-dir` (string) + +Sets the base data directory where default sub-directories will be placed unless otherwise specified. +Defaults to `$HOME/.avalanchego`. 
+ +## Config File + +#### `--config-file` (string) + +Path to a JSON file that specifies this node's configuration. Command line +arguments will override arguments set in the config file. This flag is ignored +if `--config-file-content` is specified. + +Example JSON config file: + +```json +{ + "log-level": "debug" +} +``` + +:::tip +[Install Script](/nodes/run/with-installer/installing-avalanchego.md) creates the +node config file at `~/.avalanchego/configs/node.json`. No default file is +created if [AvalancheGo is built from source](/nodes/run/node-manually.md), you +would need to create it manually if needed. +::: + +#### `--config-file-content` (string) + +As an alternative to `--config-file`, it allows specifying base64 encoded config +content. + +#### `--config-file-content-type` (string) + +Specifies the format of the base64 encoded config content. JSON, TOML, YAML are +among currently supported file format (see +[here](https://github.com/spf13/viper#reading-config-files) for full list). Defaults to `JSON`. + +## Avalanche Community Proposals + +#### `--acp-support` (array of integers) + +The `--acp-support` flag allows an AvalancheGo node to indicate support for a +set of [Avalanche Community Proposals](https://github.com/avalanche-foundation/ACPs). + +#### `--acp-object` (array of integers) + +The `--acp-object` flag allows an AvalancheGo node to indicate objection for a +set of [Avalanche Community Proposals](https://github.com/avalanche-foundation/ACPs). + +## APIs + +#### `--api-admin-enabled` (boolean) + +If set to `true`, this node will expose the Admin API. Defaults to `false`. +See [here](/reference/avalanchego/admin-api.md) for more information. + +#### `--api-health-enabled` (boolean) + +If set to `false`, this node will not expose the Health API. Defaults to `true`. See +[here](/reference/avalanchego/health-api.md) for more information. 
+ +#### `--index-enabled` (boolean) + +If set to `true`, this node will enable the indexer and the Index API will be +available. Defaults to `false`. See +[here](/reference/avalanchego/index-api.md) for more information. + +#### `--api-info-enabled` (boolean) + +If set to `false`, this node will not expose the Info API. Defaults to `true`. See +[here](/reference/avalanchego/info-api.md) for more information. + +#### `--api-keystore-enabled` (boolean) + +If set to `true`, this node will expose the Keystore API. Defaults to `false`. +See [here](/reference/avalanchego/keystore-api.md) for more information. + +#### `--api-metrics-enabled` (boolean) + +If set to `false`, this node will not expose the Metrics API. Defaults to +`true`. See [here](/reference/avalanchego/metrics-api.md) for more information. + +#### `--http-shutdown-wait` (duration) + +Duration to wait after receiving SIGTERM or SIGINT before initiating shutdown. +The `/health` endpoint will return unhealthy during this duration (if the Health +API is enabled.) Defaults to `0s`. + +#### `--http-shutdown-timeout` (duration) + +Maximum duration to wait for existing connections to complete during node +shutdown. Defaults to `10s`. + +## Bootstrapping + +#### `--bootstrap-beacon-connection-timeout` (duration) + +Timeout when attempting to connect to bootstrapping beacons. Defaults to `1m`. + +#### `--bootstrap-ids` (string) + +Bootstrap IDs is a comma-separated list of validator IDs. These IDs will be used +to authenticate bootstrapping peers. An example setting of this field would be +`--bootstrap-ids="NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg,NodeID-MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ"`. +The number of given IDs here must be same with number of given +`--bootstrap-ips`. The default value depends on the network ID. + +#### `--bootstrap-ips` (string) + +Bootstrap IPs is a comma-separated list of IP:port pairs. These IP Addresses +will be used to bootstrap the current Avalanche state. 
An example setting of
+this field would be `--bootstrap-ips="127.0.0.1:12345,1.2.3.4:5678"`. The number
+of given IPs here must be same with number of given `--bootstrap-ids`. The
+default value depends on the network ID.
+
+#### `--bootstrap-retry-enabled` (boolean)
+
+If set to `false`, will not retry bootstrapping if it fails. Defaults to `true`.
+
+#### `--bootstrap-retry-warn-frequency` (uint)
+
+Specifies how many times bootstrap should be retried before warning the operator. Defaults to `50`.
+
+#### `--bootstrap-ancestors-max-containers-sent` (uint)
+
+Max number of containers in an `Ancestors` message sent by this node. Defaults to `2000`.
+
+#### `--bootstrap-ancestors-max-containers-received` (uint)
+
+This node reads at most this many containers from an incoming `Ancestors` message. Defaults to `2000`.
+
+#### `--bootstrap-max-time-get-ancestors` (duration)
+
+Max Time to spend fetching a container and its ancestors when responding to a GetAncestors message.
+Defaults to `50ms`.
+
+## State Syncing
+
+#### `--state-sync-ids` (string)
+
+State sync IDs is a comma-separated list of validator IDs. The specified
+validators will be contacted to get and authenticate the starting point (state
+summary) for state sync. An example setting of this field would be
+`--state-sync-ids="NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg,NodeID-MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ"`.
+The number of given IDs here must be same with number of given
+`--state-sync-ips`. The default value is empty, which results in all validators
+being sampled.
+
+#### `--state-sync-ips` (string)
+
+State sync IPs is a comma-separated list of IP:port pairs. These IP Addresses
+will be contacted to get and authenticate the starting point (state summary) for
+state sync. An example setting of this field would be
+`--state-sync-ips="127.0.0.1:12345,1.2.3.4:5678"`. The number of given IPs here
+must be the same with the number of given `--state-sync-ids`.
+
+## Partial Sync Primary Network
+
+#### `--partial-sync-primary-network` (string)
+
+Partial sync enables non-validators to optionally sync only the P-chain on the primary network.
+
+## Chain Configs
+
+Some blockchains allow the node operator to provide custom configurations for
+individual blockchains. These custom configurations are broken down into two
+categories: network upgrades and optional chain configurations. AvalancheGo
+reads in these configurations from the chain configuration directory and passes
+them into the VM on initialization.
+
+#### `--chain-config-dir` (string)
+
+Specifies the directory that contains chain configs, as described
+[here](/nodes/configure/chain-configs/chain-config-flags.md). Defaults to `$HOME/.avalanchego/configs/chains`.
+If this flag is not provided and the default directory does not exist,
+AvalancheGo will not exit since custom configs are optional. However, if the
+flag is set, the specified folder must exist, or AvalancheGo will exit with an
+error. This flag is ignored if `--chain-config-content` is specified.
+
+:::note
+Please replace `chain-config-dir` and `blockchainID` with their actual values.
+:::
+
+Network upgrades are passed in from the location:
+`chain-config-dir`/`blockchainID`/`upgrade.*`.
+Upgrade files are typically json encoded and therefore named `upgrade.json`.
+However, the format of the file is VM dependent.
+After a blockchain has activated a network upgrade, the same upgrade
+configuration must always be passed in to ensure that the network upgrades
+activate at the correct time.
+
+The chain configs are passed in from the location
+`chain-config-dir`/`blockchainID`/`config.*`.
+Chain config files are typically json encoded and therefore named `config.json`.
+However, the format of the file is VM dependent.
+This configuration is used by the VM to handle optional configuration flags such
+as enabling/disabling APIs, updating log level, etc.
+The chain configuration is intended to provide optional configuration parameters +and the VM will use default values if nothing is passed in. + +Full reference for all configuration options for some standard chains can be +found in a separate [chain config flags](/nodes/configure/chain-configs/chain-config-flags.md) document. + +Full reference for `subnet-evm` upgrade configuration can be found in a separate +[Customize a Subnet](/build/subnet/upgrade/customize-a-subnet.md) document. + +#### `--chain-config-content` (string) + +As an alternative to `--chain-config-dir`, chains custom configurations can be +loaded altogether from command line via `--chain-config-content` flag. Content +must be base64 encoded. + +Example: + +```bash +cchainconfig="$(echo -n '{"log-level":"trace"}' | base64)" +chainconfig="$(echo -n "{\"C\":{\"Config\":\"${cchainconfig}\",\"Upgrade\":null}}" | base64)" +avalanchego --chain-config-content "${chainconfig}" +``` + +#### `--chain-aliases-file` (string) + +Path to JSON file that defines aliases for Blockchain IDs. Defaults to +`~/.avalanchego/configs/chains/aliases.json`. This flag is ignored if +`--chain-aliases-file-content` is specified. Example content: + +```json +{ + "q2aTwKuyzgs8pynF7UXBZCU7DejbZbZ6EUyHr3JQzYgwNPUPi": ["DFK"] +} +``` + +The above example aliases the Blockchain whose ID is +`"q2aTwKuyzgs8pynF7UXBZCU7DejbZbZ6EUyHr3JQzYgwNPUPi"` to `"DFK"`. Chain +aliases are added after adding primary network aliases and before any changes to +the aliases via the admin API. This means that the first alias included for a +Blockchain on a Subnet will be treated as the `"Primary Alias"` instead of the +full blockchainID. The Primary Alias is used in all metrics and logs. + +#### `--chain-aliases-file-content` (string) + +As an alternative to `--chain-aliases-file`, it allows specifying base64 encoded +aliases for Blockchains. + +#### `--chain-data-dir` (string) + +Chain specific data directory. 
Defaults to `$HOME/.avalanchego/chainData`. + +## Database + +##### `--db-dir` (string, file path) + +Specifies the directory to which the database is persisted. Defaults to `"$HOME/.avalanchego/db"`. + +##### `--db-type` (string) + +Specifies the type of database to use. Must be one of `leveldb`, `memdb`, or `pebbledb`. +`memdb` is an in-memory, non-persisted database. + +:::note + +`memdb` stores everything in memory. So if you have a 900 GiB LevelDB instance, then using `memdb` +you’d need 900 GiB of RAM. +`memdb` is useful for fast one-off testing, not for running an actual node (on Fuji or Mainnet). +Also note that `memdb` doesn’t persist after restart. So any time you restart the node it would +start syncing from scratch. + +::: + +### Database Config + +#### `--db-config-file` (string) + +Path to the database config file. Ignored if `--config-file-content` is specified. + +#### `--db-config-file-content` (string) + +As an alternative to `--db-config-file`, it allows specifying base64 encoded database config content. + +#### LevelDB Config + +A LevelDB config file must be JSON and may have these keys. +Any keys not given will receive the default value. + +```go +{ + // BlockCacheCapacity defines the capacity of the 'sorted table' block caching. + // Use -1 for zero. + // + // The default value is 12MiB. + "blockCacheCapacity": int + + // BlockSize is the minimum uncompressed size in bytes of each 'sorted table' + // block. + // + // The default value is 4KiB. + "blockSize": int + + // CompactionExpandLimitFactor limits compaction size after expanded. + // This will be multiplied by table size limit at compaction target level. + // + // The default value is 25. + "compactionExpandLimitFactor": int + + // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) + // that a single 'sorted table' generates. This will be multiplied by + // table size limit at grandparent level. + // + // The default value is 10. 
+ "compactionGPOverlapsFactor": int + + // CompactionL0Trigger defines number of 'sorted table' at level-0 that will + // trigger compaction. + // + // The default value is 4. + "compactionL0Trigger": int + + // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to + // level-0. + // This will be multiplied by table size limit at compaction target level. + // + // The default value is 1. + "compactionSourceLimitFactor": int + + // CompactionTableSize limits size of 'sorted table' that compaction generates. + // The limits for each level will be calculated as: + // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level) + // The multiplier for each level can also fine-tuned using CompactionTableSizeMultiplierPerLevel. + // + // The default value is 2MiB. + "compactionTableSize": int + + // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize. + // + // The default value is 1. + "compactionTableSizeMultiplier": float + + // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for + // CompactionTableSize. + // Use zero to skip a level. + // + // The default value is nil. + "compactionTableSizeMultiplierPerLevel": []float + + // CompactionTotalSize limits total size of 'sorted table' for each level. + // The limits for each level will be calculated as: + // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level) + // The multiplier for each level can also fine-tuned using + // CompactionTotalSizeMultiplierPerLevel. + // + // The default value is 10MiB. + "compactionTotalSize": int + + // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize. + // + // The default value is 10. + "compactionTotalSizeMultiplier": float + + // DisableSeeksCompaction allows disabling 'seeks triggered compaction'. 
+ // The purpose of 'seeks triggered compaction' is to optimize database so + // that 'level seeks' can be minimized, however this might generate many + // small compaction which may not preferable. + // + // The default is true. + "disableSeeksCompaction": bool + + // OpenFilesCacheCapacity defines the capacity of the open files caching. + // Use -1 for zero, this has same effect as specifying NoCacher to OpenFilesCacher. + // + // The default value is 1024. + "openFilesCacheCapacity": int + + // WriteBuffer defines maximum size of a 'memdb' before flushed to + // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk + // unsorted journal. + // + // LevelDB may held up to two 'memdb' at the same time. + // + // The default value is 6MiB. + "writeBuffer": int + + // FilterBitsPerKey is the number of bits to add to the bloom filter per + // key. + // + // The default value is 10. + "filterBitsPerKey": int + + // MaxManifestFileSize is the maximum size limit of the MANIFEST-****** file. + // When the MANIFEST-****** file grows beyond this size, LevelDB will create + // a new MANIFEST file. + // + // The default value is infinity. + "maxManifestFileSize": int + + // MetricUpdateFrequency is the frequency to poll LevelDB metrics in + // nanoseconds. + // If <= 0, LevelDB metrics aren't polled. + // + // The default value is 10s. + "metricUpdateFrequency": int +} +``` + +## Genesis + +#### `--genesis-file` (string) + +Path to a JSON file containing the genesis data to use. Ignored when running +standard networks (Mainnet, Fuji Testnet), or when `--genesis-content` is +specified. If not given, uses default genesis data. + +These are the main properties in the JSON file: + +- `networkID`: A unique identifier for the blockchain, must be a number in the range [0, 2^32). +- `allocations`: The list of initial addresses, their initial balances and the unlock schedule for each. 
+- `startTime`: The time of the beginning of the blockchain, it must be a Unix + timestamp and it can't be a time in the future. +- `initialStakeDuration`: The stake duration, in seconds, of the validators that exist at network genesis. +- `initialStakeDurationOffset`: The offset, in seconds, between the start times + of the validators that exist at genesis. +- `initialStakedFunds`: A list of addresses that own the funds staked at genesis + (each address must be present in `allocations` as well) +- `initialStakers`: The validators that exist at genesis. Each element contains + the `rewardAddress`, NodeID and the `delegationFee` of the validator. +- `cChainGenesis`: The genesis info to be passed to the C-Chain. +- `message`: A message to include in the genesis. Not required. + +For an example of a JSON representation of genesis data, see [genesis_local.json](https://github.com/ava-labs/avalanchego/blob/master/genesis/genesis_local.json). + +#### `--genesis-file-content` (string) + +As an alternative to `--genesis-file`, it allows specifying base64 encoded genesis data to use. + +## HTTP Server + +#### `--http-host` (string) + +The address that HTTP APIs listen on. Defaults to `127.0.0.1`. This means that +by default, your node can only handle API calls made from the same machine. To +allow API calls from other machines, use `--http-host=`. You can also enter +domain names as parameter. + +#### `--http-port` (int) + +Each node runs an HTTP server that provides the APIs for interacting with the +node and the Avalanche network. This argument specifies the port that the HTTP +server will listen on. The default value is `9650`. + +#### `--http-tls-cert-file` (string, file path) + +This argument specifies the location of the TLS certificate used by the node for +the HTTPS server. This must be specified when `--http-tls-enabled=true`. There +is no default value. This flag is ignored if `--http-tls-cert-file-content` is +specified. 
+ +#### `--http-tls-cert-file-content` (string) + +As an alternative to `--http-tls-cert-file`, it allows specifying base64 encoded +content of the TLS certificate used by the node for the HTTPS server. Note that +full certificate content, with the leading and trailing header, must be base64 +encoded. This must be specified when `--http-tls-enabled=true`. + +#### `--http-tls-enabled` (boolean) + +If set to `true`, this flag will attempt to upgrade the server to use HTTPS. Defaults to `false`. + +#### `--http-tls-key-file` (string, file path) + +This argument specifies the location of the TLS private key used by the node for +the HTTPS server. This must be specified when `--http-tls-enabled=true`. There +is no default value. This flag is ignored if `--http-tls-key-file-content` is +specified. + +#### `--http-tls-key-file-content` (string) + +As an alternative to `--http-tls-key-file`, it allows specifying base64 encoded +content of the TLS private key used by the node for the HTTPS server. Note that +full private key content, with the leading and trailing header, must be base64 +encoded. This must be specified when `--http-tls-enabled=true`. + +#### `--http-read-timeout` (string) + +Maximum duration for reading the entire request, including the body. A zero or +negative value means there will be no timeout. + +#### `--http-read-header-timeout` (string) + +Maximum duration to read request headers. The connection’s read deadline is +reset after reading the headers. If `--http-read-header-timeout` is zero, the +value of `--http-read-timeout` is used. If both are zero, there is no timeout. + +#### `--http-write-timeout` (string) + +Maximum duration before timing out writes of the response. It is reset whenever +a new request’s header is read. A zero or negative value means there will be no +timeout. + +#### `--http-idle-timeout` (string) + +Maximum duration to wait for the next request when keep-alives are enabled. 
If +`--http-idle-timeout` is zero, the value of `--http-read-timeout` is used. If both are zero, +there is no timeout. + +#### `--http-allowed-origins` (string) + +Origins to allow on the HTTP port. Defaults to `*` which allows all origins. Example: +`"https://*.avax.network https://*.avax-test.network"` + +#### `--http-allowed-hosts` (string) + +List of acceptable host names in API requests. Provide the wildcard (`'*'`) to accept +requests from all hosts. API requests where the `Host` field is empty or an IP address +will always be accepted. An API call whose HTTP `Host` field isn't acceptable will +receive a 403 error code. Defaults to `localhost`. + +## File Descriptor Limit + +#### `--fd-limit` (int) + +Attempts to raise the process file descriptor limit to at least this value and +error if the value is above the system max. Linux default `32768`. + +## Logging + +#### `--log-level` (string, `{verbo, debug, trace, info, warn, error, fatal, off}`) + +The log level determines which events to log. There are 8 different levels, in +order from highest priority to lowest. + +- `off`: No logs have this level of logging. Turns off logging. +- `fatal`: Fatal errors that are not recoverable. +- `error`: Errors that the node encounters, these errors were able to be recovered. +- `warn`: A Warning that might be indicative of a spurious byzantine node, or potential future error. +- `info`: Useful descriptions of node status updates. +- `trace`: Traces container (block, vertex, transaction) job results. Useful for + tracing container IDs and their outcomes. +- `debug`: Debug logging is useful when attempting to understand possible bugs + in the code. More information that would be typically desired for normal usage + will be displayed. +- `verbo`: Tracks extensive amounts of information the node is processing. This + includes message contents and binary dumps of data for extremely low level + protocol analysis. 
+ +When specifying a log level note that all logs with the specified priority or +higher will be tracked. Defaults to `info`. + +#### `--log-display-level` (string, `{verbo, debug, trace, info, warn, error, fatal, off}`) + +The log level determines which events to display to stdout. If left blank, +will default to the value provided to `--log-level`. + +#### `--log-format` (string, `{auto, plain, colors, json}`) + +The structure of log format. Defaults to `auto` which formats terminal-like +logs, when the output is a terminal. Otherwise, should be one of `{auto, plain, colors, json}` + +#### `--log-dir` (string, file path) + +Specifies the directory in which system logs are kept. Defaults to `"$HOME/.avalanchego/logs"`. +If you are running the node as a system service (ex. using the installer script) logs will also be +stored in `$HOME/var/log/syslog`. + +#### `--log-disable-display-plugin-logs` (boolean) + +Disables displaying plugin logs in stdout. Defaults to `false`. + +#### `--log-rotater-max-size` (uint) + +The maximum file size in megabytes of the log file before it gets rotated. Defaults to `8`. + +#### `--log-rotater-max-files` (uint) + +The maximum number of old log files to retain. 0 means retain all old log files. Defaults to `7`. + +#### `--log-rotater-max-age` (uint) + +The maximum number of days to retain old log files based on the timestamp +encoded in their filename. 0 means retain all old log files. Defaults to `0`. + +#### `--log-rotater-compress-enabled` (boolean) + +Enables the compression of rotated log files through gzip. Defaults to `false`. + +## Network ID + +#### `--network-id` (string) + +The identity of the network the node should connect to. Can be one of: + +- `--network-id=mainnet` -> Connect to Mainnet (default). +- `--network-id=fuji` -> Connect to the Fuji test-network. +- `--network-id=testnet` -> Connect to the current test-network. (Right now, this is Fuji.) +- `--network-id=local` -> Connect to a local test-network. 
+- `--network-id=network-{id}` -> Connect to the network with the given ID.
+  `id` must be in the range `[0, 2^32)`.
+
+## OpenTelemetry
+
+AvalancheGo supports collecting and exporting [OpenTelemetry](https://opentelemetry.io/) traces.
+This might be useful for debugging, performance analysis, or monitoring.
+
+#### `--tracing-enabled` (boolean)
+
+If true, enable OpenTelemetry tracing. Defaults to `false`.
+
+#### `--tracing-endpoint` (string)
+
+The endpoint to export trace data to. Defaults to `localhost:4317`.
+
+#### `--tracing-insecure` (string)
+
+If true, don't use TLS when exporting trace data. Defaults to `true`.
+
+#### `--tracing-sample-rate` (float)
+
+The fraction of traces to sample. If >= 1, always sample. If `<= 0`, never sample.
+Defaults to `0.1`.
+
+#### `--tracing-exporter-type` (string)
+
+Type of exporter to use for tracing. Options are [`grpc`,`http`]. Defaults to `grpc`.
+
+## Public IP
+
+Validators must know one of their public facing IP addresses so they can enable
+other nodes to connect to them.
+
+By default, the node will attempt to perform NAT traversal to get the node's IP
+according to its router.
+
+#### `--public-ip` (string)
+
+If this argument is provided, the node assumes this is its public IP.
+
+:::tip
+When running a local network it may be easiest to set this value to `127.0.0.1`.
+:::
+
+#### `--public-ip-resolution-frequency` (duration)
+
+Frequency at which this node resolves/updates its public IP and renews NAT
+mappings, if applicable. Defaults to 5 minutes.
+
+#### `--public-ip-resolution-service` (string)
+
+When provided, the node will use that service to periodically resolve/update its
+public IP. Only acceptable values are `ifconfigCo`, `opendns` or `ifconfigMe`.
+
+## Staking
+
+#### `--staking-port` (int)
+
+The port through which the network peers will connect to this node externally.
+Having this port accessible from the internet is required for correct node
+operation. Defaults to `9651`.
+ +#### `--sybil-protection-enabled` (boolean) + +Avalanche uses Proof of Stake (PoS) as sybil resistance to make it prohibitively +expensive to attack the network. If false, sybil resistance is disabled and all +peers will be sampled during consensus. Defaults to `true`. Note that this can +not be disabled on public networks (`Fuji` and `Mainnet`). + +Setting this flag to `false` **does not** mean "this node is not a validator." +It means that this node will sample all nodes, not just validators. +**You should not set this flag to false unless you understand what you are doing.** + +#### `--sybil-protection-disabled-weight` (uint) + +Weight to provide to each peer when staking is disabled. Defaults to `100`. + +#### `--staking-tls-cert-file` (string, file path) + +Avalanche uses two-way authenticated TLS connections to securely connect nodes. +This argument specifies the location of the TLS certificate used by the node. By +default, the node expects the TLS certificate to be at +`$HOME/.avalanchego/staking/staker.crt`. This flag is ignored if +`--staking-tls-cert-file-content` is specified. + +#### `--staking-tls-cert-file-content` (string) + +As an alternative to `--staking-tls-cert-file`, it allows specifying base64 +encoded content of the TLS certificate used by the node. Note that full +certificate content, with the leading and trailing header, must be base64 +encoded. + +#### `--staking-tls-key-file` (string, file path) + +Avalanche uses two-way authenticated TLS connections to securely connect nodes. +This argument specifies the location of the TLS private key used by the node. By +default, the node expects the TLS private key to be at +`$HOME/.avalanchego/staking/staker.key`. This flag is ignored if +`--staking-tls-key-file-content` is specified. + +#### `--staking-tls-key-file-content` (string) + +As an alternative to `--staking-tls-key-file`, it allows specifying base64 +encoded content of the TLS private key used by the node. 
Note that full private +key content, with the leading and trailing header, must be base64 encoded. + +## Subnets + +### Subnet Tracking + +#### `--track-subnets` (string) + +Comma separated list of Subnet IDs that this node would track if added to. +Defaults to empty (will only validate the Primary Network). + +### Subnet Configs + +It is possible to provide parameters for Subnets. Parameters here apply to all +chains in the specified Subnets. Parameters must be specified with a +`{subnetID}.json` config file under `--subnet-config-dir`. AvalancheGo loads +configs for Subnets specified in +`--track-subnets` parameter. + +Full reference for all configuration options for a Subnet can be found in a +separate [Subnet Configs](./subnet-configs) document. + +#### `--subnet-config-dir` (`string`) + +Specifies the directory that contains Subnet configs, as described above. +Defaults to `$HOME/.avalanchego/configs/subnets`. If the flag is set explicitly, +the specified folder must exist, or AvalancheGo will exit with an error. This +flag is ignored if `--subnet-config-content` is specified. + +Example: Let's say we have a Subnet with ID +`p4jUwqZsA2LuSftroCd3zb4ytH8W99oXKuKVZdsty7eQ3rXD6`. We can create a config file +under the default `subnet-config-dir` at +`$HOME/.avalanchego/configs/subnets/p4jUwqZsA2LuSftroCd3zb4ytH8W99oXKuKVZdsty7eQ3rXD6.json`. +An example config file is: + +```json +{ + "validatorOnly": false, + "consensusParameters": { + "k": 25, + "alpha": 18 + } +} +``` + +:::tip +By default, none of these directories and/or files exist. You would need to create them manually if needed. +::: + +#### `--subnet-config-content` (string) + +As an alternative to `--subnet-config-dir`, it allows specifying base64 encoded parameters for a Subnet. + +## Version + +#### `--version` (boolean) + +If this is `true`, print the version and quit. Defaults to `false`. + +## Advanced Options + +The following options may affect the correctness of a node. 
Only power users should change these. + +### Gossiping + +#### `--consensus-accepted-frontier-gossip-validator-size` (uint) + +Number of validators to gossip to when gossiping accepted frontier. Defaults to `0`. + +#### `--consensus-accepted-frontier-gossip-non-validator-size` (uint) + +Number of non-validators to gossip to when gossiping accepted frontier. Defaults to `0`. + +#### `--consensus-accepted-frontier-gossip-peer-size` (uint) + +Number of peers to gossip to when gossiping accepted frontier. Defaults to `15`. + +#### `--consensus-accepted-frontier-gossip-frequency` (duration) + +Time between gossiping accepted frontiers. Defaults to `10s`. + +#### `--consensus-on-accept-gossip-validator-size` (uint) + +Number of validators to gossip to each accepted container to. Defaults to `0`. + +#### `--consensus-on-accept-gossip-non-validator-size` (uint) + +Number of non-validators to gossip to each accepted container to. Defaults to `0`. + +#### `--consensus-on-accept-gossip-peer-size` (uint) + +Number of peers to gossip to each accepted container to. Defaults to `10`. + +### Benchlist + +#### `--benchlist-duration` (duration) + +Maximum amount of time a peer is benchlisted after surpassing +`--benchlist-fail-threshold`. Defaults to `15m`. + +#### `--benchlist-fail-threshold` (int) + +Number of consecutive failed queries to a node before benching it (assuming all +queries to it will fail). Defaults to `10`. + +#### `--benchlist-min-failing-duration` (duration) + +Minimum amount of time queries to a peer must be failing before the peer is benched. Defaults to `150s`. + +### Consensus Parameters + +:::note +Some of these parameters can only be set on a local or private network, not on Fuji Testnet or Mainnet +::: + +#### `--consensus-shutdown-timeout` (duration) + +Timeout before killing an unresponsive chain. Defaults to `5s`. + +#### `--create-asset-tx-fee` (int) + +Transaction fee, in nAVAX, for transactions that create new assets. 
Defaults to +`10000000` nAVAX (.01 AVAX) per transaction. This can only be changed on a local +network. + +#### `--create-subnet-tx-fee` (int) + +Transaction fee, in nAVAX, for transactions that create new Subnets. Defaults to +`1000000000` nAVAX (1 AVAX) per transaction. This can only be changed on a local +network. + +#### `--create-blockchain-tx-fee` (int) + +Transaction fee, in nAVAX, for transactions that create new blockchains. +Defaults to `1000000000` nAVAX (1 AVAX) per transaction. This can only be +changed on a local network. + +#### `--transform-subnet-tx-fee` (int) + +Transaction fee, in nAVAX, for transactions that transform Subnets. Defaults to +`1000000000` nAVAX (1 AVAX) per transaction. This can only be changed on a local network. + +#### `--add-primary-network-validator-fee` (int) + +Transaction fee, in nAVAX, for transactions that add new primary network validators. Defaults to 0. +This can only be changed on a local network. + +#### `--add-primary-network-delegator-fee` (int) + +Transaction fee, in nAVAX, for transactions that add new primary network delegators. Defaults to 0. +This can only be changed on a local network. + +#### `--add-subnet-validator-fee` (int) + +Transaction fee, in nAVAX, for transactions that add new Subnet validators. +Defaults to `10000000` nAVAX (.01 AVAX). + +#### `--add-subnet-delegator-fee` (int) + +Transaction fee, in nAVAX, for transactions that add new Subnet delegators. +Defaults to `10000000` nAVAX (.01 AVAX). + +#### `--min-delegator-stake` (int) + +The minimum stake, in nAVAX, that can be delegated to a validator of the Primary Network. + +Defaults to `25000000000` (25 AVAX) on Mainnet. Defaults to `5000000` (.005 +AVAX) on Test Net. This can only be changed on a local network. + +#### `--min-delegation-fee` (int) + +The minimum delegation fee that can be charged for delegation on the Primary +Network, multiplied by `10,000` . Must be in the range `[0, 1000000]`. Defaults +to `20000` (2%) on Mainnet. 
This can only be changed on a local network. + +#### `--min-stake-duration` (duration) + +Minimum staking duration. The default on Mainnet is `336h` (two weeks). This can only be changed on +a local network. This applies to both delegation and validation periods. + +#### `--min-validator-stake` (int) + +The minimum stake, in nAVAX, required to validate the Primary Network. This can +only be changed on a local network. + +Defaults to `2000000000000` (2,000 AVAX) on Mainnet. Defaults to `5000000` (.005 AVAX) on Test Net. + +#### `--max-stake-duration` (duration) + +The maximum staking duration, in hours. Defaults to `8760h` (365 days) on +Mainnet. This can only be changed on a local network. + +#### `--max-validator-stake` (int) + +The maximum stake, in nAVAX, that can be placed on a validator on the primary +network. Defaults to `3000000000000000` (3,000,000 AVAX) on Mainnet. This +includes stake provided by both the validator and by delegators to the +validator. This can only be changed on a local network. + +#### `--stake-minting-period` (duration) + +Consumption period of the staking function, in hours. The default on Mainnet is +`8760h` (365 days). This can only be changed on a local network. + +#### `--stake-max-consumption-rate` (uint) + +The maximum percentage of the consumption rate for the remaining token supply in +the minting period, which is 1 year on Mainnet. Defaults to `120,000` which is +12% per year. This can only be changed on a local network. + +#### `--stake-min-consumption-rate` (uint) + +The minimum percentage of the consumption rate for the remaining token supply in +the minting period, which is 1 year on Mainnet. Defaults to `100,000` which is +10% per year. This can only be changed on a local network. + +#### `--stake-supply-cap` (uint) + +The maximum stake supply, in nAVAX, that can be placed on a validator. Defaults +to `720,000,000,000,000,000` nAVAX. This can only be changed on a local network. 
+ +#### `--tx-fee` (int) + +The required amount of nAVAX to be burned for a transaction to be valid on the +X-Chain, and for import/export transactions on the P-Chain. This parameter +requires network agreement in its current form. Changing this value from the +default should only be done on private networks or local network. Defaults to +`1,000,000` nAVAX per transaction. + +#### `--uptime-requirement` (float) + +Fraction of time a validator must be online to receive rewards. Defaults to +`0.8`. This can only be changed on a local network. + +#### `--uptime-metric-freq` (duration) + +Frequency of renewing this node's average uptime metric. Defaults to `30s`. + +#### Snow Parameters + +##### `--snow-concurrent-repolls` (int) + +Snow consensus requires repolling transactions that are issued during low time +of network usage. This parameter lets one define how aggressive the client will +be in finalizing these pending transactions. This should only be changed after +careful consideration of the tradeoffs of Snow consensus. The value must be at +least `1` and at most `--snow-commit-threshold`. Defaults to `4`. + +##### `--snow-sample-size` (int) + +Snow consensus defines `k` as the number of validators that are sampled during +each network poll. This parameter lets one define the `k` value used for +consensus. This should only be changed after careful consideration of the +tradeoffs of Snow consensus. The value must be at least `1`. Defaults to `20`. + +##### `--snow-quorum-size` (int) + +Snow consensus defines `alpha` as the number of validators that must prefer a +transaction during each network poll to increase the confidence in the +transaction. This parameter lets us define the `alpha` value used for consensus. +This should only be changed after careful consideration of the tradeoffs of Snow +consensus. The value must be at greater than `k/2`. Defaults to `15`. 
+ +##### `--snow-commit-threshold` (int) + +Snow consensus defines `beta` as the number of consecutive polls that a +container must increase its confidence for it to be accepted. This +parameter lets us define the `beta` value used for consensus. This should only +be changed after careful consideration of the tradeoffs of Snow consensus. The +value must be at least `1`. Defaults to `20`. + +##### `--snow-optimal-processing` (int) + +Optimal number of processing items in consensus. The value must be at least `1`. Defaults to `50`. + +##### `--snow-max-processing` (int) + +Maximum number of processing items to be considered healthy. Reports unhealthy +if more than this number of items are outstanding. The value must be at least +`1`. Defaults to `1024`. + +##### `--snow-max-time-processing` (duration) + +Maximum amount of time an item should be processing and still be healthy. +Reports unhealthy if there is an item processing for longer than this duration. +The value must be greater than `0`. Defaults to `2m`. + +### ProposerVM Parameters + +#### `--proposervm-use-current-height` (bool) + +Have the ProposerVM always report the last accepted P-chain block height. Defaults to `false`. + +### Continuous Profiling + +You can configure your node to continuously run memory/CPU profiles and save the +most recent ones. Continuous memory/CPU profiling is enabled if +`--profile-continuous-enabled` is set. + +#### `--profile-continuous-enabled` (boolean) + +Whether the app should continuously produce performance profiles. Defaults to the false (not enabled). + +#### `--profile-dir` (string) + +If profiling enabled, node continuously runs memory/CPU profiles and puts them +at this directory. Defaults to the `$HOME/.avalanchego/profiles/`. + +#### `--profile-continuous-freq` (duration) + +How often a new CPU/memory profile is created. Defaults to `15m`. + +#### `--profile-continuous-max-files` (int) + +Maximum number of CPU/memory profiles files to keep. Defaults to 5. 
+ +### Health + +#### `--health-check-frequency` (duration) + +Health check runs with this frequency. Defaults to `30s`. + +#### `--health-check-averager-halflife` (duration) + +Half life of averagers used in health checks (to measure the rate of message +failures, for example.) Larger value --> less volatile calculation of +averages. Defaults to `10s`. + +### Network + +#### `--network-allow-private-ips` (bool) + +Allows the node to connect peers with private IPs. Defaults to `true`. + +#### `--network-compression-type` (string) + +The type of compression to use when sending messages to peers. Defaults to `gzip`. +Must be one of [`gzip`, `zstd`, `none`]. + +Nodes can handle inbound `gzip` compressed messages but by default send `zstd` compressed messages. + +#### `--network-initial-timeout` (duration) + +Initial timeout value of the adaptive timeout manager. Defaults to `5s`. + +#### `--network-initial-reconnect-delay` (duration) + +Initial delay duration must be waited before attempting to reconnect a peer. Defaults to `1s`. + +#### `--network-max-reconnect-delay` (duration) + +Maximum delay duration must be waited before attempting to reconnect a peer. Defaults to `1h`. + +#### `--network-minimum-timeout` (duration) + +Minimum timeout value of the adaptive timeout manager. Defaults to `2s`. + +#### `--network-maximum-timeout` (duration) + +Maximum timeout value of the adaptive timeout manager. Defaults to `10s`. + +#### `--network-maximum-inbound-timeout` (duration) + +Maximum timeout value of an inbound message. Defines duration within which an +incoming message must be fulfilled. Incoming messages containing deadline higher +than this value will be overridden with this value. Defaults to `10s`. + +#### `--network-timeout-halflife` (duration) + +Half life used when calculating average network latency. Larger value --> less +volatile network latency calculation. Defaults to `5m`. 
+ +#### `--network-timeout-coefficient` (duration) + +Requests to peers will time out after \[`network-timeout-coefficient`\] \* +\[average request latency\]. Defaults to `2`. + +#### `--network-read-handshake-timeout` (duration) + +Timeout value for reading handshake messages. Defaults to `15s`. + +#### `--network-ping-timeout` (duration) + +Timeout value for Ping-Pong with a peer. Defaults to `30s`. + +#### `--network-ping-frequency` (duration) + +Frequency of pinging other peers. Defaults to `22.5s`. + +#### `--network-health-min-conn-peers` (uint) + +Node will report unhealthy if connected to less than this many peers. Defaults to `1`. + +#### `--network-health-max-time-since-msg-received` (duration) + +Node will report unhealthy if it hasn't received a message for this amount of time. Defaults to `1m`. + +#### `--network-health-max-time-since-msg-sent` (duration) + +Network layer returns unhealthy if haven't sent a message for at least this much time. Defaults to `1m`. + +#### `--network-health-max-portion-send-queue-full` (float) + +Node will report unhealthy if its send queue is more than this portion full. +Must be in \[0,1\]. Defaults to `0.9`. + +#### `--network-health-max-send-fail-rate` (float) + +Node will report unhealthy if more than this portion of message sends fail. Must +be in \[0,1\]. Defaults to `0.25`. + +#### `--network-health-max-outstanding-request-duration` (duration) + +Node reports unhealthy if there has been a request outstanding for this duration. Defaults to `5m`. + +#### `--network-max-clock-difference` (duration) + +Max allowed clock difference value between this node and peers. Defaults to `1m`. + +#### `--network-require-validator-to-connect` (bool) + +If true, this node will only maintain a connection with another node if this +node is a validator, the other node is a validator, or the other node is a +beacon. + +#### `--network-tcp-proxy-enabled` (bool) + +Require all P2P connections to be initiated with a TCP proxy header. 
Defaults to `false`. + +#### `--network-tcp-proxy-read-timeout` (duration) + +Maximum duration to wait for a TCP proxy header. Defaults to `3s`. + +#### `--network-outbound-connection-timeout` (duration) + +Timeout while dialing a peer. Defaults to `30s`. + +### Message Rate-Limiting + +These flags govern rate-limiting of inbound and outbound messages. For more +information on rate-limiting and the flags below, see package `throttling` in +AvalancheGo. + +#### CPU Based + +Rate-limiting based on how much CPU usage a peer causes. + +##### `--throttler-inbound-cpu-validator-alloc` (float) + +Number of CPUs allocated for use by validators. Value should be in range (0, total core count]. +Defaults to half of the number of CPUs on the machine. + +##### `--throttler-inbound-cpu-max-recheck-delay` (duration) + +In the CPU rate-limiter, check at least this often whether the node's CPU usage +has fallen to an acceptable level. Defaults to `5s`. + +##### `--throttler-inbound-disk-max-recheck-delay` (duration) + +In the disk-based network throttler, check at least this often whether the node's disk usage has +fallen to an acceptable level. Defaults to `5s`. + +##### `--throttler-inbound-cpu-max-non-validator-usage` (float) + +Number of CPUs that, if fully utilized, will rate limit all non-validators. Value should be in range +[0, total core count]. +Defaults to 80% of the number of CPUs on the machine. + +##### `--throttler-inbound-cpu-max-non-validator-node-usage` (float) + +Maximum number of CPUs that a non-validator can utilize. Value should be in range [0, total core count]. +Defaults to the number of CPUs / 8. + +##### `--throttler-inbound-disk-validator-alloc` (float) + +Maximum number of disk reads/writes per second to allocate for use by validators. Must be > 0. +Defaults to `1000 GiB/s`. + +##### `--throttler-inbound-disk-max-non-validator-usage` (float) + +Number of disk reads/writes per second that, if fully utilized, will rate limit all non-validators. 
+Must be >= 0. +Defaults to `1000 GiB/s`. + +##### `--throttler-inbound-disk-max-non-validator-node-usage` (float) + +Maximum number of disk reads/writes per second that a non-validator can utilize. Must be >= 0. +Defaults to `1000 GiB/s`. + +#### Bandwidth Based + +Rate-limiting based on the bandwidth a peer uses. + +##### `--throttler-inbound-bandwidth-refill-rate` (uint) + +Max average inbound bandwidth usage of a peer, in bytes per second. See +interface `throttling.BandwidthThrottler`. Defaults to `512`. + +##### `--throttler-inbound-bandwidth-max-burst-size` (uint) + +Max inbound bandwidth a node can use at once. See interface +`throttling.BandwidthThrottler`. Defaults to `2 MiB`. + +#### Message Size Based + +Rate-limiting based on the total size, in bytes, of unprocessed messages. + +##### `--throttler-inbound-at-large-alloc-size` (uint) + +Size, in bytes, of at-large allocation in the inbound message throttler. Defaults to `6291456` (6 MiB). + +##### `--throttler-inbound-validator-alloc-size` (uint) + +Size, in bytes, of validator allocation in the inbound message throttler. +Defaults to `33554432` (32 MiB). + +##### `--throttler-inbound-node-max-at-large-bytes` (uint) + +Maximum number of bytes a node can take from the at-large allocation of the +inbound message throttler. Defaults to `2097152` (2 MiB). + +#### Message Based + +Rate-limiting based on the number of unprocessed messages. + +##### `--throttler-inbound-node-max-processing-msgs` (uint) + +Node will stop reading messages from a peer when it is processing this many messages from the peer. +Will resume reading messages from the peer when it is processing less than this many messages. +Defaults to `1024`. + +#### Outbound + +Rate-limiting for outbound messages. + +##### `--throttler-outbound-at-large-alloc-size` (uint) + +Size, in bytes, of at-large allocation in the outbound message throttler. +Defaults to `33554432` (32 MiB). 
 + +##### `--throttler-outbound-validator-alloc-size` (uint) + +Size, in bytes, of validator allocation in the outbound message throttler. +Defaults to `33554432` (32 MiB). + +##### `--throttler-outbound-node-max-at-large-bytes` (uint) + +Maximum number of bytes a node can take from the at-large allocation of the +outbound message throttler. Defaults to `2097152` (2 MiB). + +### Connection Rate-Limiting + +#### `--network-inbound-connection-throttling-cooldown` (duration) + +Node will upgrade an inbound connection from a given IP at most once within this +duration. Defaults to `10s`. If 0 or negative, will not consider recency of last +upgrade when deciding whether to upgrade. + +#### `--network-inbound-connection-throttling-max-conns-per-sec` (uint) + +Node will accept at most this many inbound connections per second. Defaults to `512`. + +#### `--network-outbound-connection-throttling-rps` (uint) + +Node makes at most this many outgoing peer connection attempts per second. Defaults to `50`. + +### Peer List Gossiping + +Nodes gossip peers to each other so that each node can have an up-to-date peer +list. A node gossips `--network-peer-list-num-validator-ips` validator IPs to +`--network-peer-list-validator-gossip-size` validators, +`--network-peer-list-non-validator-gossip-size` non-validators and +`--network-peer-list-peers-gossip-size` peers every +`--network-peer-list-gossip-frequency`. + +#### `--network-peer-list-num-validator-ips` (int) + +Number of validator IPs to gossip to other nodes. Defaults to `15`. + +#### `--network-peer-list-validator-gossip-size` (int) + +Number of validators that the node will gossip peer list to. Defaults to `20`. + +#### `--network-peer-list-non-validator-gossip-size` (int) + +Number of non-validators that the node will gossip peer list to. Defaults to `0`. + +#### `--network-peer-list-peers-gossip-size` (int) + +Number of total peers (including non-validator or validator) that the node will gossip peer list to. +Defaults to `0`. 
 + +#### `--network-peer-list-gossip-frequency` (duration) + +Frequency to gossip peers to other nodes. Defaults to `1m`. + +#### `--network-peer-read-buffer-size` (int) + +Size of the buffer that peer messages are read into (there is one buffer per +peer), defaults to `8` KiB (8192 Bytes). + +#### `--network-peer-write-buffer-size` (int) + +Size of the buffer that peer messages are written into (there is one buffer per +peer), defaults to `8` KiB (8192 Bytes). + +### Resource Usage Tracking + +#### `--meter-vm-enabled` (bool) + +Enable Meter VMs to track VM performance with more granularity. Defaults to `true`. + +#### `--system-tracker-frequency` (duration) + +Frequency to check the real system usage of tracked processes. More frequent +checks --> usage metrics are more accurate, but more expensive to track. +Defaults to `500ms`. + +#### `--system-tracker-processing-halflife` (duration) + +Half life to use for the processing requests tracker. Larger half life --> usage +metrics change more slowly. Defaults to `15s`. + +#### `--system-tracker-cpu-halflife` (duration) + +Half life to use for the CPU tracker. Larger half life --> CPU usage metrics +change more slowly. Defaults to `15s`. + +#### `--system-tracker-disk-halflife` (duration) + +Half life to use for the disk tracker. Larger half life --> disk usage metrics +change more slowly. Defaults to `1m`. + +#### `--system-tracker-disk-required-available-space` (uint) + +Minimum number of available bytes on disk, under which the node will shut down. +Defaults to `536870912` (512 MiB). + +#### `--system-tracker-disk-warning-threshold-available-space` (uint) + +Warning threshold for the number of available bytes on disk, under which the +node will be considered unhealthy. Must be >= +`--system-tracker-disk-required-available-space`. Defaults to `1073741824` (1 +GiB). + +### Plugins + +#### `--plugin-dir` (string) + +Sets the directory for [VM plugins](/build/vm/intro.md). 
The default value is `$HOME/.avalanchego/plugins`. + +### Virtual Machine (VM) Configs + +#### `--vm-aliases-file` (string) + +Path to JSON file that defines aliases for Virtual Machine IDs. Defaults to +`~/.avalanchego/configs/vms/aliases.json`. This flag is ignored if +`--vm-aliases-file-content` is specified. Example content: + +```json +{ + "tGas3T58KzdjLHhBDMnH2TvrddhqTji5iZAMZ3RXs2NLpSnhH": [ + "timestampvm", + "timerpc" + ] +} +``` + +The above example aliases the VM whose ID is +`"tGas3T58KzdjLHhBDMnH2TvrddhqTji5iZAMZ3RXs2NLpSnhH"` to `"timestampvm"` and +`"timerpc"`. + +#### `--vm-aliases-file-content` (string) + +As an alternative to `--vm-aliases-file`, it allows specifying base64 encoded +aliases for Virtual Machine IDs. + +### Indexing + +#### `--index-allow-incomplete` (boolean) + +If true, allow running the node in such a way that could cause an index to miss transactions. +Ignored if index is disabled. Defaults to `false`. + +### Router + +#### `--router-health-max-drop-rate` (float) + +Node reports unhealthy if the router drops more than this portion of messages. Defaults to `1`. + +#### `--router-health-max-outstanding-requests` (uint) + +Node reports unhealthy if there are more than this many outstanding consensus requests +(Get, PullQuery, etc.) over all chains. Defaults to `1024`. 
diff --git a/config/config_test.go b/config/config_test.go index 2fab7457efde..68847ca4f6d5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -22,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/subnets" ) +const chainConfigFilenameExtention = ".ex" + func TestGetChainConfigsFromFiles(t *testing.T) { tests := map[string]struct { configs map[string]string @@ -72,11 +74,11 @@ func TestGetChainConfigsFromFiles(t *testing.T) { // Create custom configs for key, value := range test.configs { chainDir := filepath.Join(chainsDir, key) - setupFile(t, chainDir, chainConfigFileName+".ex", value) //nolint:goconst + setupFile(t, chainDir, chainConfigFileName+chainConfigFilenameExtention, value) } for key, value := range test.upgrades { chainDir := filepath.Join(chainsDir, key) - setupFile(t, chainDir, chainUpgradeFileName+".ex", value) + setupFile(t, chainDir, chainUpgradeFileName+chainConfigFilenameExtention, value) } v := setupViper(configFile) @@ -161,7 +163,7 @@ func TestSetChainConfigDefaultDir(t *testing.T) { require.Equal(defaultChainConfigDir, v.GetString(ChainConfigDirKey)) chainsDir := filepath.Join(defaultChainConfigDir, "C") - setupFile(t, chainsDir, chainConfigFileName+".ex", "helloworld") + setupFile(t, chainsDir, chainConfigFileName+chainConfigFilenameExtention, "helloworld") chainConfigs, err := getChainConfigs(v) require.NoError(err) expected := map[string]chains.ChainConfig{"C": {Config: []byte("helloworld"), Upgrade: []byte(nil)}} @@ -419,20 +421,6 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { }, expectedErr: nil, }, - "gossip config": { - fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", - givenJSON: `{"appGossipNonValidatorSize": 100 }`, - testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { - id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") - config, ok := given[id] - require.True(ok) - require.Equal(uint(100), config.GossipConfig.AppGossipNonValidatorSize) - 
// must still respect defaults - require.Equal(20, config.ConsensusParameters.K) - require.Equal(uint(10), config.GossipConfig.AppGossipValidatorSize) - }, - expectedErr: nil, - }, } for name, test := range tests { @@ -527,7 +515,6 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { require.Equal(20, config.ConsensusParameters.AlphaConfidence) require.Equal(30, config.ConsensusParameters.K) // must still respect defaults - require.Equal(uint(10), config.GossipConfig.AppGossipValidatorSize) require.Equal(256, config.ConsensusParameters.MaxOutstandingItems) }, expectedErr: nil, diff --git a/config/flags.go b/config/flags.go index 1cbd89dfcf8c..3fb99e5f03e3 100644 --- a/config/flags.go +++ b/config/flags.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/pebble" + "github.com/ava-labs/avalanchego/database/pebbledb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/trace" @@ -69,6 +69,7 @@ func deprecateFlags(fs *pflag.FlagSet) error { func addProcessFlags(fs *pflag.FlagSet) { // If true, print the version and quit. fs.Bool(VersionKey, false, "If true, print version and quit") + fs.Bool(VersionJSONKey, false, "If true, print version in JSON format and quit") } func addNodeFlags(fs *pflag.FlagSet) { @@ -109,7 +110,7 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint64(AddSubnetDelegatorFeeKey, genesis.LocalParams.AddSubnetDelegatorFee, "Transaction fee, in nAVAX, for transactions that add new subnet delegators") // Database - fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebble.Name)) + fs.String(DBTypeKey, leveldb.Name, fmt.Sprintf("Database type to use. 
Must be one of {%s, %s, %s}", leveldb.Name, memdb.Name, pebbledb.Name)) fs.Bool(DBReadOnlyKey, false, "If true, database writes are to memory and never persisted. May still initialize database directory/files on disk if they don't exist") fs.String(DBPathKey, defaultDBDir, "Path to database directory") fs.String(DBConfigFileKey, "", fmt.Sprintf("Path to database config file. Ignored if %s is specified", DBConfigContentKey)) @@ -128,10 +129,6 @@ func addNodeFlags(fs *pflag.FlagSet) { // Peer List Gossip fs.Uint(NetworkPeerListNumValidatorIPsKey, constants.DefaultNetworkPeerListNumValidatorIPs, "Number of validator IPs to gossip to other nodes") - fs.Uint(NetworkPeerListValidatorGossipSizeKey, constants.DefaultNetworkPeerListValidatorGossipSize, "Number of validators that the node will gossip peer list to") - fs.Uint(NetworkPeerListNonValidatorGossipSizeKey, constants.DefaultNetworkPeerListNonValidatorGossipSize, "Number of non-validators that the node will gossip peer list to") - fs.Uint(NetworkPeerListPeersGossipSizeKey, constants.DefaultNetworkPeerListPeersGossipSize, "Number of total peers (including non-validators and validators) that the node will gossip peer list to") - fs.Duration(NetworkPeerListGossipFreqKey, constants.DefaultNetworkPeerListGossipFreq, "Frequency to gossip peers to other nodes") fs.Duration(NetworkPeerListPullGossipFreqKey, constants.DefaultNetworkPeerListPullGossipFreq, "Frequency to request peers from other nodes") fs.Duration(NetworkPeerListBloomResetFreqKey, constants.DefaultNetworkPeerListBloomResetFreq, "Frequency to recalculate the bloom filter used to request new peers from other nodes") @@ -187,15 +184,6 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Uint(ConsensusAppConcurrencyKey, constants.DefaultConsensusAppConcurrency, "Maximum number of goroutines to use when handling App messages on a chain") fs.Duration(ConsensusShutdownTimeoutKey, constants.DefaultConsensusShutdownTimeout, "Timeout before killing an unresponsive chain") 
fs.Duration(ConsensusFrontierPollFrequencyKey, constants.DefaultFrontierPollFrequency, "Frequency of polling for new consensus frontiers") - fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierValidatorSize, "Number of validators to gossip to when gossiping accepted frontier") - fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierNonValidatorSize, "Number of non-validators to gossip to when gossiping accepted frontier") - fs.Uint(ConsensusGossipAcceptedFrontierPeerSizeKey, constants.DefaultConsensusGossipAcceptedFrontierPeerSize, "Number of peers to gossip to when gossiping accepted frontier") - fs.Uint(ConsensusGossipOnAcceptValidatorSizeKey, constants.DefaultConsensusGossipOnAcceptValidatorSize, "Number of validators to gossip to each accepted container to") - fs.Uint(ConsensusGossipOnAcceptNonValidatorSizeKey, constants.DefaultConsensusGossipOnAcceptNonValidatorSize, "Number of non-validators to gossip to each accepted container to") - fs.Uint(ConsensusGossipOnAcceptPeerSizeKey, constants.DefaultConsensusGossipOnAcceptPeerSize, "Number of peers to gossip to each accepted container to") - fs.Uint(AppGossipValidatorSizeKey, constants.DefaultAppGossipValidatorSize, "Number of validators to gossip an AppGossip message to") - fs.Uint(AppGossipNonValidatorSizeKey, constants.DefaultAppGossipNonValidatorSize, "Number of non-validators to gossip an AppGossip message to") - fs.Uint(AppGossipPeerSizeKey, constants.DefaultAppGossipPeerSize, "Number of peers (which may be validators or non-validators) to gossip an AppGossip message to") // Inbound Throttling fs.Uint64(InboundThrottlerAtLargeAllocSizeKey, constants.DefaultInboundThrottlerAtLargeAllocSize, "Size, in bytes, of at-large byte allocation in inbound message throttler") @@ -228,11 +216,6 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Duration(HTTPReadHeaderTimeoutKey, 30*time.Second, fmt.Sprintf("Maximum 
duration to read request headers. The connection's read deadline is reset after reading the headers. If %s is zero, the value of %s is used. If both are zero, there is no timeout.", HTTPReadHeaderTimeoutKey, HTTPReadTimeoutKey)) fs.Duration(HTTPWriteTimeoutKey, 30*time.Second, "Maximum duration before timing out writes of the response. It is reset whenever a new request's header is read. A zero or negative value means there will be no timeout.") fs.Duration(HTTPIdleTimeoutKey, 120*time.Second, fmt.Sprintf("Maximum duration to wait for the next request when keep-alives are enabled. If %s is zero, the value of %s is used. If both are zero, there is no timeout.", HTTPIdleTimeoutKey, HTTPReadTimeoutKey)) - fs.Bool(APIAuthRequiredKey, false, "Require authorization token to call HTTP APIs") - fs.String(APIAuthPasswordFileKey, "", - fmt.Sprintf("Password file used to initially create/validate API authorization tokens. Ignored if %s is specified. Leading and trailing whitespace is removed from the password. Can be changed via API call", - APIAuthPasswordKey)) - fs.String(APIAuthPasswordKey, "", "Specifies password for API authorization tokens") // Enable/Disable APIs fs.Bool(AdminAPIEnabledKey, false, "If true, this node exposes the Admin API") @@ -240,7 +223,6 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Bool(KeystoreAPIEnabledKey, false, "If true, this node exposes the Keystore API") fs.Bool(MetricsAPIEnabledKey, true, "If true, this node exposes the Metrics API") fs.Bool(HealthAPIEnabledKey, true, "If true, this node exposes the Health API") - fs.Bool(IpcAPIEnabledKey, false, "If true, IPCs can be opened") // Health Checks fs.Duration(HealthCheckFreqKey, 30*time.Second, "Time between health checks") @@ -310,10 +292,7 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Int(SnowPreferenceQuorumSizeKey, snowball.DefaultParameters.AlphaPreference, fmt.Sprintf("Threshold of nodes required to update this node's preference in a network poll. 
Ignored if %s is provided", SnowQuorumSizeKey)) fs.Int(SnowConfidenceQuorumSizeKey, snowball.DefaultParameters.AlphaConfidence, fmt.Sprintf("Threshold of nodes required to increase this node's confidence in a network poll. Ignored if %s is provided", SnowQuorumSizeKey)) - fs.Int(SnowCommitThresholdKey, snowball.DefaultParameters.BetaRogue, "Beta value to use for transactions") - // TODO: Remove these once enough time has passed with SnowCommitThresholdKey - fs.Int(SnowVirtuousCommitThresholdKey, snowball.DefaultParameters.BetaVirtuous, "This flag is temporarily ignored due to the X-chain linearization") - fs.Int(SnowRogueCommitThresholdKey, snowball.DefaultParameters.BetaRogue, "Beta value to use for rogue transactions") + fs.Int(SnowCommitThresholdKey, snowball.DefaultParameters.Beta, "Beta value to use for consensus") fs.Int(SnowConcurrentRepollsKey, snowball.DefaultParameters.ConcurrentRepolls, "Minimum number of concurrent polls for finalizing consensus") fs.Int(SnowOptimalProcessingKey, snowball.DefaultParameters.OptimalProcessing, "Optimal number of processing containers in consensus") @@ -327,10 +306,6 @@ func addNodeFlags(fs *pflag.FlagSet) { fs.Bool(MeterVMsEnabledKey, true, "Enable Meter VMs to track VM performance with more granularity") fs.Duration(UptimeMetricFreqKey, 30*time.Second, "Frequency of renewing this node's average uptime metric") - // IPC - fs.String(IpcsChainIDsKey, "", "Comma separated list of chain ids to add to the IPC engine. Example: 11111111111111111111111111111111LpoYY,4R5p2RXDGLqaifZE4hHWH9owe34pfoBULn1DrQTWivjg8o4aH") - fs.String(IpcsPathKey, "", "The directory (Unix) or named pipe name prefix (Windows) for IPC sockets") - // Indexer fs.Bool(IndexEnabledKey, false, "If true, index all accepted containers and transactions and expose them via an API") fs.Bool(IndexAllowIncompleteKey, false, "If true, allow running the node in such a way that could cause an index to miss transactions. 
Ignored if index is disabled") diff --git a/config/keys.go b/config/keys.go index b2ccc16fc63d..289c83170614 100644 --- a/config/keys.go +++ b/config/keys.go @@ -3,64 +3,66 @@ package config -// #nosec G101 +// the HTTPWriteTimeoutKey was moved here so that it would not generate the +// false-positive linter error "G101: Potential hardcoded credentials" when running golangci-lint. +const HTTPWriteTimeoutKey = "http-write-timeout" // #nosec G101 + const ( - DataDirKey = "data-dir" - ConfigFileKey = "config-file" - ConfigContentKey = "config-file-content" - ConfigContentTypeKey = "config-file-content-type" - VersionKey = "version" - GenesisFileKey = "genesis-file" - GenesisFileContentKey = "genesis-file-content" - NetworkNameKey = "network-id" - ACPSupportKey = "acp-support" - ACPObjectKey = "acp-object" - TxFeeKey = "tx-fee" - CreateAssetTxFeeKey = "create-asset-tx-fee" - CreateSubnetTxFeeKey = "create-subnet-tx-fee" - TransformSubnetTxFeeKey = "transform-subnet-tx-fee" - CreateBlockchainTxFeeKey = "create-blockchain-tx-fee" - AddPrimaryNetworkValidatorFeeKey = "add-primary-network-validator-fee" - AddPrimaryNetworkDelegatorFeeKey = "add-primary-network-delegator-fee" - AddSubnetValidatorFeeKey = "add-subnet-validator-fee" - AddSubnetDelegatorFeeKey = "add-subnet-delegator-fee" - UptimeRequirementKey = "uptime-requirement" - MinValidatorStakeKey = "min-validator-stake" - MaxValidatorStakeKey = "max-validator-stake" - MinDelegatorStakeKey = "min-delegator-stake" - MinDelegatorFeeKey = "min-delegation-fee" - MinStakeDurationKey = "min-stake-duration" - MaxStakeDurationKey = "max-stake-duration" - StakeMaxConsumptionRateKey = "stake-max-consumption-rate" - StakeMinConsumptionRateKey = "stake-min-consumption-rate" - StakeMintingPeriodKey = "stake-minting-period" - StakeSupplyCapKey = "stake-supply-cap" - DBTypeKey = "db-type" - DBReadOnlyKey = "db-read-only" - DBPathKey = "db-dir" - DBConfigFileKey = "db-config-file" - DBConfigContentKey = "db-config-file-content" - 
PublicIPKey = "public-ip" - PublicIPResolutionFreqKey = "public-ip-resolution-frequency" - PublicIPResolutionServiceKey = "public-ip-resolution-service" - HTTPHostKey = "http-host" - HTTPPortKey = "http-port" - HTTPSEnabledKey = "http-tls-enabled" - HTTPSKeyFileKey = "http-tls-key-file" - HTTPSKeyContentKey = "http-tls-key-file-content" - HTTPSCertFileKey = "http-tls-cert-file" - HTTPSCertContentKey = "http-tls-cert-file-content" - HTTPAllowedOrigins = "http-allowed-origins" - HTTPAllowedHostsKey = "http-allowed-hosts" - HTTPShutdownTimeoutKey = "http-shutdown-timeout" - HTTPShutdownWaitKey = "http-shutdown-wait" - HTTPReadTimeoutKey = "http-read-timeout" - HTTPReadHeaderTimeoutKey = "http-read-header-timeout" - HTTPWriteTimeoutKey = "http-write-timeout" + DataDirKey = "data-dir" + ConfigFileKey = "config-file" + ConfigContentKey = "config-file-content" + ConfigContentTypeKey = "config-file-content-type" + VersionKey = "version" + VersionJSONKey = "version-json" + GenesisFileKey = "genesis-file" + GenesisFileContentKey = "genesis-file-content" + NetworkNameKey = "network-id" + ACPSupportKey = "acp-support" + ACPObjectKey = "acp-object" + TxFeeKey = "tx-fee" + CreateAssetTxFeeKey = "create-asset-tx-fee" + CreateSubnetTxFeeKey = "create-subnet-tx-fee" + TransformSubnetTxFeeKey = "transform-subnet-tx-fee" + CreateBlockchainTxFeeKey = "create-blockchain-tx-fee" + AddPrimaryNetworkValidatorFeeKey = "add-primary-network-validator-fee" + AddPrimaryNetworkDelegatorFeeKey = "add-primary-network-delegator-fee" + AddSubnetValidatorFeeKey = "add-subnet-validator-fee" + AddSubnetDelegatorFeeKey = "add-subnet-delegator-fee" + UptimeRequirementKey = "uptime-requirement" + MinValidatorStakeKey = "min-validator-stake" + MaxValidatorStakeKey = "max-validator-stake" + MinDelegatorStakeKey = "min-delegator-stake" + MinDelegatorFeeKey = "min-delegation-fee" + MinStakeDurationKey = "min-stake-duration" + MaxStakeDurationKey = "max-stake-duration" + StakeMaxConsumptionRateKey = 
"stake-max-consumption-rate" + StakeMinConsumptionRateKey = "stake-min-consumption-rate" + StakeMintingPeriodKey = "stake-minting-period" + StakeSupplyCapKey = "stake-supply-cap" + DBTypeKey = "db-type" + DBReadOnlyKey = "db-read-only" + DBPathKey = "db-dir" + DBConfigFileKey = "db-config-file" + DBConfigContentKey = "db-config-file-content" + PublicIPKey = "public-ip" + PublicIPResolutionFreqKey = "public-ip-resolution-frequency" + PublicIPResolutionServiceKey = "public-ip-resolution-service" + HTTPHostKey = "http-host" + HTTPPortKey = "http-port" + HTTPSEnabledKey = "http-tls-enabled" + HTTPSKeyFileKey = "http-tls-key-file" + HTTPSKeyContentKey = "http-tls-key-file-content" + HTTPSCertFileKey = "http-tls-cert-file" + HTTPSCertContentKey = "http-tls-cert-file-content" + + HTTPAllowedOrigins = "http-allowed-origins" + HTTPAllowedHostsKey = "http-allowed-hosts" + HTTPShutdownTimeoutKey = "http-shutdown-timeout" + HTTPShutdownWaitKey = "http-shutdown-wait" + HTTPReadTimeoutKey = "http-read-timeout" + HTTPReadHeaderTimeoutKey = "http-read-header-timeout" + HTTPIdleTimeoutKey = "http-idle-timeout" - APIAuthRequiredKey = "api-auth-required" - APIAuthPasswordKey = "api-auth-password" - APIAuthPasswordFileKey = "api-auth-password-file" StateSyncIPsKey = "state-sync-ips" StateSyncIDsKey = "state-sync-ids" BootstrapIPsKey = "bootstrap-ips" @@ -90,10 +92,6 @@ const ( NetworkHealthMaxSendFailRateKey = "network-health-max-send-fail-rate" NetworkHealthMaxOutstandingDurationKey = "network-health-max-outstanding-request-duration" NetworkPeerListNumValidatorIPsKey = "network-peer-list-num-validator-ips" - NetworkPeerListValidatorGossipSizeKey = "network-peer-list-validator-gossip-size" - NetworkPeerListNonValidatorGossipSizeKey = "network-peer-list-non-validator-gossip-size" - NetworkPeerListPeersGossipSizeKey = "network-peer-list-peers-gossip-size" - NetworkPeerListGossipFreqKey = "network-peer-list-gossip-frequency" NetworkPeerListPullGossipFreqKey = 
"network-peer-list-pull-gossip-frequency" NetworkPeerListBloomResetFreqKey = "network-peer-list-bloom-reset-frequency" NetworkInitialReconnectDelayKey = "network-initial-reconnect-delay" @@ -130,8 +128,6 @@ const ( SnowQuorumSizeKey = "snow-quorum-size" SnowPreferenceQuorumSizeKey = "snow-preference-quorum-size" SnowConfidenceQuorumSizeKey = "snow-confidence-quorum-size" - SnowVirtuousCommitThresholdKey = "snow-virtuous-commit-threshold" - SnowRogueCommitThresholdKey = "snow-rogue-commit-threshold" SnowCommitThresholdKey = "snow-commit-threshold" SnowConcurrentRepollsKey = "snow-concurrent-repolls" SnowOptimalProcessingKey = "snow-optimal-processing" @@ -144,22 +140,10 @@ const ( KeystoreAPIEnabledKey = "api-keystore-enabled" MetricsAPIEnabledKey = "api-metrics-enabled" HealthAPIEnabledKey = "api-health-enabled" - IpcAPIEnabledKey = "api-ipcs-enabled" - IpcsChainIDsKey = "ipcs-chain-ids" - IpcsPathKey = "ipcs-path" MeterVMsEnabledKey = "meter-vms-enabled" ConsensusAppConcurrencyKey = "consensus-app-concurrency" ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout" ConsensusFrontierPollFrequencyKey = "consensus-frontier-poll-frequency" - ConsensusGossipAcceptedFrontierValidatorSizeKey = "consensus-accepted-frontier-gossip-validator-size" - ConsensusGossipAcceptedFrontierNonValidatorSizeKey = "consensus-accepted-frontier-gossip-non-validator-size" - ConsensusGossipAcceptedFrontierPeerSizeKey = "consensus-accepted-frontier-gossip-peer-size" - ConsensusGossipOnAcceptValidatorSizeKey = "consensus-on-accept-gossip-validator-size" - ConsensusGossipOnAcceptNonValidatorSizeKey = "consensus-on-accept-gossip-non-validator-size" - ConsensusGossipOnAcceptPeerSizeKey = "consensus-on-accept-gossip-peer-size" - AppGossipValidatorSizeKey = "consensus-app-gossip-validator-size" - AppGossipNonValidatorSizeKey = "consensus-app-gossip-non-validator-size" - AppGossipPeerSizeKey = "consensus-app-gossip-peer-size" ProposerVMUseCurrentHeightKey = "proposervm-use-current-height" 
FdLimitKey = "fd-limit" IndexEnabledKey = "index-enabled" diff --git a/config/viper.go b/config/viper.go index 59ecf1941687..cc15ac55c35b 100644 --- a/config/viper.go +++ b/config/viper.go @@ -15,6 +15,10 @@ import ( "github.com/spf13/viper" ) +const EnvPrefix = "avago" + +var DashesToUnderscores = strings.NewReplacer("-", "_") + // BuildViper returns the viper environment from parsing config file from // default search paths and any parsed command line flags func BuildViper(fs *pflag.FlagSet, args []string) (*viper.Viper, error) { @@ -27,8 +31,8 @@ func BuildViper(fs *pflag.FlagSet, args []string) (*viper.Viper, error) { v := viper.New() v.AutomaticEnv() - v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - v.SetEnvPrefix("avago") + v.SetEnvKeyReplacer(DashesToUnderscores) + v.SetEnvPrefix(EnvPrefix) if err := v.BindPFlags(fs); err != nil { return nil, err } diff --git a/database/encdb/codec.go b/database/encdb/codec.go index 62223b4fdd2f..b786bec66916 100644 --- a/database/encdb/codec.go +++ b/database/encdb/codec.go @@ -4,8 +4,6 @@ package encdb import ( - "time" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" ) @@ -15,7 +13,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewDefaultManager() if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { diff --git a/database/leveldb/db.go b/database/leveldb/db.go index 6c09606128db..7c54b1d86e32 100644 --- a/database/leveldb/db.go +++ b/database/leveldb/db.go @@ -186,7 +186,7 @@ type config struct { } // New returns a wrapped LevelDB object. 
-func New(file string, configBytes []byte, log logging.Logger, namespace string, reg prometheus.Registerer) (database.Database, error) { +func New(file string, configBytes []byte, log logging.Logger, reg prometheus.Registerer) (database.Database, error) { parsedConfig := config{ BlockCacheCapacity: DefaultBlockCacheSize, DisableSeeksCompaction: true, @@ -236,7 +236,7 @@ func New(file string, configBytes []byte, log logging.Logger, namespace string, closeCh: make(chan struct{}), } if parsedConfig.MetricUpdateFrequency > 0 { - metrics, err := newMetrics(namespace, reg) + metrics, err := newMetrics(reg) if err != nil { // Drop any close error to report the original error _ = db.Close() diff --git a/database/leveldb/db_test.go b/database/leveldb/db_test.go index 8352e53bd532..65214d080846 100644 --- a/database/leveldb/db_test.go +++ b/database/leveldb/db_test.go @@ -18,7 +18,7 @@ func TestInterface(t *testing.T) { for name, test := range database.Tests { t.Run(name, func(t *testing.T) { folder := t.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) test(t, db) @@ -30,7 +30,7 @@ func TestInterface(t *testing.T) { func newDB(t testing.TB) database.Database { folder := t.TempDir() - db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) return db } diff --git a/database/leveldb/metrics.go b/database/leveldb/metrics.go index 004e1774264a..d1edab6f98e7 100644 --- a/database/leveldb/metrics.go +++ b/database/leveldb/metrics.go @@ -19,7 +19,7 @@ type metrics struct { writesDelayedCount prometheus.Counter // total amount of time (in ns) that writes that have been delayed due to // compaction - writesDelayedDuration prometheus.Counter + writesDelayedDuration prometheus.Gauge // set to 1 if there is currently at least one write that 
is being delayed // due to compaction writeIsDelayed prometheus.Gauge @@ -44,7 +44,7 @@ type metrics struct { // size of each level levelSize *prometheus.GaugeVec // amount of time spent compacting each level - levelDuration *prometheus.CounterVec + levelDuration *prometheus.GaugeVec // amount of bytes read while compacting each level levelReads *prometheus.CounterVec // amount of bytes written while compacting each level @@ -62,117 +62,99 @@ type metrics struct { priorStats, currentStats *leveldb.DBStats } -func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { +func newMetrics(reg prometheus.Registerer) (metrics, error) { m := metrics{ writesDelayedCount: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "writes_delayed", - Help: "number of cumulative writes that have been delayed due to compaction", + Name: "writes_delayed", + Help: "number of cumulative writes that have been delayed due to compaction", }), - writesDelayedDuration: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "writes_delayed_duration", - Help: "amount of time (in ns) that writes have been delayed due to compaction", + writesDelayedDuration: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "writes_delayed_duration", + Help: "amount of time (in ns) that writes have been delayed due to compaction", }), writeIsDelayed: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "write_delayed", - Help: "1 if there is currently a write that is being delayed due to compaction", + Name: "write_delayed", + Help: "1 if there is currently a write that is being delayed due to compaction", }), aliveSnapshots: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "alive_snapshots", - Help: "number of currently alive snapshots", + Name: "alive_snapshots", + Help: "number of currently alive snapshots", }), aliveIterators: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - 
Name: "alive_iterators", - Help: "number of currently alive iterators", + Name: "alive_iterators", + Help: "number of currently alive iterators", }), ioWrite: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "io_write", - Help: "cumulative amount of io write during compaction", + Name: "io_write", + Help: "cumulative amount of io write during compaction", }), ioRead: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "io_read", - Help: "cumulative amount of io read during compaction", + Name: "io_read", + Help: "cumulative amount of io read during compaction", }), blockCacheSize: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "block_cache_size", - Help: "total size of cached blocks", + Name: "block_cache_size", + Help: "total size of cached blocks", }), openTables: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "open_tables", - Help: "number of currently opened tables", + Name: "open_tables", + Help: "number of currently opened tables", }), levelTableCount: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "table_count", - Help: "number of tables allocated by level", + Name: "table_count", + Help: "number of tables allocated by level", }, levelLabels, ), levelSize: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "size", - Help: "amount of bytes allocated by level", + Name: "size", + Help: "amount of bytes allocated by level", }, levelLabels, ), - levelDuration: prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "duration", - Help: "amount of time (in ns) spent in compaction by level", + levelDuration: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "duration", + Help: "amount of time (in ns) spent in compaction by level", }, levelLabels, ), levelReads: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "reads", - Help: "amount of 
bytes read during compaction by level", + Name: "reads", + Help: "amount of bytes read during compaction by level", }, levelLabels, ), levelWrites: prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "writes", - Help: "amount of bytes written during compaction by level", + Name: "writes", + Help: "amount of bytes written during compaction by level", }, levelLabels, ), memCompactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "mem_comps", - Help: "total number of memory compactions performed", + Name: "mem_comps", + Help: "total number of memory compactions performed", }), level0Compactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "level_0_comps", - Help: "total number of level 0 compactions performed", + Name: "level_0_comps", + Help: "total number of level 0 compactions performed", }), nonLevel0Compactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "non_level_0_comps", - Help: "total number of non-level 0 compactions performed", + Name: "non_level_0_comps", + Help: "total number of non-level 0 compactions performed", }), seekCompactions: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "seek_comps", - Help: "total number of seek compactions performed", + Name: "seek_comps", + Help: "total number of seek compactions performed", }), priorStats: &leveldb.DBStats{}, diff --git a/database/linkeddb/codec.go b/database/linkeddb/codec.go index f1982e1c7cfd..63516a8480c7 100644 --- a/database/linkeddb/codec.go +++ b/database/linkeddb/codec.go @@ -5,7 +5,6 @@ package linkeddb import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -16,7 +15,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt32) if err := 
Codec.RegisterCodec(CodecVersion, lc); err != nil { diff --git a/database/meterdb/db.go b/database/meterdb/db.go index fd3b3b77d7a8..af41746b32e4 100644 --- a/database/meterdb/db.go +++ b/database/meterdb/db.go @@ -5,88 +5,187 @@ package meterdb import ( "context" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils" ) +const methodLabel = "method" + var ( _ database.Database = (*Database)(nil) _ database.Batch = (*batch)(nil) _ database.Iterator = (*iterator)(nil) + + methodLabels = []string{methodLabel} + hasLabel = prometheus.Labels{ + methodLabel: "has", + } + getLabel = prometheus.Labels{ + methodLabel: "get", + } + putLabel = prometheus.Labels{ + methodLabel: "put", + } + deleteLabel = prometheus.Labels{ + methodLabel: "delete", + } + newBatchLabel = prometheus.Labels{ + methodLabel: "new_batch", + } + newIteratorLabel = prometheus.Labels{ + methodLabel: "new_iterator", + } + compactLabel = prometheus.Labels{ + methodLabel: "compact", + } + closeLabel = prometheus.Labels{ + methodLabel: "close", + } + healthCheckLabel = prometheus.Labels{ + methodLabel: "health_check", + } + batchPutLabel = prometheus.Labels{ + methodLabel: "batch_put", + } + batchDeleteLabel = prometheus.Labels{ + methodLabel: "batch_delete", + } + batchSizeLabel = prometheus.Labels{ + methodLabel: "batch_size", + } + batchWriteLabel = prometheus.Labels{ + methodLabel: "batch_write", + } + batchResetLabel = prometheus.Labels{ + methodLabel: "batch_reset", + } + batchReplayLabel = prometheus.Labels{ + methodLabel: "batch_replay", + } + batchInnerLabel = prometheus.Labels{ + methodLabel: "batch_inner", + } + iteratorNextLabel = prometheus.Labels{ + methodLabel: "iterator_next", + } + iteratorErrorLabel = prometheus.Labels{ + methodLabel: "iterator_error", + } + iteratorKeyLabel = prometheus.Labels{ + methodLabel: "iterator_key", + } + 
iteratorValueLabel = prometheus.Labels{ + methodLabel: "iterator_value", + } + iteratorReleaseLabel = prometheus.Labels{ + methodLabel: "iterator_release", + } ) // Database tracks the amount of time each operation takes and how many bytes // are read/written to the underlying database instance. type Database struct { - metrics - db database.Database - clock mockable.Clock + db database.Database + + calls *prometheus.CounterVec + duration *prometheus.GaugeVec + size *prometheus.CounterVec } // New returns a new database with added metrics func New( - namespace string, - registerer prometheus.Registerer, + reg prometheus.Registerer, db database.Database, ) (*Database, error) { - metrics, err := newMetrics(namespace, registerer) - return &Database{ - metrics: metrics, - db: db, - }, err + meterDB := &Database{ + db: db, + calls: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "calls", + Help: "number of calls to the database", + }, + methodLabels, + ), + duration: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "duration", + Help: "time spent in database calls (ns)", + }, + methodLabels, + ), + size: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "size", + Help: "size of data passed in database calls", + }, + methodLabels, + ), + } + return meterDB, utils.Err( + reg.Register(meterDB.calls), + reg.Register(meterDB.duration), + reg.Register(meterDB.size), + ) } func (db *Database) Has(key []byte) (bool, error) { - start := db.clock.Time() + start := time.Now() has, err := db.db.Has(key) - end := db.clock.Time() - db.readSize.Observe(float64(len(key))) - db.has.Observe(float64(end.Sub(start))) - db.hasSize.Observe(float64(len(key))) + duration := time.Since(start) + + db.calls.With(hasLabel).Inc() + db.duration.With(hasLabel).Add(float64(duration)) + db.size.With(hasLabel).Add(float64(len(key))) return has, err } func (db *Database) Get(key []byte) ([]byte, error) { - start := db.clock.Time() + start := time.Now() value, err := 
db.db.Get(key) - end := db.clock.Time() - db.readSize.Observe(float64(len(key) + len(value))) - db.get.Observe(float64(end.Sub(start))) - db.getSize.Observe(float64(len(key) + len(value))) + duration := time.Since(start) + + db.calls.With(getLabel).Inc() + db.duration.With(getLabel).Add(float64(duration)) + db.size.With(getLabel).Add(float64(len(key) + len(value))) return value, err } func (db *Database) Put(key, value []byte) error { - start := db.clock.Time() + start := time.Now() err := db.db.Put(key, value) - end := db.clock.Time() - db.writeSize.Observe(float64(len(key) + len(value))) - db.put.Observe(float64(end.Sub(start))) - db.putSize.Observe(float64(len(key) + len(value))) + duration := time.Since(start) + + db.calls.With(putLabel).Inc() + db.duration.With(putLabel).Add(float64(duration)) + db.size.With(putLabel).Add(float64(len(key) + len(value))) return err } func (db *Database) Delete(key []byte) error { - start := db.clock.Time() + start := time.Now() err := db.db.Delete(key) - end := db.clock.Time() - db.writeSize.Observe(float64(len(key))) - db.delete.Observe(float64(end.Sub(start))) - db.deleteSize.Observe(float64(len(key))) + duration := time.Since(start) + + db.calls.With(deleteLabel).Inc() + db.duration.With(deleteLabel).Add(float64(duration)) + db.size.With(deleteLabel).Add(float64(len(key))) return err } func (db *Database) NewBatch() database.Batch { - start := db.clock.Time() + start := time.Now() b := &batch{ batch: db.db.NewBatch(), db: db, } - end := db.clock.Time() - db.newBatch.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + db.calls.With(newBatchLabel).Inc() + db.duration.With(newBatchLabel).Add(float64(duration)) return b } @@ -106,37 +205,45 @@ func (db *Database) NewIteratorWithStartAndPrefix( start, prefix []byte, ) database.Iterator { - startTime := db.clock.Time() + startTime := time.Now() it := &iterator{ iterator: db.db.NewIteratorWithStartAndPrefix(start, prefix), db: db, } - end := db.clock.Time() - 
db.newIterator.Observe(float64(end.Sub(startTime))) + duration := time.Since(startTime) + + db.calls.With(newIteratorLabel).Inc() + db.duration.With(newIteratorLabel).Add(float64(duration)) return it } func (db *Database) Compact(start, limit []byte) error { - startTime := db.clock.Time() + startTime := time.Now() err := db.db.Compact(start, limit) - end := db.clock.Time() - db.compact.Observe(float64(end.Sub(startTime))) + duration := time.Since(startTime) + + db.calls.With(compactLabel).Inc() + db.duration.With(compactLabel).Add(float64(duration)) return err } func (db *Database) Close() error { - start := db.clock.Time() + start := time.Now() err := db.db.Close() - end := db.clock.Time() - db.close.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + db.calls.With(closeLabel).Inc() + db.duration.With(closeLabel).Add(float64(duration)) return err } func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { - start := db.clock.Time() + start := time.Now() result, err := db.db.HealthCheck(ctx) - end := db.clock.Time() - db.healthCheck.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + db.calls.With(healthCheckLabel).Inc() + db.duration.With(healthCheckLabel).Add(float64(duration)) return result, err } @@ -146,62 +253,75 @@ type batch struct { } func (b *batch) Put(key, value []byte) error { - start := b.db.clock.Time() + start := time.Now() err := b.batch.Put(key, value) - end := b.db.clock.Time() - b.db.bPut.Observe(float64(end.Sub(start))) - b.db.bPutSize.Observe(float64(len(key) + len(value))) + duration := time.Since(start) + + b.db.calls.With(batchPutLabel).Inc() + b.db.duration.With(batchPutLabel).Add(float64(duration)) + b.db.size.With(batchPutLabel).Add(float64(len(key) + len(value))) return err } func (b *batch) Delete(key []byte) error { - start := b.db.clock.Time() + start := time.Now() err := b.batch.Delete(key) - end := b.db.clock.Time() - b.db.bDelete.Observe(float64(end.Sub(start))) - 
b.db.bDeleteSize.Observe(float64(len(key))) + duration := time.Since(start) + + b.db.calls.With(batchDeleteLabel).Inc() + b.db.duration.With(batchDeleteLabel).Add(float64(duration)) + b.db.size.With(batchDeleteLabel).Add(float64(len(key))) return err } func (b *batch) Size() int { - start := b.db.clock.Time() + start := time.Now() size := b.batch.Size() - end := b.db.clock.Time() - b.db.bSize.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + b.db.calls.With(batchSizeLabel).Inc() + b.db.duration.With(batchSizeLabel).Add(float64(duration)) return size } func (b *batch) Write() error { - start := b.db.clock.Time() + start := time.Now() err := b.batch.Write() - end := b.db.clock.Time() - batchSize := float64(b.batch.Size()) - b.db.writeSize.Observe(batchSize) - b.db.bWrite.Observe(float64(end.Sub(start))) - b.db.bWriteSize.Observe(batchSize) + duration := time.Since(start) + size := b.batch.Size() + + b.db.calls.With(batchWriteLabel).Inc() + b.db.duration.With(batchWriteLabel).Add(float64(duration)) + b.db.size.With(batchWriteLabel).Add(float64(size)) return err } func (b *batch) Reset() { - start := b.db.clock.Time() + start := time.Now() b.batch.Reset() - end := b.db.clock.Time() - b.db.bReset.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + b.db.calls.With(batchResetLabel).Inc() + b.db.duration.With(batchResetLabel).Add(float64(duration)) } func (b *batch) Replay(w database.KeyValueWriterDeleter) error { - start := b.db.clock.Time() + start := time.Now() err := b.batch.Replay(w) - end := b.db.clock.Time() - b.db.bReplay.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + b.db.calls.With(batchReplayLabel).Inc() + b.db.duration.With(batchReplayLabel).Add(float64(duration)) return err } func (b *batch) Inner() database.Batch { - start := b.db.clock.Time() + start := time.Now() inner := b.batch.Inner() - end := b.db.clock.Time() - b.db.bInner.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + 
b.db.calls.With(batchInnerLabel).Inc() + b.db.duration.With(batchInnerLabel).Add(float64(duration)) return inner } @@ -211,43 +331,52 @@ type iterator struct { } func (it *iterator) Next() bool { - start := it.db.clock.Time() + start := time.Now() next := it.iterator.Next() - end := it.db.clock.Time() - it.db.iNext.Observe(float64(end.Sub(start))) - size := float64(len(it.iterator.Key()) + len(it.iterator.Value())) - it.db.readSize.Observe(size) - it.db.iNextSize.Observe(size) + duration := time.Since(start) + size := len(it.iterator.Key()) + len(it.iterator.Value()) + + it.db.calls.With(iteratorNextLabel).Inc() + it.db.duration.With(iteratorNextLabel).Add(float64(duration)) + it.db.size.With(iteratorNextLabel).Add(float64(size)) return next } func (it *iterator) Error() error { - start := it.db.clock.Time() + start := time.Now() err := it.iterator.Error() - end := it.db.clock.Time() - it.db.iError.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + it.db.calls.With(iteratorErrorLabel).Inc() + it.db.duration.With(iteratorErrorLabel).Add(float64(duration)) return err } func (it *iterator) Key() []byte { - start := it.db.clock.Time() + start := time.Now() key := it.iterator.Key() - end := it.db.clock.Time() - it.db.iKey.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + it.db.calls.With(iteratorKeyLabel).Inc() + it.db.duration.With(iteratorKeyLabel).Add(float64(duration)) return key } func (it *iterator) Value() []byte { - start := it.db.clock.Time() + start := time.Now() value := it.iterator.Value() - end := it.db.clock.Time() - it.db.iValue.Observe(float64(end.Sub(start))) + duration := time.Since(start) + + it.db.calls.With(iteratorValueLabel).Inc() + it.db.duration.With(iteratorValueLabel).Add(float64(duration)) return value } func (it *iterator) Release() { - start := it.db.clock.Time() + start := time.Now() it.iterator.Release() - end := it.db.clock.Time() - it.db.iRelease.Observe(float64(end.Sub(start))) + duration := 
time.Since(start) + + it.db.calls.With(iteratorReleaseLabel).Inc() + it.db.duration.With(iteratorReleaseLabel).Add(float64(duration)) } diff --git a/database/meterdb/db_test.go b/database/meterdb/db_test.go index 48a8966b2772..57cedc181043 100644 --- a/database/meterdb/db_test.go +++ b/database/meterdb/db_test.go @@ -18,7 +18,7 @@ func TestInterface(t *testing.T) { for name, test := range database.Tests { t.Run(name, func(t *testing.T) { baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) + db, err := New(prometheus.NewRegistry(), baseDB) require.NoError(t, err) test(t, db) @@ -28,7 +28,7 @@ func TestInterface(t *testing.T) { func newDB(t testing.TB) database.Database { baseDB := memdb.New() - db, err := New("", prometheus.NewRegistry(), baseDB) + db, err := New(prometheus.NewRegistry(), baseDB) require.NoError(t, err) return db } diff --git a/database/meterdb/metrics.go b/database/meterdb/metrics.go deleted file mode 100644 index f311607cd467..000000000000 --- a/database/meterdb/metrics.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package meterdb - -import ( - "fmt" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -func newSizeMetric(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { - return metric.NewAveragerWithErrs( - namespace, - name+"_size", - fmt.Sprintf("bytes passed in a %s call", name), - reg, - errs, - ) -} - -func newTimeMetric(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { - return metric.NewAveragerWithErrs( - namespace, - name, - "time (in ns) of a "+name, - reg, - errs, - ) -} - -type metrics struct { - readSize, - writeSize, - has, hasSize, - get, getSize, - put, putSize, - delete, deleteSize, - newBatch, - newIterator, - compact, - close, - healthCheck, - bPut, bPutSize, - bDelete, bDeleteSize, - bSize, - bWrite, bWriteSize, - bReset, - bReplay, - bInner, - iNext, iNextSize, - iError, - iKey, - iValue, - iRelease metric.Averager -} - -func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { - errs := wrappers.Errs{} - return metrics{ - readSize: newSizeMetric(namespace, "read", reg, &errs), - writeSize: newSizeMetric(namespace, "write", reg, &errs), - has: newTimeMetric(namespace, "has", reg, &errs), - hasSize: newSizeMetric(namespace, "has", reg, &errs), - get: newTimeMetric(namespace, "get", reg, &errs), - getSize: newSizeMetric(namespace, "get", reg, &errs), - put: newTimeMetric(namespace, "put", reg, &errs), - putSize: newSizeMetric(namespace, "put", reg, &errs), - delete: newTimeMetric(namespace, "delete", reg, &errs), - deleteSize: newSizeMetric(namespace, "delete", reg, &errs), - newBatch: newTimeMetric(namespace, "new_batch", reg, &errs), - newIterator: newTimeMetric(namespace, "new_iterator", reg, &errs), - compact: newTimeMetric(namespace, "compact", reg, &errs), - close: newTimeMetric(namespace, "close", reg, &errs), - healthCheck: 
newTimeMetric(namespace, "health_check", reg, &errs), - bPut: newTimeMetric(namespace, "batch_put", reg, &errs), - bPutSize: newSizeMetric(namespace, "batch_put", reg, &errs), - bDelete: newTimeMetric(namespace, "batch_delete", reg, &errs), - bDeleteSize: newSizeMetric(namespace, "batch_delete", reg, &errs), - bSize: newTimeMetric(namespace, "batch_size", reg, &errs), - bWrite: newTimeMetric(namespace, "batch_write", reg, &errs), - bWriteSize: newSizeMetric(namespace, "batch_write", reg, &errs), - bReset: newTimeMetric(namespace, "batch_reset", reg, &errs), - bReplay: newTimeMetric(namespace, "batch_replay", reg, &errs), - bInner: newTimeMetric(namespace, "batch_inner", reg, &errs), - iNext: newTimeMetric(namespace, "iterator_next", reg, &errs), - iNextSize: newSizeMetric(namespace, "iterator_next", reg, &errs), - iError: newTimeMetric(namespace, "iterator_error", reg, &errs), - iKey: newTimeMetric(namespace, "iterator_key", reg, &errs), - iValue: newTimeMetric(namespace, "iterator_value", reg, &errs), - iRelease: newTimeMetric(namespace, "iterator_release", reg, &errs), - }, errs.Err -} diff --git a/database/pebble/batch.go b/database/pebbledb/batch.go similarity index 77% rename from database/pebble/batch.go rename to database/pebbledb/batch.go index a53b962dc7be..a14666749bc9 100644 --- a/database/pebble/batch.go +++ b/database/pebbledb/batch.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "fmt" @@ -56,26 +56,18 @@ func (b *batch) Write() error { return database.ErrClosed } - if !b.written { - // This batch has not been written to the database yet. - if err := updateError(b.batch.Commit(pebble.Sync)); err != nil { + if b.written { + // pebble doesn't support writing a batch twice so we have to clone the + // batch before writing it. 
+ newBatch := b.db.pebbleDB.NewBatch() + if err := newBatch.Apply(b.batch, nil); err != nil { return err } - b.written = true - return nil + b.batch = newBatch } - // pebble doesn't support writing a batch twice so we have to clone - // [b] and commit the clone. - batchClone := b.db.pebbleDB.NewBatch() - - // Copy the batch. - if err := batchClone.Apply(b.batch, nil); err != nil { - return err - } - - // Commit the new batch. - return updateError(batchClone.Commit(pebble.Sync)) + b.written = true + return updateError(b.batch.Commit(pebble.Sync)) } func (b *batch) Reset() { diff --git a/database/pebble/batch_test.go b/database/pebbledb/batch_test.go similarity index 89% rename from database/pebble/batch_test.go rename to database/pebbledb/batch_test.go index 3d657a874fd3..98ab0e28eb35 100644 --- a/database/pebble/batch_test.go +++ b/database/pebbledb/batch_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "testing" @@ -17,7 +17,7 @@ func TestBatch(t *testing.T) { require := require.New(t) dirName := t.TempDir() - db, err := New(dirName, DefaultConfigBytes, logging.NoLog{}, "", prometheus.NewRegistry()) + db, err := New(dirName, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(err) batchIntf := db.NewBatch() diff --git a/database/pebble/db.go b/database/pebbledb/db.go similarity index 83% rename from database/pebble/db.go rename to database/pebbledb/db.go index 77259a217d87..1de9c2ce5de5 100644 --- a/database/pebble/db.go +++ b/database/pebbledb/db.go @@ -1,10 +1,9 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package pebble +package pebbledb import ( - "bytes" "context" "encoding/json" "errors" @@ -22,11 +21,13 @@ import ( ) const ( - Name = "pebble" + Name = "pebbledb" // pebbleByteOverHead is the number of bytes of constant overhead that // should be added to a batch size per operation. pebbleByteOverHead = 8 + + defaultCacheSize = 512 * units.MiB ) var ( @@ -34,8 +35,7 @@ var ( errInvalidOperation = errors.New("invalid operation") - defaultCacheSize = 512 * units.MiB - DefaultConfig = Config{ + DefaultConfig = Config{ CacheSize: defaultCacheSize, BytesPerSync: 512 * units.KiB, WALBytesPerSync: 0, // Default to no background syncing. @@ -44,18 +44,8 @@ var ( MaxOpenFiles: 4096, MaxConcurrentCompactions: 1, } - - DefaultConfigBytes []byte ) -func init() { - var err error - DefaultConfigBytes, err = json.Marshal(DefaultConfig) - if err != nil { - panic(err) - } -} - type Database struct { lock sync.RWMutex pebbleDB *pebble.DB @@ -64,17 +54,17 @@ type Database struct { } type Config struct { - CacheSize int `json:"cacheSize"` - BytesPerSync int `json:"bytesPerSync"` - WALBytesPerSync int `json:"walBytesPerSync"` // 0 means no background syncing - MemTableStopWritesThreshold int `json:"memTableStopWritesThreshold"` - MemTableSize int `json:"memTableSize"` - MaxOpenFiles int `json:"maxOpenFiles"` - MaxConcurrentCompactions int `json:"maxConcurrentCompactions"` + CacheSize int64 `json:"cacheSize"` + BytesPerSync int `json:"bytesPerSync"` + WALBytesPerSync int `json:"walBytesPerSync"` // 0 means no background syncing + MemTableStopWritesThreshold int `json:"memTableStopWritesThreshold"` + MemTableSize uint64 `json:"memTableSize"` + MaxOpenFiles int `json:"maxOpenFiles"` + MaxConcurrentCompactions int `json:"maxConcurrentCompactions"` } // TODO: Add metrics -func New(file string, configBytes []byte, log logging.Logger, _ string, _ prometheus.Registerer) (database.Database, error) { +func New(file string, configBytes []byte, log logging.Logger, _ prometheus.Registerer) 
(database.Database, error) { cfg := DefaultConfig if len(configBytes) > 0 { if err := json.Unmarshal(configBytes, &cfg); err != nil { @@ -83,7 +73,7 @@ func New(file string, configBytes []byte, log logging.Logger, _ string, _ promet } opts := &pebble.Options{ - Cache: pebble.NewCache(int64(cfg.CacheSize)), + Cache: pebble.NewCache(cfg.CacheSize), BytesPerSync: cfg.BytesPerSync, Comparer: pebble.DefaultComparer, WALBytesPerSync: cfg.WALBytesPerSync, @@ -200,17 +190,21 @@ func (db *Database) Compact(start []byte, end []byte) error { } if end == nil { - // The database.Database spec treats a nil [limit] as a key after all keys - // but pebble treats a nil [limit] as a key before all keys in Compact. - // Use the greatest key in the database as the [limit] to get the desired behavior. - it := db.pebbleDB.NewIter(&pebble.IterOptions{}) + // The database.Database spec treats a nil [limit] as a key after all + // keys but pebble treats a nil [limit] as a key before all keys in + // Compact. Use the greatest key in the database as the [limit] to get + // the desired behavior. + it, err := db.pebbleDB.NewIter(&pebble.IterOptions{}) + if err != nil { + return updateError(err) + } if !it.Last() { // The database is empty. 
return it.Close() } - end = it.Key() + end = slices.Clone(it.Key()) if err := it.Close(); err != nil { return err } @@ -248,9 +242,18 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database } } + it, err := db.pebbleDB.NewIter(keyRange(start, prefix)) + if err != nil { + return &iter{ + db: db, + closed: true, + err: updateError(err), + } + } + iter := &iter{ db: db, - iter: db.pebbleDB.NewIter(keyRange(start, prefix)), + iter: it, } db.openIterators.Add(iter) return iter @@ -273,7 +276,7 @@ func keyRange(start, prefix []byte) *pebble.IterOptions { LowerBound: prefix, UpperBound: prefixToUpperBound(prefix), } - if bytes.Compare(start, prefix) == 1 { + if pebble.DefaultComparer.Compare(start, prefix) == 1 { opt.LowerBound = start } return opt diff --git a/database/pebble/db_test.go b/database/pebbledb/db_test.go similarity index 96% rename from database/pebble/db_test.go rename to database/pebbledb/db_test.go index ec6dd3e0fa2d..7d48a00c627a 100644 --- a/database/pebble/db_test.go +++ b/database/pebbledb/db_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package pebble +package pebbledb import ( "fmt" @@ -16,7 +16,7 @@ import ( func newDB(t testing.TB) *Database { folder := t.TempDir() - db, err := New(folder, DefaultConfigBytes, logging.NoLog{}, "pebble", prometheus.NewRegistry()) + db, err := New(folder, nil, logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) return db.(*Database) } diff --git a/database/pebble/iterator.go b/database/pebbledb/iterator.go similarity index 83% rename from database/pebble/iterator.go rename to database/pebbledb/iterator.go index ab7d8aad11a3..5e9786a318c8 100644 --- a/database/pebble/iterator.go +++ b/database/pebbledb/iterator.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package pebble +package pebbledb import ( "errors" @@ -17,7 +17,7 @@ import ( var ( _ database.Iterator = (*iter)(nil) - errCouldntGetValue = errors.New("couldnt get iterator value") + errCouldNotGetValue = errors.New("could not get iterator value") ) type iter struct { @@ -63,16 +63,16 @@ func (it *iter) Next() bool { return false } - it.nextKey = it.iter.Key() - - var err error - it.nextVal, err = it.iter.ValueAndErr() + key := it.iter.Key() + value, err := it.iter.ValueAndErr() if err != nil { it.hasNext = false - it.err = fmt.Errorf("%w: %w", errCouldntGetValue, err) + it.err = fmt.Errorf("%w: %w", errCouldNotGetValue, err) return false } + it.nextKey = key + it.nextVal = value return true } @@ -122,6 +122,11 @@ func (it *iter) release() { return } + // Cloning these values ensures that calling it.Key() or it.Value() after + // releasing the iterator will not segfault. + it.nextKey = slices.Clone(it.nextKey) + it.nextVal = slices.Clone(it.nextVal) + // Remove the iterator from the list of open iterators. it.db.openIterators.Remove(it) diff --git a/database/prefixdb/db.go b/database/prefixdb/db.go index 8698453683ff..b3082d9e986e 100644 --- a/database/prefixdb/db.go +++ b/database/prefixdb/db.go @@ -9,13 +9,10 @@ import ( "sync" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) -const ( - defaultBufCap = 256 -) - var ( _ database.Database = (*Database)(nil) _ database.Batch = (*batch)(nil) @@ -27,8 +24,9 @@ var ( type Database struct { // All keys in this db begin with this byte slice dbPrefix []byte - // Holds unused []byte - bufferPool sync.Pool + // Lexically one greater than dbPrefix, defining the end of this db's key range + dbLimit []byte + bufferPool *utils.BytesPool // lock needs to be held during Close to guarantee db will not be set to nil // concurrently with another operation. All other operations can hold RLock. 
@@ -40,14 +38,24 @@ type Database struct { func newDB(prefix []byte, db database.Database) *Database { return &Database{ - dbPrefix: prefix, - db: db, - bufferPool: sync.Pool{ - New: func() interface{} { - return make([]byte, 0, defaultBufCap) - }, - }, + dbPrefix: prefix, + dbLimit: incrementByteSlice(prefix), + db: db, + bufferPool: utils.NewBytesPool(), + } +} + +func incrementByteSlice(orig []byte) []byte { + n := len(orig) + buf := make([]byte, n) + copy(buf, orig) + for i := n - 1; i >= 0; i-- { + buf[i]++ + if buf[i] != 0 { + break + } } + return buf } // New returns a new prefixed database @@ -91,9 +99,6 @@ func PrefixKey(prefix, key []byte) []byte { return prefixedKey } -// Assumes that it is OK for the argument to db.db.Has -// to be modified after db.db.Has returns -// [key] may be modified after this method returns. func (db *Database) Has(key []byte) (bool, error) { db.lock.RLock() defer db.lock.RUnlock() @@ -102,14 +107,11 @@ func (db *Database) Has(key []byte) (bool, error) { return false, database.ErrClosed } prefixedKey := db.prefix(key) - has, err := db.db.Has(prefixedKey) - db.bufferPool.Put(prefixedKey) - return has, err + defer db.bufferPool.Put(prefixedKey) + + return db.db.Has(*prefixedKey) } -// Assumes that it is OK for the argument to db.db.Get -// to be modified after db.db.Get returns. -// [key] may be modified after this method returns. func (db *Database) Get(key []byte) ([]byte, error) { db.lock.RLock() defer db.lock.RUnlock() @@ -118,15 +120,11 @@ func (db *Database) Get(key []byte) ([]byte, error) { return nil, database.ErrClosed } prefixedKey := db.prefix(key) - val, err := db.db.Get(prefixedKey) - db.bufferPool.Put(prefixedKey) - return val, err + defer db.bufferPool.Put(prefixedKey) + + return db.db.Get(*prefixedKey) } -// Assumes that it is OK for the argument to db.db.Put -// to be modified after db.db.Put returns. -// [key] can be modified after this method returns. -// [value] should not be modified. 
func (db *Database) Put(key, value []byte) error { db.lock.RLock() defer db.lock.RUnlock() @@ -135,14 +133,11 @@ func (db *Database) Put(key, value []byte) error { return database.ErrClosed } prefixedKey := db.prefix(key) - err := db.db.Put(prefixedKey, value) - db.bufferPool.Put(prefixedKey) - return err + defer db.bufferPool.Put(prefixedKey) + + return db.db.Put(*prefixedKey, value) } -// Assumes that it is OK for the argument to db.db.Delete -// to be modified after db.db.Delete returns. -// [key] may be modified after this method returns. func (db *Database) Delete(key []byte) error { db.lock.RLock() defer db.lock.RUnlock() @@ -151,9 +146,9 @@ func (db *Database) Delete(key []byte) error { return database.ErrClosed } prefixedKey := db.prefix(key) - err := db.db.Delete(prefixedKey) - db.bufferPool.Put(prefixedKey) - return err + defer db.bufferPool.Put(prefixedKey) + + return db.db.Delete(*prefixedKey) } func (db *Database) NewBatch() database.Batch { @@ -186,15 +181,17 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database Err: database.ErrClosed, } } + prefixedStart := db.prefix(start) + defer db.bufferPool.Put(prefixedStart) + prefixedPrefix := db.prefix(prefix) - it := &iterator{ - Iterator: db.db.NewIteratorWithStartAndPrefix(prefixedStart, prefixedPrefix), + defer db.bufferPool.Put(prefixedPrefix) + + return &iterator{ + Iterator: db.db.NewIteratorWithStartAndPrefix(*prefixedStart, *prefixedPrefix), db: db, } - db.bufferPool.Put(prefixedStart) - db.bufferPool.Put(prefixedPrefix) - return it } func (db *Database) Compact(start, limit []byte) error { @@ -204,7 +201,17 @@ func (db *Database) Compact(start, limit []byte) error { if db.closed { return database.ErrClosed } - return db.db.Compact(db.prefix(start), db.prefix(limit)) + + prefixedStart := db.prefix(start) + defer db.bufferPool.Put(prefixedStart) + + if limit == nil { + return db.db.Compact(*prefixedStart, db.dbLimit) + } + prefixedLimit := db.prefix(limit) + defer 
db.bufferPool.Put(prefixedLimit) + + return db.db.Compact(*prefixedStart, *prefixedLimit) } func (db *Database) Close() error { @@ -236,23 +243,12 @@ func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { } // Return a copy of [key], prepended with this db's prefix. -// The returned slice should be put back in the pool -// when it's done being used. -func (db *Database) prefix(key []byte) []byte { - // Get a []byte from the pool - prefixedKey := db.bufferPool.Get().([]byte) +// The returned slice should be put back in the pool when it's done being used. +func (db *Database) prefix(key []byte) *[]byte { keyLen := len(db.dbPrefix) + len(key) - if cap(prefixedKey) >= keyLen { - // The [] byte we got from the pool is big enough to hold the prefixed key - prefixedKey = prefixedKey[:keyLen] - } else { - // The []byte from the pool wasn't big enough. - // Put it back and allocate a new, bigger one - db.bufferPool.Put(prefixedKey) - prefixedKey = make([]byte, keyLen) - } - copy(prefixedKey, db.dbPrefix) - copy(prefixedKey[len(db.dbPrefix):], key) + prefixedKey := db.bufferPool.Get(keyLen) + copy(*prefixedKey, db.dbPrefix) + copy((*prefixedKey)[len(db.dbPrefix):], key) return prefixedKey } @@ -264,33 +260,32 @@ type batch struct { // Each key is prepended with the database's prefix. // Each byte slice underlying a key should be returned to the pool // when this batch is reset. - ops []database.BatchOp + ops []batchOp +} + +type batchOp struct { + Key *[]byte + Value []byte + Delete bool } -// Assumes that it is OK for the argument to b.Batch.Put -// to be modified after b.Batch.Put returns -// [key] may be modified after this method returns. -// [value] may be modified after this method returns. 
func (b *batch) Put(key, value []byte) error { prefixedKey := b.db.prefix(key) copiedValue := slices.Clone(value) - b.ops = append(b.ops, database.BatchOp{ + b.ops = append(b.ops, batchOp{ Key: prefixedKey, Value: copiedValue, }) - return b.Batch.Put(prefixedKey, copiedValue) + return b.Batch.Put(*prefixedKey, copiedValue) } -// Assumes that it is OK for the argument to b.Batch.Delete -// to be modified after b.Batch.Delete returns -// [key] may be modified after this method returns. func (b *batch) Delete(key []byte) error { prefixedKey := b.db.prefix(key) - b.ops = append(b.ops, database.BatchOp{ + b.ops = append(b.ops, batchOp{ Key: prefixedKey, Delete: true, }) - return b.Batch.Delete(prefixedKey) + return b.Batch.Delete(*prefixedKey) } // Write flushes any accumulated data to the memory database. @@ -316,19 +311,17 @@ func (b *batch) Reset() { // Clear b.writes if cap(b.ops) > len(b.ops)*database.MaxExcessCapacityFactor { - b.ops = make([]database.BatchOp, 0, cap(b.ops)/database.CapacityReductionFactor) + b.ops = make([]batchOp, 0, cap(b.ops)/database.CapacityReductionFactor) } else { b.ops = b.ops[:0] } b.Batch.Reset() } -// Replay replays the batch contents. -// Assumes it's safe to modify the key argument to w.Delete and w.Put -// after those methods return. +// Replay the batch contents. 
func (b *batch) Replay(w database.KeyValueWriterDeleter) error { for _, op := range b.ops { - keyWithoutPrefix := op.Key[len(b.db.dbPrefix):] + keyWithoutPrefix := (*op.Key)[len(b.db.dbPrefix):] if op.Delete { if err := w.Delete(keyWithoutPrefix); err != nil { return err diff --git a/database/prefixdb/db_test.go b/database/prefixdb/db_test.go index f928d2f635a4..82b801f22e8f 100644 --- a/database/prefixdb/db_test.go +++ b/database/prefixdb/db_test.go @@ -7,6 +7,8 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" ) @@ -25,6 +27,15 @@ func TestInterface(t *testing.T) { } } +func TestPrefixLimit(t *testing.T) { + testString := []string{"hello", "world", "a\xff", "\x01\xff\xff\xff\xff"} + expected := []string{"hellp", "worle", "b\x00", "\x02\x00\x00\x00\x00"} + for i, str := range testString { + db := newDB([]byte(str), nil) + require.Equal(t, db.dbLimit, []byte(expected[i])) + } +} + func FuzzKeyValue(f *testing.F) { database.FuzzKeyValue(f, New([]byte(""), memdb.New())) } diff --git a/database/test_database.go b/database/test_database.go index 99654282250b..e35a98dca36f 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -307,7 +307,7 @@ func TestBatchDelete(t *testing.T, db Database) { require.NoError(db.Delete(key)) } -// TestMemorySafetyDatabase ensures it is safe to modify a key after passing it +// TestMemorySafetyBatch ensures it is safe to modify a key after passing it // to Batch.Put. 
func TestMemorySafetyBatch(t *testing.T, db Database) { require := require.New(t) diff --git a/genesis/bootstrappers.go b/genesis/bootstrappers.go index 4f39279ebfdd..e8bf95bc2c05 100644 --- a/genesis/bootstrappers.go +++ b/genesis/bootstrappers.go @@ -6,12 +6,12 @@ package genesis import ( "encoding/json" "fmt" + "net/netip" _ "embed" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/sampler" ) @@ -31,8 +31,8 @@ func init() { // Represents the relationship between the nodeID and the nodeIP. // The bootstrapper is sometimes called "anchor" or "beacon" node. type Bootstrapper struct { - ID ids.NodeID `json:"id"` - IP ips.IPDesc `json:"ip"` + ID ids.NodeID `json:"id"` + IP netip.AddrPort `json:"ip"` } // GetBootstrappers returns all default bootstrappers for the provided network. diff --git a/genesis/checkpoints.go b/genesis/checkpoints.go new file mode 100644 index 000000000000..9d72b36b55d8 --- /dev/null +++ b/genesis/checkpoints.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "encoding/json" + "fmt" + + _ "embed" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + //go:embed checkpoints.json + checkpointsPerNetworkJSON []byte + + checkpointsPerNetwork map[string]map[ids.ID]set.Set[ids.ID] +) + +func init() { + if err := json.Unmarshal(checkpointsPerNetworkJSON, &checkpointsPerNetwork); err != nil { + panic(fmt.Sprintf("failed to decode checkpoints.json: %v", err)) + } +} + +// GetCheckpoints returns all known checkpoints for the chain on the requested +// network. 
+func GetCheckpoints(networkID uint32, chainID ids.ID) set.Set[ids.ID] { + networkName := constants.NetworkIDToNetworkName[networkID] + return checkpointsPerNetwork[networkName][chainID] +} diff --git a/genesis/checkpoints.json b/genesis/checkpoints.json new file mode 100644 index 000000000000..3481746c7a92 --- /dev/null +++ b/genesis/checkpoints.json @@ -0,0 +1,612 @@ +{ + "fuji": { + "11111111111111111111111111111111LpoYY": [ + "223opwBisPqPFJYMGbHvmvNaBb3EVjHqDbaCD2k4DWmBErychP", + "22QytoWHKDyqoSHzEg2jcnpy2K1ocGhfLVvV5eUL8M5WGQyDUo", + "22eMTpCKswsDmHzxC3rq1foxecqX27DveEJD1octSqzu8dLGSH", + "23xcXTT2TBGqos4qsNmfhH9UdvEo6SSgG3K8tHzZSHgqRpuqF1", + "26mZvbGt9PVowp86TZkX5y8AxX66qgcQg48f8m6PGaGyyiMRxt", + "27jmo6kXpEHD4QsPNiXDZvN2mauaMjQdPdfQ6yqjZZTgd5fopi", + "27sYpucSURKu6AbWSKNLz37kjhXVzdkAiWmWyxBef1MaLNXyKr", + "27uCd85pvFdjGsnP5H3WKei9W7DBJ1exsG8ySKzLZ24ffryds4", + "292ig1WtzAxvwfdC5RoqvMHVcyQk9jwJeKPUXmnJMNUBmubjTW", + "29oddG7fursmoQNLHANS6h2WNftczJivnx9srxPLhqyv8yUG89", + "2AAxNFWkm1TEYuCmZP9aaP4MAX4nKuRk6HBGwPAyBsjVSpDait", + "2ArnZkyEykgEYvPBUnDQoeT5hvp2djc5chhPMiBddC3MWTXume", + "2BKC5Rw5guhbtq5ZzPBPA6tB7p6KT3uyqJmL5HJpnJoMCTLJcc", + "2BtVqL41rwv1EpLHbLhGGH7qmbp8CS4iBVbchy4XyDjXpdw9VF", + "2C5hHCqoRLh8VrVVAAPJXXsMd6gTWF7y7T6Zw8cFTWhNBPF959", + "2EALbD88Aa3jG9tyJ1EeTuNrMQEripPgT45ENoWa6kCBey4fj1", + "2F3nmzZ4zJwhDHpXyCp7qHipiL5x3vZ4E7U5GbdMioML28eGYW", + "2FasL7vS1W7KYydt2SRexThpeRc2mj1mdwYQGFVJudXgtNxbK", + "2GGCrHkhqYFsmZzEHGXBKTT1MYicZMUkXENLsxwCJEJnbLds2X", + "2GvUnbuw1Tzat9ANiBe8VKTCqCKqRJhUYbU2ydMHzhacdVjs7d", + "2MyJWUpCj5sEEfocM2QK1yHikmsmUYj1ohy5qpuBTV6kBV7SLV", + "2N3FEhJwqa7yQj4yiEUQWmiUjPXrwbwUdhyjmTWG4RY16dstBG", + "2NdCscMk9ptgb6rNSNN2z9jbAVS8xrHMiXQ14nfs1ZtAebfmrJ", + "2NtjDcF7768wapKgGCP2QMtkviypLQ9PRbEQzVNGHUDKoAFe9r", + "2Q5ME4Qu9tXheNCbbvE9ARYTBwcyeSBsQQQAcvoyX9cfqCzVqi", + "2QGPPTEQn8iyDcQEyMrMH4NnFoeYgKCrfcYd3rBBkvu4YG2FwP", + "2Qya51sKi6kBH4AYAMQLcHpMT5uDsuVZgtounj7GqqqWVvHW8Y", + "2SqsMUwbdGp583RTpewTnVwn16kUQon7N1GpiHJAScE9sw9dwt", 
+ "2T57a4UW8HPRagb6dBftpVEUWCBzwP2ZeLrjL3hJeusDTXB9no", + "2VzjdKN5CuPQbZnP1AmqTnk4zesvpLd1mnbS3Nga4T71Xx6nkH", + "2XSRDfkA8vyd4EEwntPbULegRsV4ZuNkREXnBpdoEw2Fma1KjG", + "2Y3oAtgb3KtxmCEcF4qm7piGFuRX17abzv8dEDGNXeVX9iaJwK", + "2Y61SGxffumcVCkUYw4ycGe4BXYtc94Xm68XPp4hSCYDFZTess", + "2ZbZUZWqvNNe2W2FtdKiXcUCPBrm5bmX3SiaanQREFAxg4ev3Y", + "2aYuGkvExCVfptTid4oqKAeQGYrKZsxwuaLot8tU48KjPQBJK8", + "2dbfhBpFjL5VM4uzbX6npNbWMKPxWF5DhunzEi1Hj5hxDp6hK4", + "2igtVfGcQsSiADeKuRLW39RRZLewkTwhCGRvfMSY7v7y4AD8h5", + "2ipN93J1esuG4e4AYc7ASZBUugNP3x4zNbKahSavkrxinigJu5", + "2j3RXzmGYkyjkqJSXBmZChEuq2d159XAq1dijjWsWRSYi7hmv8", + "2jtrFph9tduRQnrwrKx4qVmfPeZAWzPGTzW8FvRnzRVYhdTYHn", + "2kSoQFdwQiZ7cnH1mVV2pAYBh4M8BcKHHo4HuuVhs3XyRbmBYJ", + "2kXf6mnse3hpoY531Xdoo6fv2vNJdT8y7V4Ud9pjNzEpMREBzo", + "2m797oeWLxKj9UMYfmwZajEjG21PzEd5RHBsb14pmB7mWEbM1J", + "2mzCA32ZmnoCfSGVhqtdrKtXBg16edtoFQJAqqJvy3Be5r2m7N", + "2qnJE1DDHmk4K3R2LrvoJMiMhJh5PymmrtfWjwfVpyPHtUGqK7", + "2qxgRBRKWKNGaBQEyxc81MwyY822vL3EBh28xCVDY7jA2m2Tis", + "2r3JH3so16Sm2wR6DoEW9mztToDm4cotAJkQRxX23J8NrmTKUb", + "2rB1a6pzf9Bk7JaLBfDYUUJbznDCjHVbJdcNzxR2oCMzgqk5gy", + "2rXgjQJgtHsQX1ijwj1XvDULkqkFziDT7oqfYo2gxAe6HAtPd2", + "2tHb5NapTuTEzQNFearDkh2bFTNgk4Bv85vEuwVAxDaESRYGxH", + "2ubwFU7pYNw2EPNsVmb8mLAEp2xfUBnwAACjKVHuzeApEPaZVn", + "3v4Rs8kT4Hr7fr5p3K4Yc5GmvBDieZv96CQH1fEG1QShPLX7q", + "61EZVNqCY4Lu7kzWS7Y2rihXaiRX1jT1o3QZPwhrDFqyQ2ymZ", + "6kqdpwBMmo4YaJs79p2XbuGnH7KTwWSye8c2Sqjc8jfERYuGN", + "8x8PagWMhJDDaya6oTmxbSXC2cfubqkqcKeXRvXkEqPnHwTHn", + "9VEBvpL5jEHxA8bnicvTqkQyapdjrMdp4PQyANp8D3GLGSYxy", + "AFFTGZQmL1GWeucQ89GFNKvjXprQbEzUBthzwjeVmybKgWNbX", + "Ag8rmvXryhZfxGte4sQccqJ3ibKvmdLnYYxwxFZpsiiuadxW6", + "C2jR7wmNuAhqn4y5XAGMFTJJGoxtc6QhX9Td2Bmc2T32DxuSE", + "CPw6ptdo67PWvHhZi1axonjFZP9GyCcZnjSWaAcunU4FPVSdn", + "CeNMWvAE9yUsbAap7u6dquJ6NGrUbPfF6i2UWGRKTkneU29XJ", + "CyyG6WwyVLP8zj5jDCVyaXH11auzRaasikoWbyWRqWfE67YfM", + "DPeEFcS4MvCWEqj6s3MQyGRi8vjc3SKySD56piTZBeWjzRsu4", + 
"F5QoLN7bJWJsLv6RgG6JWfAe2XWj67wvN7crQnoAdHYiDjark", + "FkqGhwHZLWaAjTEWPpLwvSf9zNWnER4XhhuunqsjuTfNRWyZG", + "GDcvEKFfDbFpdRmUwCBmHdSF12SxxMjzL2PNK6EXGMVWYPPDp", + "GNd8AChiLADLnqTmypEhS86vre6QKYpcPKUKZUhFnyB8gftyF", + "Jjp1kBE4tPMmHqQq8XeopNMREWQa8RxfLtv41VhpUT6jxupCt", + "KKYswxetFUrEcDHgGv86R9TcAegkA454RSFVVFGnnGd4f45qJ", + "LQ71p9PjkYBVWTUz9GbKPaFxA69V5pv4KQkVWNChKbCpKghiN", + "LrxumguWFXGaXty4nPzSzkogUQ8WBUufC2JGQAQ7D7KqgupQd", + "P2PzegHfs769nr5nEz2fFhbnPEBpEWWr6cmD8cahvvkAL7SED", + "TmwsnFjxo1jFsBr5rPH8XHLm3AtimbF26xBaxdmw9SYcHGNax", + "V6oXi4fXSV5Q5tvGYquWfdaZBBzUWQDS5iepyzsUtXg1yB6NB", + "WWfS2sqozxfHD2z3AJ5vgkyNbyYD8nwkJJJEut4xgjstb6JKj", + "X6LFRvgSX4rP9A8Fswtus91fQbxhQm1GFYDgTBktiZxTDkj64", + "XUE6WNfCKmHTcaNEsZQ2KftvnXXfPTKnZi5YBw3pLWXjTYZzu", + "awbBnaBnjKBc79RzRPn4LXHMTxoYL8C97toXVvbKPZf81iUp1", + "e3zK4rU88osx7rB9Nq35uBXJrfShm7f3eqVk3T3CHCJD1SrGg", + "fDLU1xyDSihqWErnm62UeMXZ7HNe2atawSicdWamFGTQbLv5z", + "ffHAHY1qvDLcL3u48VHNEXtzqKgTvoi56vKDMcinoEHyy2FTQ", + "gT5RrrsG2bAephkx3bUT9VZGAGVoosGj9Yn6ZgVwbFVWJVv4x", + "hHQYrV9wu2S7hzkewJpotYymvjYpPb6RBeH1oUHPir1EZwyJc", + "iD2h4Qw88P4ce7rbKGJaxXwJLsPeRt4h3kcThDEXJ82HF84uz", + "jB3N7Rd1HCJeP9wLmrDy3NMFT9fkVJ1B5G5KBoFVVRQxc6fpQ", + "jGSYaQZQjieWrt9yq3yUWkW54RYYKfoHxHaRqo9fEBKZ9zAu8", + "mcAyTfg1i71Y7DXjTggfnWrfdjnSWqtg6uHrH3y51AHTi4Tjc", + "pSLCmmTDhnxHFiY3FX4fqCSii65EhWXaRPef2rTHjcp63nVYS", + "qHaV2XmsBRdXA1YDLn8nCYvRAfQue3CdZM3YBWYvvkcW2gMkQ", + "rghpCpaeyY6PVnWCQ9GnBDGtCsRLsdNS9z3tDCSTobsDCNUeM", + "rq7F6LXvXNioXQQyk7qVgMwoHWWAK2UGQrGyJJjbqYH1CYaVk", + "uLPzkqsPd78pLTUVuuXz257nfJmJcamQdAFjXYCoKDuM8FbTz", + "uUGfX6pZNS7jXJW4uF25xntf27AAZMT91NLRppMZhWw2x2aEu", + "uxDeD1vay1vbN24j7Rs5h4kV5q1oDYBmwj3iUvh2h5meCKacP", + "w9djCKT2Em3S6URCHXooEiDWwjeahf66sAL4tBeSa4ihJofLb", + "wZC4y3LB4SPbYZt1NbqrbTJtEo3iB1VyR1vTPm3UknSdsbA5y", + "wt7gkf58s9uiiapcC5DL3Y3iunfWnewQu2woS7FAQx5yDBL3B", + "yXKPp5iLD5mMZp9cV83yD9Yomm4o9nEPy11hU1aPFvRJhTqwa", + "zoe37ZDDigm6owSYNyEgRe6Dj7EiusjxrsnFzPpHLw7EzSDPP" + ], + 
"2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm": [ + "1GBo9WQ1SyRFZse5xhiMXQgptnzhepqnajzpdCvAJ9TNSJT5t", + "21xHKbCFFxmo5EdGCqR24Wvind1LR2hMWkHztqRxuu2ezRhE6b", + "23zbXVVjaix7VcFcU3jxJpGiW6TFxaEV29ZixDDtoGTkPF3eUC", + "25H3WwoqYUApdSF5eugBWEwqfpbmDtdeFLKFDwQqMD9aKHSBAD", + "25HTUEFnmPc193wW4LZUFjcyya3jy3Ws4psiWp7EU8X6WhAHn", + "26NADDbcCqF2xx5yaKTA28nQYcPCs1gcda7WwH3Q7sznPXZ71u", + "28SHVtdhn9MvJzMuzzGC8odjvBNcYvyAbtV6qBBricfP65NjJU", + "28co43WdBgJP6pi9CnqkspSvqeujm8oTK4GoGroi5ZZZqPnSQF", + "294ofGLtC3h62jc8aYpbnnsKJ1ufAEmp19rViEC8senqk8iKWS", + "2FGfG9MqLzwq9jjHAfx1QGtss5xKH6p4cZr2KQkrjE1Tin9B1X", + "2Gmqc72MXrXT65hxkDjuGS7cAJmwY3habQcr5HmXLDAFzyVJMJ", + "2HCLX89frJuqbauSLFreHFQ1t6yJSPb213hn8gpDUJW4ABsr8q", + "2HgkU9sT1oN6pUNt7CGonaTrcgQJtTpY1yHMjj4BYoqfECcJT7", + "2KGfWNfFrcc2qYw4s9oMiS29buB52h3UPoz5WDarhs4Kcey6H1", + "2L7ZfaHezUSQKoKNotq481hAsqafuCMX6DjZjvaktRjRfprjw6", + "2MctCP1iN6hskg6iY2EeGJRKJWbficY35UgoUMRd5PoaE9aagz", + "2PDa7wqNYoAHr3oB89weNFZujVdcR8fpjMPmr6MbUsWDsFQ64p", + "2PKwDA8c8BpibsyHim7n152C48viUt4wtccoWy5qiLiKvZHUVR", + "2RydwjCt1qipkTUXtjdxdQzwVCDjPLE6eNVzSxzUHhjBHko6vR", + "2SssQkgDtWc7dTnY4DSw56ZrCXBvL8uLZjj16hS5C3piNqactA", + "2UFZQKZ1gLuzqeEqX2sHLtZNL3Bxmv1NQL7GdqS6ZvSPMjkftT", + "2V3QXva1iArd88hVB7JsEK8tHn12a5qgifcPBkhpZGWc7fRRis", + "2VvgwD21RGHpEJeUTxhEVyuTdMDQN2Di8L37xPtmirYW4r22d", + "2W6MYE4TJZV43hjj5cMBHG1rEeStNQLXE25VMydkecAorZNPRF", + "2WRmUjiKn4xtGyw33s9qtyhzLvYPBByRoP9vWr4H5Hy5WKYr5J", + "2ZX42ozafD1DaWAKK5Nfsh9FUEKiukFKYfSFtwpwBxU5VLpBHV", + "2ZfNzhvVrgMLy5jX3ztgZXxyfvvt5ng9fjqqmkEPBq1Ds39yCp", + "2b9au7DpArhJPXDJCSFR2cMczAacLT9Xi2umZG9MYXLprkm4iU", + "2bKoipmXErGTPCZedKDwPFKR8v6X7yGmtgZr5r3esN9cRZNxud", + "2bLQd8omCjWuUik2LLPKaRo2AQ78BNjTvYbDsANhFgGjLBcEeW", + "2d9azyeuYybkt25YBwSSVukSdWko2EB8Q1ii2TdvjuNoRwigJj", + "2dUUsFXPnytJjnPmX1pQufNUaBh96MFQuPZ92vMa3XcdGCg6pg", + "2fw1AN9jJHGTEuvNBxqbtejWukHRfxNJSqfVpsFYXJY4Vbw2c3", + "2gbWojYtNF8YZmpJUuq17vxRMDreTTHoD2dUXspDN2rtucHmT1", + 
"2hLra7rDjb77NNF3U9LHMAZ9MqnJzkAY5xEG8ebMjs2DGR1KQD", + "2hNwDNBFeaHZ93VicpE74a5PuLDbQYb71WHQUVdrHyy5ExXPLv", + "2pCJ9gCnbCFC8e1xQNYgu63XQzCLqp9uupk7ePgDX3vWvwoPGb", + "2pWxqGSc8vPJQheXoJNaRRSA2NZKQ6wSdCvdhWXjuKS4VPdDCX", + "2qDzuN2ooKLnEYc5ULhNMEKFTcWu5cSKyipCeJuVjwfu7mVKvy", + "2qTfSXsWBbmuai5iJFhUE4AdpPbwDNR6QWUgjsBPGKdc3R53xw", + "2sE3rjLpaNnu8HAa49hTjYJ3y5dDQwz8TzPphVcJkcihwPqi8q", + "2sTSDCaRduH2oAtCA6cihuDKRbF8b9N3r8bkLYur8dHiYStkQj", + "2tFh16Z6eTzzzUQYDMtMcW7vCYVnZJgNFmvcXKdKueqJeHduLo", + "2vf1B4QJwuhGNUVHitAggar9iZ6GMP6fvkNBBNFxmrwn78Xxqe", + "2wfMYUvLzxtHK5b8PE938LrMLpzL7pMkoFfVZ2P1wb65FVSMDt", + "3oxSU7pEZzr7MMSBLeWprp1e851JtwHT4YCvEzs4vzamAVCfj", + "3srNc7xjafT7pkXDawHisjRyEhdedyEaxpCVdeeSF2eFFFsp5", + "53CPVeV8Jr6amsGVWiB9SVNZVN9vFk7i6omyCrNZxer5mfWqZ", + "6G2qUWZCW2MvgxpWrnR1azuTEu5Gnv6C66eafJsEXbQbV2yRr", + "7qxMYH7p8QsHw5u6pE381gDFESwBHrzbYXK6BhQ9kMp9dmKe5", + "8khD5WmMbwDadv17WNPhsd6Cxu2XLun4n9Pc88Apmd1nNYK7S", + "988Sk4hRBN6V4GQk2rMzKhTXQ8xPy2p7bQBXyiFHknHSLeSdC", + "CCgthPmrL6fsPRtVn5DJAkGs3wnYy8HUSNmJ4PCgDw2beZAKb", + "DKYASkaDm9HyVskLiw9foxUf2exWhWqzgBmnwx5yncX1jDJW4", + "DYdaBt3C2QS1qDoktpFsUQCb43F9a3RzNPefpCQ2csbLjr224", + "DsBjgvFW4AA3GkRav9VCDSnAFYZxvT6CEYAmJvgKaXHkpFNYg", + "DvkrWrKpM86FggT6Yzu4wM63Yw8MPWrNGBa4tujPm9NSR94vf", + "FWXEYzxYxoEQojLLsw9GhFrHA7NxYfrsiFHozt2xV419ih98P", + "FjL4WreWPpTFxCDa3mp2KSxuTDin2yeUfNDiHtMRhnZ7uXWaf", + "GFVDQyM6LiDbYnLQBjYUZQixk2fDLcEf1zSDb7KFLbTwJJfik", + "GaFX7JQGTVQgKuGHZL6fcRqHiKBioCJpYDQ7w4cijaUMQXnut", + "HEEvCX7Af3TPBAJ9KLji1fVfjHKH2ZJwVsV1BnzVKoSR8Xs7C", + "HNgxTBLk1Wa2mTNeYkYYptQLANDTyT5mZHpMdaBjb2axziyAp", + "HqtaYoyEE92oMZaFzdh71Y6zNBs6T8FR6zHJUd3c7cyuhJVGT", + "L82zC58X8yvm8JgfyxtSBUU2GRUAdVbYhWYKbYu7B46ctgNBg", + "M1necnfahQee39ogPGbv7kkokYnUsM6P8npqegLV6GqiAXF1h", + "MEj3pMhy6aNowXkzgTDsnHoJf7E3zKf5sJi5vuxqf4AsdgTjs", + "NbJg4zwzKdc8S77BgoHyYknMrSXzdtWgj98tnPekbQ2z2NTis", + "PD5ZjG1cwFHDjyeWSWYoAhLkDfFmc7ucEmQGZkLPfygXLc4XG", + "QwUfCXBeTiPL3Rv51CxT6BhDw1GkDbQovnQAWzp7sT5FH5a9J", + 
"Stp9k2uteisQPNwUG9A7RQmW6yHaFz95WbLDRWu6QukoDTpD6", + "X23yYJTmGHgMdAYc2joMAzPc8U6d3crwrTemDNRyWYvTfuUT1", + "XETk5x8XPHkX9tAuJa5LvkAc5BXCzCUeMtwc74gfxauH55mkV", + "XUR1jFfmi3G3CK8e6ckwDis3WJMXGowJH9KRTAs3HaNuCFemR", + "ZhxYDBMMLeDwcbzjmM1uybVcx6Ea5pW6gZ75FMxG2p1gAypPk", + "a7d5UaCCbdW86qszpNc6WSow2a6RbNMALTLZwkjddk6WXfWEC", + "a7yqQ6WwRiJkyxmcFjQ7P6c6HD9ggMzw5RhxCkciBpoeEtwKQ", + "aBARTG3jtQcqY4iBu6ysCYfgv5qxNZ7tRgyc4vfKr42kbtzDo", + "bV2La8TYp38PZZVHLcdf62dBxyshP5QdmxoM6AXwWwck6Bj5w", + "ezDVeSFwtsKVj2TKJgnfFk2Ron7XBAnSHkXQaCxg26kyFKKNY", + "fb2EBwXSKdfVMSANF4suB2gYgPZTTFHEgK31Be7Yduh8aUPwo", + "fr1W1wLpVup55hsC7wap8Ry2TThMxbQYFmV1s8h7RDMTm6m2B", + "iDTh19j3bYnSSkQ2zhpKYPFuep8hHZoRo6pKkWDotQU1CJzJF", + "iZpg4RdbgzWHJLD8fx9SvCTBJMcFRzBbsobSZj6YAYz671gs6", + "ifoWGTKercg2y3gdugrmzh3hZ4NyDm6xq2yGAXGngRLVRHU7S", + "iuaz5BC3icH5f8y6koA9DUpnA34hvUDB1UALnBYednBeztpqC", + "jhzhmCBQRwZZrw7pTt1S9Tx9FdJ9VjHq39z9yCadSk31ab9Hr", + "mbHxp14KyQwjvyaKjP2jErwK3Za6iwXvi5N49dRyzUhgTi8r6", + "mobi3yL3zSq9WwC764pihrR4ZUZyLfD4LTS7C5uosXdC7sQaH", + "nCA3JEEwcSfdwV9aCoG5VApSLHmBqaCBKSXT64KSuDSEix88n", + "odgEc4y2JaqMeoXnRoh78EL6jnQJjdhfeVFabmk8BKqeYhbvu", + "pUgAh6Qik6Q1zgK6iHRkPnW3KHQ4htPJVUQK4SNRFvN3bwvaT", + "pauQnSkSBdq1we7935UxwyPBaVE3ygnLeKk9AMZvy2nFPgtde", + "qTg4ng5mihJsLnuxedfn8pXggCoDEVZ8K4zKbebbCmnLYeST1", + "qx3w3KKVhVPD9hJsDxaVRoy9k8z1iWnQNXxM193T4zwJvJnY2", + "siTgYyVSBf4rFpKzZrW2KRKteSZRjvaPz3qudqQiZfCKFfQqs", + "xfjYyMgV6RU882WBsWoK17D1TmAbUDgsgyVqmmtPVpU8mr1iw", + "ybukLCCdJvShHUs4b5AzLK4J7tJ2R6teio7d2ty5uJcEd65a9", + "zL9t4UtcRCPa2LikAW2Rejdohe1B5zhuUZfaEnk7rpoxmuoxV" + ], + "yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp": [ + "21JvTSoPZPqoqrWobnUorujLTmYXsUWNUv5rBgBseVWPdXiGDa", + "21U1M7PKTzUVgZuQExDPPPiSW9bA3TiNs3D1aJjreqGWPcx2yN", + "24ixT1xYtAUCyVsfW8TbCMBzgtohjnM6YVxy9cScFc1uUrMpdz", + "25UMxnQNE2hbxr2Xy6oyZ2Q3B7cmD87RFhpj3CSAJaeXfyCtnt", + "26LqvB7kgRmoaLwLWaPhwcCCvrihGrmrGk66A1PECikaD3K3Bk", + "26X99fmDv36gVAD3tFQBTUiGBDQ36fB9usfnqPA2MWRVXiZ3eM", + 
"29FLDEM4v7V8d4xtyZd55pWvLSSTUFpcqcctyQwFFcvLyhA8Yk", + "29XVtjg3s5iZBYAYNoB392FLv6SCW7bdCC9nW723rE9mEve9xi", + "2ATHMbVCerezgWHK5DCSv47sKTL1CpyZcJAavuyoNJQrnjrGbn", + "2DFMZrvfLAQg17dKVVSXMyqT7cXLwSmbTUuM2rVRJHuVKhGRqD", + "2GPn3DL4GjCq75QfBWVsMmJte2BDARTrqvPX4rvDZasQMkuWt6", + "2GXt7XVXurpZWFkQK8eJ8ShJgUwxxr8ZHJU2DK7Gvm9nkkQDUV", + "2MJJKZrbnvxMrDTEHdrmQyVNdaeyy6o8R4n3UsGVUkEtBViSSu", + "2MtCLFzvzNT42WFCW2UooFuxFxdaBqKGVAh3EGr4vHHNnv2Cfi", + "2Prd2E1oQ61Bnx6rQMPvLJXW3DLApiHdH7D5m1j7ohwmHUuzwU", + "2QB2q7TiiFjLUgda1A9bBLBQGgjNgkyLdyHDvBe1bGZsmAVgeX", + "2S6N9zzrmACZymMBadkoseAxj14EKkgY3nABYDegRJiqZQfm7Q", + "2ShjekPdvVJSCPCXVascxcoX3JJ2vn8Utqar9XkBx2LNwLWsWW", + "2TD4e2fq5bvAsJ87k5sbsu7BMGnbA1ffYRxcfdtfHYaYH4i14s", + "2THb6vcU4jNg2EnJ3Q2y6a3UKEoXjp8eSykiPaYfLVj8ZtaHmp", + "2TfNb95FxPuEbZYjBDcQ54cMv5RDS7XiYcAZUUuDqrBkVQbN3w", + "2UYC4hjcgk1UiQwGrCeXX7dSoQe3ep3Er6BHgLKqMKD6e5nyoM", + "2V6r1qcMQFBLB6YcF5xihgFKqwREUGjwin2wBokTx35T1zw5Kx", + "2VnJqxnuW1AydCb8BZq7wc4hduVrhwkuiY48pdwfZ4iUTHqTSS", + "2WaZXPPa7f4vvuqc3Cx5EFjXJSGb4p4cmnu3HBhQZTuBykEZ9v", + "2Z77eDdjw5gLyi15bFxMeV4ur3pZ9Jj6Yg3iVq724iRxrMkN9z", + "2ZhTJ2PBZWmoahQRDCh665bFXpXQ4r9LKGpH5qJwLDU6moYtE6", + "2a4vLzJy2BJEA7EnTyppWWhjBSwmAzF9acgyVfuoXdbi57de9W", + "2aRPLJ3Qc21zTKB6rfzQigeLoPootswtKseS5nA8mbppCS8AZf", + "2bwhz3TBxpvUpsCYjgfLr8EhVSox8N87EjCjU44c95kmM1YTKg", + "2ddNjV8voVqYbhUHJqRZJEq8YVChWFGyexzSvXzoxtsDJziUhn", + "2g6McaqCUaz8qqCKwwL5kqYpuejTKcoGUmBQm3LSZxHunJd4BE", + "2hDZAR56aJCR1M5BTGpoMPB8cqp4SAF36FevBNvoG7iUtmrNxg", + "2hTq3Zz5J81egcdTgCkxDZP2Dkb43SDG78byiL5TAVa6hrYVhj", + "2iWvFy8kCQpYFaiAt9hEos9XYsLASMXdfuESmAQLB7UAVbRfN7", + "2j3kJruoJ4fiLGwS14t3X51jPfi3zK7J7Cmx6wfKQTwdvjmeW1", + "2jJ3eCS7QBy3h62zfFSFMhBEMtKH4ygwE3rJ42nZkdsc44TanY", + "2kSTivAkKgr59AbEiqt9mYq22GMcudguiLSkutHheub82Vhn79", + "2kTeTwm9WVLGgrS3iorTtnK6tKosodzZnG6m1kBwJeaUgtxMdR", + "2q2dqAZ7onwU1uHTBs1CN54QSU32xzPb1ayHdHT1GVcZ5NfmqH", + "2qnaz9dWFgkKSmQ5uoKpDqC5Qxz4P3u67YPd2f2qXGu7WGtLEC", + 
"2rJ9bGk2wQtzx5fQuj9oX7kEUbSxdHmpXTLgfYyNqGa9ymt12Q", + "2sC1AGd6DvCQUpThn1bXM5bAB8zpgVTfJm9XtHPaqModdcFrzX", + "2sFhs8sYewidvBEaGKvPKFQj68qZ2cJsf1hHvhmUdeaqryiaSo", + "2sUcqBr1wYUBdRv1vmLVcW4NT6c5EnTYDFEz2UzXPnXSrXmJX", + "2t3Z23G2orACLFMhKsPoBGup1X52fbMmjsiH3fenmF7VxJJrqx", + "2unuQJLD19dap3h3WwCMuhYmNrWXhAAMnbzxKDf7GsWyxwPuM5", + "2uvxKKb2zhjjWTGLWouyNJW3zhu66XJiTxjeusN1VuP96gS4Bd", + "2vNaxXF62P3QhRn5EDnpuwHR7BsXs8iVDwnbypjK3KMVBXNoxr", + "2vuhJYm9CUdvbDkGB2H9tyHX3GRR7dn8TcEUbcdBvYencdg2Fw", + "2w8zFCZSs4xw5amonFdynnTKrGR2d7p88YZwaSonxtqMo2HkHg", + "2wUpYxpygcnCAuh3bt4rNfgq8Apz5TebLRADmRJBgoQgJFZAw2", + "3Gswg4voZGNRc9GMRLSk8kt6C3ZcxTqkRXJvPj1DMwNrjrAVv", + "3ZQmVFXx8yG1Pu7oVGUZend251zpjFMNrnDAHrXDYJH34uxpJ", + "3nbPns1MGu9oU625GrNXfhUq6N5on8Ywxt6drcBFdxQFbafar", + "4jnyBE8LLB9eEFGaDbCvQ2L9f5oXCK6BXW6Wts4crPe7LpTVA", + "6YVQC3zCTtBF9HJdgjQaU9USXnAUWizBPXp1Dg7tJpqe1hRCJ", + "6vCbBtofJL4MPK5Axvy7cTDgupqKcnN9D5uabtGvDo7wG2ey8", + "7KSG1AqANFavZ7mmfYAiMJ4k4xuSeUEcNZPEKw4uQm7HbHLBQ", + "8szbitEiP9x8sMqSq9nG232vCUt6SJkY44kwZ6kVfeyiBdeQy", + "9mKsC3XBsdqaBZ5ZcyyUhvfHNQQdNdxzTMPD5XoeZqqJV2QGb", + "BjFDePq9vY6bA35jBhV5WzaD7rCD3SEebrTQUygVpnNw8Ufda", + "EW551Lo3aDRBVgcz8qbGpiUjXZ6kAqtwMhtqnhfJG2Adp9opW", + "FqDib8XetbBKSLnr8rQZAzLfeiEqBb98dXwErgPwedphY7sno", + "FugpHTBdfWeYCQBQz6KuZeBjRAS2VBe4m5HVaK8o3oeZcmQKb", + "GUUR3s8Wvgjz9YMoj6Q261PNHLRKYgFCH8q3ihcGyWh4A8SK5", + "JZqdS91PEVohzTFymZGHdcCfRG93oLjc8a5PH9iJ2Gp7TjPYz", + "JbazoyEJPKjEHprDNBYsfpk23LRrCVAwpESoWVYZ5zAnRqz54", + "Lq4ojZLNYFXZ9Wau2yVwiBVBptqT1bmEZUGKCAGCQjdRgB9Pg", + "M2SSXGemukiiXBi45L233AbYjUxcXuu962VXMjwECi2ez4UbS", + "MYpMva6MUX2vZUKwiQ36i2q5Fhh3U8au2unMJ7fq9HxKcwLPq", + "NXHYS3sy32Bf2KoTUZzb6H4tu6EqnQafYp28ocd77WHh4vFZS", + "Nhs19efmaFctiS14S63YXM4Bub5DwPCLBemDXXs4mTjkeWGMw", + "RRjQBW6bk3XvADmJbzSxryU3vmcus5MAEXy1x15ZDR5psRJZD", + "Ui44APYUHu4TBdc2qp3mif87NmVbwhwsMbH63rW6cALgkCF7n", + "VUUxGtSRdz2n1QzMpLNUs5bBM55t5T94T5RfwyA1PAY8gqk1e", + "W33o9U9nHrf9hjPBfUxxRGxxkJ8oegyYFusE2347nKqwiXwg3", + 
"YoC76Q2LSxph3zRwEVp8dtayt1x2cHvzxuRihTKgrQzv3kckm", + "aE8bKYQ3nmeRbFWgDxwEJE19m9yvSoswHcu9x23uh1LtS2by1", + "b5x6iFkBDuP4uctnR53uGoFk8HK3w8cvxX2UzcSPYBsJaproQ", + "bSeBDhAPUG8FwJgJEDRVgHqWbcba6Wk2KgHxTDwpdUcTd9t2p", + "dkTyTfRSLJpbj14MH5Nru2GpjyW9d1HR8AWTkDFWyAZhVpNx9", + "hw1KLhzE6Qjsroi3EDm52hueAwgMk5Nz611VrX3MwX9bqa4aQ", + "jMwZAAGm6CxbmPhRz25vXnbpyDBN5D2zJBGLKkDAPFRMA4oZM", + "jSjNuDpRdStvdE1FAgLG6UcgiB7MMT2uQjUvY6p7DmjYSYnqY", + "k1CRCGyW5DwSATvUFmwjdufAVYM7ggENgLunPS6NPXV4dWAYW", + "kew14pgXuZVT2E2ZkE81LECKyD87Cs5umwmmaHnAsxjhsCohu", + "mmQbLnboofBqSwgbhPHo5PhXGjSXH4276BECEteDGJtgTxBkL", + "nf5UKNozY7qvt6HGmDFJQEoNoLF8be1SHLoHQt1yxQihm2AP3", + "oHoyVMzizDpf27HjaK5oRgpPUSQDeE2y1uFd5K8JvSoyZnTFQ", + "ouRzG5HtwcvbF89V84R2HojvA7i9c216hiFhxGiZjaJuMLsQw", + "pvQik9xSAbTHVDB1oT1JuwzKHpcU86k64pBWp29eS5RwHDKAx", + "qstPcyGzSMDEN9qdoKQ8zLZLtdua5HetNqVGv9Pxt3MaAMt45", + "sir3USCoRgGLgRviad9ghYZEa5f78WXjn8gJdXCpkw3iBZyHj", + "tKaXsKyhJeyhqXqeBrrot2y7DeSzAPhxR3JYECuGD6FX8BRtW", + "tV7mPELhdGwmvTDzRfX6nKZQ6vdw7MGkndybx3HMHmzmPXSX8", + "tjjaF5todmrADuDu6pbCUTgF5pRPH4HWyLBMfoxu5nj5zVac7", + "uQsjrStSxqJD4vNRGbiD4quyrU2kxZFYgeCLGXRm4cHr8xKdT", + "wCdMuaDh4iLn1oxrrm9otdQzToSvSHKbqhw4R8NgYZfozJ6Pz" + ] + }, + "mainnet": { + "11111111111111111111111111111111LpoYY": [ + "21dW4Z8zuKUpVskZSwFZbNAF1SPTUUBKhipZYxDWhAs7YtKzQg", + "21pB8ejhmvfmLWPFEicVMHfbYufv6gL5iqvYVeHZFh6ZMk2vtm", + "22yzBbX19yE52VBQgkYpSHZcjCpMP2qg253VnpWiQHeJ4wXW2w", + "23GTgAvaBxN9zhBfwymkEcNcNM4k7xjbf9Hfq5QjjZh2A8WiRa", + "24h1gSWZqD8wYAb7j3SEjJjhdmTXvYAQiPSHt3Eg4mdroprZu6", + "25fSTefQEkpqvrd8xLDiyqUBqLYWJPeksDJP5fJKCUmprg5bF4", + "25xdggMGJh5N8VwLP3ZPqx6Cscwt6oPuX4YM4HNUcvcniimCY9", + "28uHUK3WcLJ4ridusCd14ZsQy2cTDSBVsVNh53ymfhhVrGFmHS", + "2Abe67z1SJxcRfD4Y8EWz99CZQ6oNiayTBotPa4yHmgE9SsX5H", + "2BJ4izHDThAg3T9rSTzZinYYFJwNCmUFTcV9TPv88yvDPbLBBk", + "2BLBzaR5pGFwa4bA8yUMrqm4zDe5LaCTHsepSJ9EVBWVdU8xnT", + "2Bd5ChA8c1x3tHTEdnqM75CQ9sdMYeY9Ccd3Lky4T9CotrK6eH", + 
"2BpMKjdPVyQupJjjG79otwCqUfQM5cXMcVTpz2UxGkBLTpKzq1", + "2CDKHCkEaS4CZx7UJvQVsZXtsndBaaKuq8Y5wTcoBNp2TTKupx", + "2CfuXwhMXeq3Xo8RSzgP6yZax9oFps7BvFizG3S3c9uLhWVaJS", + "2E3b8DACP9rp6zWSVFkxcmXxYDWRdMHkjcQMz9RmX5XGYvcDB5", + "2H23j5d44EVxwDgDw1XDBQ21Yt8jGi5Bk9kiSi1N7bVNe2nBKW", + "2LXi2TagZMbS2VYZoB1HhV24eGpFN26WtNPS6mUGsDFPYy9PhP", + "2MshV3qy8WmXSUEFjnAiZXPNDpzZb9RR1DZMY91uUDcYmpsLrV", + "2NWffsfUpmW2EnJTYLAw5c3DBGjVds9z7x82SP4VKFB25RCpJA", + "2QZ7pat7WqMMCseqheSgZBAwGBd6ySsznEVLSLJJh4f43tDwn7", + "2QfzZ5W4DAgeYmvkqB9U2EEavzGJA549XqrDtAHTKsUyY1aKi3", + "2Qqk23b3Fzc9Tibww2uh9Xit7L3jxG8QkDKR9KvdSLmCJfaFw8", + "2RPzaikSStR1FnPL4x9UJgUwFgqGpX8DVLm54Sa44vjSLUJvAZ", + "2TGCyLfQo7ya82Qbw7jeq7rJR1B3cFG2vkLEJL39fb1DQvbG6v", + "2ToBCrrkQ7CdXEVxYHLoZ7K3cBasFW812yjA5HmNeP4aVscC2t", + "2WoS4aQJAL6azWA4tXJauBsCjshLyRRG884KdD9TUnCh8XbBFC", + "2XNtzj37k8FeKgJTeFkWnVC3Z8PkHa1JXE5rwKTJ9m6ReXQnvs", + "2YZM5YUy7znhpMjvFipdhLCipvvVutePEZcGzCAAqyTdTByiDE", + "2YjevVf2FPWrHwPescaiuC7k82v2JmjHVM6ZruuRCGHTqHeXzC", + "2ab6d7CpHJzsknreTVCFmZTCNoU8wCefaFNB5rQyDX2mTWCUY8", + "2bTdq6zTrw3aqyXNNmM9MkQ4PZrMsftUegxZRfmWsDgc2QSeZ3", + "2bqm3gFWAG6td4GTFuTx2uqKW7vc4uYRW4R8tKSW7WzPT1b1fQ", + "2d3rqordy3HTLeEbwJ9286Mpzyty66nHALbsoMmvqntVUMCL8E", + "2eaJEpkNEjU73MK3E3njW1Ab6Lhhi4sYTJ8oL4A9iPXTahBUBT", + "2ebDUe4DfvecEPWvK3gARRjXbKNSbsvbV2KCi35qeeHuPshSRi", + "2fezvdpCGvaRdoHmviJY2PYdLZWTkSCNgM9YiH9iJbS1kr32BG", + "2fuJCRTbEwi2EnXmF4DMgYdEz5fVgy2uL4jng2CWSLRyXNm1BL", + "2gagMEdwA3jKgdHkk3dFeCEDjqB44MVD8Si1FXpeE6Dstn6mPo", + "2hE2jyuicMNX3qBYYiSB9c8tQuEQvDf94cn6enU4RaBwnXkuei", + "2hG9XxsT14sSF16mmcRV5PGLfjBE4GgsRbhqRhT1qEBnZ124Vb", + "2hnpLUZx69Brk9X4ho8fBxFWuLpBEXCigKmtDAwn3F41fW4utr", + "2hwqQXGYpFDqc4HAcS1kqVE4dcpDqdG2RSP2xNQCAdRtrdxUM6", + "2iPnGH68dmFRytXceaAo4PAUcqTNq4vHr14Rndzysc8d4qfVWW", + "2iYS69eq5p2GC2zC5HDHrTqiswXcjLqGYYi2AFB3gWJ7EUW3j2", + "2iiF5GPo2rauQzkiHqEBPw1k3e4dUk7giXcaVVb9uS25HRyTYj", + "2o3rTsiHv8D2dcgNKJDcVwrL7NgYLQiPoTB9HFgKAESMF4pE1d", + 
"2pC1c1zT1w2g8XYimXty9rUFXceqhEjbXXpYu8iX1w5JQ51edf", + "2sfnRNCQnF82DV2V2V7VQycokG6n3HMEVwLdD6XV69Y8wfjojY", + "2uyYEALe2oDUo4Lb2J1qYKQBuhVUm7aNUUnLRVYKYAexV8p2xs", + "2vB9H4JbhRFaSbMnqDuoyhTT4QARbxxBojswmkXgjqGAMZS4pt", + "2w6gm3AXt3597RmSV4h3GpXKjVkz8C9ENQPj67o2Bs7CowhihG", + "2wgN5mCzzLmKShfARuh32i7s4aU3EVo5fbi7wBjoHri7KmUXtU", + "4rshF4L6BNyawp2HPkJGyQc52MRU3JQyixdNjpGNYW17v9rpo", + "5UrLPTVwEiLf86wvvxQbrx3wwgG1vrPKaN88owiPpqtE3gGZK", + "5oXiNcj2mmqSbEbRWZzHsPig8TwUyf3Pkg8tGtuNp64jqB5nz", + "7dpuZBeaBf2U35LfcruGzjFoDhpM4EaH7aFmg5MBTDE3QfTWX", + "9hFDay3tyKJc9UhZ8t66u8UCvzTcypDk6o6RpdiTvHrVBaTCZ", + "B4jkgVYJUmwiGU2khi5jVVQbLkWJWq6jUgMm5Df29xPHj6VmP", + "BQHaGhTA3E1PERKijtMMfPjVpyyMG3y9GS7QbHhQQWykxAqmF", + "C9ndvx4yTAJJYYrFZ7gX3tTYCaWPjCA1SuNjdkGdCXfiVhF6A", + "CVsQMtmA919WWR27C7MKfNPEXCP45jmEzUYpQVZZMqLVLbMAS", + "Dmx2pEWieTEABHxSmB7J3o24TmGbTsdJkRUkxEsF6Zd3LSaTj", + "HXkUpybLU1yRiJYVz6FDJ8o5V6cFsd2T7hbERcDuE9MqQUSW8", + "JQiVeXKisTsYj5KDUvhNDSemXKwmYfRZJ57woBWv5npghCkWz", + "KSJYGu3E5gK8fpHcwpzKh8LGUSjfb7QmH9RydCWFHd5qdzJUs", + "PD4nf2YX7hYiqjtH13hwHgkEfXnwAWwn71ViGQDv11JdYWdCL", + "PQjnMUTwykrKucTfgJeRUZVBW9SVh18swDKUYAxNi2SELiLc9", + "PZDE6Upmm91e4wRq11akRCiCbCp97Xg4mtByhKaSyKNqE6nUg", + "Pa2D9o6BZcXuN9BFtWwkJFx9Tn2s6BMRqNEPUBwWn4kEcHeD8", + "QBRGgKVWfKogPKm8A7DU8pt9jvJ8bD7XvNo2j6hAqUSPvT4ev", + "RxoMDV8LD2Jy7e1HqzWWQJmr3UYzSptoDp9736yAnVzzVc9Rp", + "SbEKmAhYN6ooSERuN2uS2QCac8wWsWQfeNXDCPXh39r8murbf", + "Tbq5pbo1JkBLBwop3PjVE56mySsXQXQiYxT1QTA7DsjizGPUv", + "TsobpXTYZWsDKN3ScjdW8ZXu7qFK4MyJcUgtfYN6wMCTKWSZL", + "Um4DWEohSQiE7Zip5fcPUkbF4s4vYN2QjxWRiYP4igCCeSmVW", + "XJAYMBq9LypWFvPwbzFTaqtqUi8g72pKnCLe7YeTmChRV5ckn", + "Xf1n2j6pkmWZEeZ6qZzQnSZkXQyApup2ZQ28qpMfyErVwtomC", + "Xi8qQgoCBtBDbeAMLQDFaVQaH3M5RqjuA7t7rJURCeQchWeWM", + "YgVJDfiByLxqujYr755nDhN4YXzFP9sUbwiuw632DPMEzqq9Q", + "ZfJioW81b8YQL5hB8Sna1RbntZjkTmgaHSP7u7tVENL24Xd1W", + "aiq2tzZKeJcygfXEkA9soY92s7YvoFfpQRN9oHL313upDkpqV", + "cKf18aKdKfLedh2kFUAUVu92UHUoTiJEAyDBNhT3PibrPpHM9", + 
"cYhbXnRyBVUv8RF1dPiyPHQ12bBdRxMmL2t1SyX5DPoxAAQgX", + "cu6Bq6ZpEC6FJ81TcrniXknmzMttqxRvsyaMdBW62fjjn2ZMF", + "eVMrLraoHucQJpScwTNb8oKTj1orjEuE6dd6iw7sG3KFVeRZs", + "gnX7p8akFyKJ2EKcxzwA23iskzkGmr5ytKfanFFwu6sDXYgds", + "go3MYTHoEFHzNKKSwQUSoKKWwKgd49RvrjEqsF4zy4nW9zZrH", + "kMQnn1wVZFHK5CSfWAPqTWGcyCNyE7YA6vYPSqWTQFVePUVU1", + "kwpEspMB4NdzEwGXVLDudbGhpDK1q7JkvowF88P9fjKFqeDoN", + "oYtL7sA8FpGAGPhQqo4iKBxXeSmcS2HKzdG5KG4JDyrRL8HAS", + "pLJ6bVAxh8dptyicnqduWmLC1z663vzD2cdsAsXMrPwusKPzA", + "pMgxdKA8pKd69KfQGEwV9nfQXYBbxQL8JbcYc3JNtTavvdYDg", + "w6SfeK1abuZcDKPEhhGQtxA1KC19AfvrnXPZ8pLQqgKrzRAf3", + "wLMk9dHUrTu8mUs77vUnxNJTUe5K3Zx6BEES68oWUzDqWtv2u", + "xCxuBYyqHFHZ9inVnL8tX2zBQrSxPuMUQxcToxvVkR2G9ChmP", + "yhB33nzYvFjZUYPP15NzUKDAs9H1uND8wemo7nTF6G22hWhtu", + "yrBWj3sYEbmjJdZn8Q7SC24VCDbSSntwMhHdcMcVh5dhRGHnc", + "zat7FisFCmuXzwNjFujicAPjtsCFEQG31ptHD74HK2SEy4is8" + ], + "2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM": [ + "1Kdri5W9vFJqB2dYBf5ZhYWAmZkkP4aHedVoV9pGYPQd8C77p", + "21jEMY3teZTGfPQqs9gGvWwoYsEvMPJKFLVYfUFYQFtuNv61p2", + "22xzGXKDSum2UQLksu5Eyb7ikMN36iDStT6jbPio9gjyHuyFC5", + "25jYWtYr1zwYoYri74aTiMp82rHC86HP7bJ1sYfvyqBrS2b6st", + "26gohU9ZFmkenRHYYFyp9YZdyAJVtiWKcqEhiwtAyAz4GJssQi", + "288CjAHapx9kK2sseVRgDBBfq4JeMbS3F98S4PK2YQU33vpnDh", + "2BaCerd4yoKeB8wiyFYK6ARB46HcqXSb8SCBJT6onMsgr2fv9r", + "2CNkxrCycKSJVxmMHcxPwcYEGqf3royYBrmKGL55upEESLvqur", + "2CTCExtaeVzGvnsXatitBfeiVQJKmehDdRok8PQQALtzQoYM1K", + "2CVcqHWjQkuzpe9tuSGHWQwazNsFZrDPJdPvjpcPfxmJs5s5TN", + "2DD4QdN9SmBnktJc7daV4asJi9ozwUwi1s3M6WNDShSUb3byDx", + "2DK7hzB9XUgUxeBuC34KAe5hnDKqgmRYgN2is3WjUW4V1vYNtL", + "2EDHTze7RjsaWcdgGy4WFSXEiHz3maHkc2jYGRrTzhKRX9w4CQ", + "2EcjWWz29FZoX7bUBRG4RQqpbwoBrwrhHBft3B3iARiejcKwR7", + "2HQzjUaHr3LW7XQTp8MvJCTibhnu4wyuDkS8LGYKsAGncZoV7j", + "2LScQCmZKGDpMBX8vzrSNUTgXGLX69LFdpQchcrM2MGYADWQx3", + "2LZeM7Fj9qFjm1C2WB8AKoo97VQXEo71EguEp8ZmUMTemQxjs2", + "2Ngiovn4iW43XQrx95kzujiVezrkdiXHnymV4pt7me2Rp4JaRy", + 
"2VFRJacgybj4bqgD3E616j2zhGHvhqqiBBMTLiuXDNxehR6Faq", + "2WAS42i6Xfu8ggWGVE2xCENpKhhkkC6q3Zb9vmVagjHdE1WSc5", + "2Wu6fX1nT9t9sTJWNfu9WMVovdd4QR1vBVeETht2AsDP9qT5rx", + "2XkRkvQ4ujTP7Rb1j7J7grBxqmM71Ggod2HLGC8rTW5pj1VBd1", + "2YS3faaAM93Ev6GHCYp5uWhtsSGuryYdxuNaRcy3hcmuPnDk8C", + "2YdjjG6GW1WcYedSAwRCtpLJUQP2MDCX94aEMd2LPYFAfWXMFu", + "2YdwRT8da6kJsG3XHnK5uQxedMTpqESwoVyJKA9qZjGT8fALds", + "2ZWrZrXWJtPQgVeCdzuAXvPich8hEQuZ9MrhJ6A56KnRw19sFW", + "2bZJH6jDPikCUcjnApQZ9X9k2MV1twjjVoYBp4QPCAB7ZyQANr", + "2bp2TvngbGuY9KRxAxnHk7qZr1KstCKtz83svoNhscEqjt3S9H", + "2bxu7PuSUtCXoBGfjeMjRKxX3gvWsZBXk8x2sLJ8FrBHYDHsd", + "2cm82zfe9NofzqQyjbCi1AmusYm7bxmq5gG7ZKRJhGTTr66fTE", + "2gdhwWbupbvKmHXA5PTe7H7bxFL8TVzW5kA7HG8TK5xMrSsTp8", + "2hxYwQDJ7ZFpfxZBo5b4NnLTL1Bg9fRMZyt7xjNDszG8ezZeto", + "2iWpSR16FCGao7zMcDi3h1XN9u8TTJZpWFzMQizhUV8ZWxWnmR", + "2jkJ3tSmg69mouko9d8PtcfKqs3Z7gR5tuxdsaEnn7trkgSEs2", + "2jwvDvvRLdwYL5UuoigH9BVc2D5bVhFNaYhaygXSamJGi7woiS", + "2kY42u9ckGES78ocJ6nBYgyczvPwVfUujcCJniVM9ZtfWQooC6", + "2kqszyuJBHUN7KsbUkyTFpxGGxmWURLDmGu8nU5h8v3Fzv2zQ1", + "2ncWunFKptuXQs5SiabaFUi8guG827pjLtr2pwfViCCWK85mFv", + "2o92FQ42CfwjsicGnffFZjETGijhsFNG9Gd5Be8UH6TabY6KXa", + "2o9kcsdzCD8Pp1ZJugL5Ryk5YmRwPHyb3Z3orN7JK9Fxcf6iqU", + "2oU5yYfGhdi2NqbjM9Jsufejbj3The1TkWJfzTdKsCF9ouWVJ3", + "2pSpcprbpgACr73pf2JcP8di7eab8vd93QFk2Wd7JaHWrwjHxV", + "2sY7uuNMetZu28FR9rC5MtpqWDKbVHQCvvRqFg9Rz5A2hffa5M", + "2v513oh6biDsL5jFTfVBZdzL8GU5Ur5E4BQCdQ3q94qSYNRvKf", + "2vSaKbZiiKP5pv4PXE8NF7hET1qRxnfps85qCg8EJmAakTMbqk", + "2w3qumfoA21su5Gv5kNM6NjyDmGCaENsBDTGgNEajPvrNf5nuq", + "3py4ULD6d5FzbYXcVLcm5cLBJGCPFJEnzrfCbEXnQfEa8FmZi", + "4ACXU3PYb656PxgVV8M18jw9MiaRmGPYCvApe4uUTEdX7TPpU", + "4VcURBXcMcQjHgdVGJzpqLwaJ53bvyd2RFKqCZj3U41mGmuZB", + "5DCcfMAqj7hWNiJPMSXkfUMvcHoRhHcQAwYumqoiEpKPd3DHa", + "5mg74cn8UZTk1WoWpX1mvipxhSjtvmJPen462K9kh4nG68oL8", + "799PvVmpxzWpsyEzoNaYdSsmTTTxLCgqxybWckfiZPhr2R6nU", + "8JVnuR4znS2TWBg7Fg9dctzYbiT99LiNjzvkxPRJwJayinpiU", + 
"9StEXYCTAZ2BUW7tzACJny4jLnXwkVgZRy37r9wPJGhRYLgDi", + "AKQxgsjUAMuuadiy6YDTMAFA1om1fPqyVjR9hqdz6hhoWkg29", + "AknqzZuovseCGTjcSfJ5xP7CrgVD9qpxiTj7T9shoLK59sW3E", + "DG9JJCKAMRYThgWW7WK88rqzkBcgfNMWZifPQpj4cz8SVPvBg", + "G6U9ZAQxo2qDUZsnU6t5TFmPekP2ivoSbtb8JXzJV95URWJXy", + "GPgfuY8bY2CTWHUzbY52N1W73DJjTpRgLBrG4wqnd2QqaoHyF", + "GU1N5nss24TshQhmadTtWiSuUFK4d9YGi83FDyhMrrgR1b7Yr", + "H3x2CsQRbZhaQuQCjYRW4MadfEZ9Gu4by2DnG5Y8sv9sWZrUY", + "HCDaYhTaYh9639qofxnG2qiX1dCzhTD6bdDcmrYczrQ3ZHjsg", + "HEyb8Q5n5YkPgXGByAtupKGQR7CJanzvMZWfAcD5dkjuQAsrL", + "JJ7peJW4T9QXKcHb1Mefnd2FVYru318qEQGkuJStwP9WejWP1", + "QTkZ9bfE923YkEZ3LNZRZ82mMrcv5mXnosjPLCX1eZhzvxtCd", + "QaDdtpq1b7V7SeB59uwkCRpMuXqhaBiYigLnMCb3CbDLMjDzD", + "RkKECwRZQ7e2a34oirrDqVQwMNscvmqF12V6UbBETSdzhXJxh", + "RpQmzvjeDaHBqqm7A9iqpPkVvMEW4RXW8SXjR8vNNN16S7qNQ", + "UyunGfbFB12wfmJqS8xyoNpjnb9RCcqBKs8Vfxmjf96x2hFAs", + "VDycGV2Ps7stDUZ3kRvbmVuKkKoPUyVFAGtwKmU2W7nBFHjzq", + "W4QoHZErHuSaYn6V9ptAnmtjYsMxEDLWfdmMks6zFNZYjapxo", + "W4tMAUHZcSWQ3ck4q75XVwB2j9cgqtTHKeDGCBsJYu7jYXQGx", + "ZuDLBGTsryGHBSARE4zgM4Dtj1H5DNe1nxfZ8FQpCHmYunPn3", + "bTs52txp1o1z33uebucqS6sZ94c4XLuXQcgUdr97kwZK7BWEt", + "bZMYP5jmsTMyXRK5QTAyHt8bzGuwm4APboEHFbTqexhyc3Tm", + "cddXZXwkgMX4vHG5kxTG8ktXn6ujAtPe2rkZXvEGgAHgwkDSg", + "fDLZXMLLX77NdPYFRLyn2WNDSwtpKxCZqdRgTBuwCP9H3otgk", + "fGWn4NaAqK2BgQWadVtkHB7Q1n1qijrin91tkzq5Exndq3CNy", + "fM4tU4iDpLUYjwqnDeWkLtbkEqPhPRzwbkAD5GTviD4CpAnRN", + "fd4LC3i5w4w8TMmaPwmU7GPHyUsMk6fUtNykCZvmkp9iNuxtL", + "fptxfNAeQbDzTV78ryzqTE9xiQgViGnLZHHfipb3qvWHMvzAs", + "gJqkJ22xRqQ5SRVraoQiz1piMJKsb9QyhhTvbbRfZ4FCsjbxW", + "gdismVqRtUwW2X5DuU3ybYxtjiaSkAmqntXKFSpUkkgdtYMU3", + "i263AhAfgjqrTjEmJG8mA3dKDVX3rMPWRnn9pzT4EDG4pjxRH", + "i9sCwM19Ndxc1FH3nmSHDvt5QPD6StWgyKDGNwSF6LbhbiqhF", + "iuUYXFzNJYqLjA7sZQeXSDPMWShVkjAGykBcLzyZLt1BpH5Cw", + "ivECpWEqSRR9mA3fP6jTu2akET1rRtmJjqednYCjSdMXJAN1q", + "j6EquSChjB48xP64iHh6Bj67GEsvhCoV73uqsf7K5B6PRjfvf", + "kCd5nEUkHyFJLyqZ8NJhqa5GbsvCgBfj9cHXyAWWCin4HjosH", + 
"oQNJxqjJYaSLAHqyF3n9GpbtJzYwDwMD9WsUCAAGWSXAYUG5V", + "p9zGnppWJ1rUrqecGhozsBSsRvsiEwkj4vM7rUringMPL7cQ6", + "q14J1hczzqeQS8aKCJcog5bTFNNkFFWwza4XMjy89FuZRziNg", + "udPiCek7LmDmnkooWSsBe8SQzv9dmMgc4F4VpL5naudqdPQYW", + "vL8xjHKkhPkNrDpe7BkQksbvc1JAnwjTk9kCsJ55wmrBibKjf", + "vfshLdk8kS5hXFpKHDkg4FpwWCxzVXmUjv7Pzgb9WQJ2g6vuu", + "vg5LKAUU1YQSdP39zLx8vzUvmUiuAtM1dt5RqBvG3go4owo1N", + "wHwEP9TgGCQKe1viNvmcnHZtQvyEvJNf49fadLrAbXGjGRKaV", + "y1BYCkbExzrdonKbEvgTRwLmT3t7FE23ZAYbFCaShwFyVEeEm", + "zgKT1hRCBnXvxnzuQnxCZoV4TZAUTWA1UdF7Y1rZxgsqk29Ex" + ], + "2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5": [ + "2191BgNTL88zXHVwVJEnHuZy5zFQsjzij1qj8EoGFTZQcGB3B6", + "21dH9gpxjPttySTz3r2by8akMXXt136U3nECkWf5VudmpvgUEi", + "21oEHrhwJpF2AKPQQSTWFDETXrcFDCGkkzymF1j7Pxm8qL2mBo", + "22SGfV6X3f4UtcmGqCgnUvQjazc6gcdbxGxnuZWhRsuvid3qpF", + "23f3ZeENaJqm7FnSGMU9eCSEoycwLnm5JKyEoxeYiu3GwANUba", + "25A13jm8tX5574XgsqSuCYgbpHuNNq8NMGQPaKPHf9GS5yAADL", + "27wMrwKGVQJFnPZ8EziZhVYEbHrpAQxtWx2M17pk6jknSfKdt9", + "29SPUbhumM83K7oRCoa4TviiQV81uwgf2fi6L1gGt4c2MjbPQ9", + "2ANYGa1AHU2amY3epYvktRihnHxVDm9LmNUXRvzbHitPGyczzu", + "2CGQ9iksuJfHZAfUAuLABn2QYKmHHKcfUePdkFpqaWDbCnJqCr", + "2CKaosKd4RmKY4t2T1K84X9VXfpVX9ZoaiuRjbRY1d9M15Xv4k", + "2DRVqKq3FfYTZR88LGfXg1YDxqbz9B7f5Tz9vB2p1y4GwaJNdF", + "2E6c49tin3rkV2ryVuijjZHFz194CGDfhSTNftJbwen2dUg6Zf", + "2EXrYLc9vLDhWYnmQXTB1TYKBrfiVJHMyGXCgVF7czVTVQJdbe", + "2G1CAL5NMrQ6jhwWXs8jB1sbaSJsi58PkTkoKP9PLpRKHF3y8j", + "2GhEcNCy8q2gEUjnBLJ842nmmYiwgSuW7Bdy24hbvENU32weu4", + "2HUz5KyB2GgkAoLd2aaVEBsfbKhTJ7dhiLamQQzkacaWi7KDDo", + "2JDiCDikgadtNFpEknb5MRB4QWfJ7fx3xcGAmqC9o9Ljw3cLn7", + "2JqZF1PFLgYprYJMVVKVsLHPHDgMJXVraKL72HwKuUGpcq5GiN", + "2L7e2Bfdh9hGYxXqvbZ5M9UuyiNzJsmQ3GdSrfzjYGkMVR7uFy", + "2L8S8gnQFx8C3JYKadBedcrfYyMDgHd2szVyn9BTJFX5PMuYr4", + "2MN1cqYpY5YqQkpLhthLggss3aPjJChAQ7HRAi5QQuWFF6yULf", + "2Pj3v3P6ygtYmeBT13bTKfX8mi3ftGcpRhm7apxi2Aw3MwGeHT", + "2PxXfyJHmekpqeXVDo8teWa76KMMXmRDm8ewMYff1vknnzPPS", + 
"2S9HfyYpV6bikYTZtPJcRDaCtZS49RFBqXiiqi4WxxLtTy3kQ7", + "2TZobisjahLmCX23tbDrjcY6FtS8PfTxK4HHpTXFWpVnuqDECs", + "2UEuSCXVohzXkRPqCaadMtuED7t1utyzEhGcoTiAXHKhBK14ww", + "2VfStqKWXsCJoGpE8dMXAn3ZbP5t8ujUjGEGFWEyGnzBcbrcuJ", + "2WGnUgz6oBaFGfMsw1RfCq29tBjQ4FKGcBmrwND9j7QPq3gaVg", + "2YFpEqmhjG7hFLrU9bSbimDu13jtzJjuxqzwu2MDLujRMMia34", + "2aFTwNXMZZ4djieGPFMtnh22mSa3anTfsNVo9YdWLLhtp3CHfX", + "2aikkxSHpbfjtxnWjKixjvBMLiCodSf8xCEg5FXRMkJuNRKjEH", + "2bu7jnkwymY7CA1eT6t6SSsUysThNNyCRGY3AJ94nutPC9NoZ3", + "2c6hJZnbfNFq9ZBAMHxNhjm2T3hWV8o3vxfDbFegsTCND1WyMJ", + "2cQHRtTFDiGG4ZPMderVzaJqstxeqRYixU4JtmcSf1jUd3keQw", + "2eYSf5zeCsR9sJrsCtADpP3VgoRBZdDf8vj9btoXZxJaogxzTH", + "2ebsxYwArxW6aYUxfCYjCmGkPCvCq88QbL4zEP3FufSyNf1QcY", + "2g5ZiEH51FHHRcvHeGof8WVs1J6cXyv7FDBLRfu3tNLToXazTg", + "2g5wKMsoRscrApnQXne7JT9eS51Mvi7wzcdmyof5z5PtnsEN48", + "2jYQwo6GggNVk3pX5hKQKDJP3fxqx9ZBzQESiTFX8v48uqCh6S", + "2mUQDAusJuq45Kyp8g8UK2iFG7ddgGdxCmLgmabfRCUNfb2xRC", + "2mxTxfefZuJqmQ2hqAFuRGqDDXSCjGY6Vi2iPBejDbVT7rRFng", + "2pVQUaMWyEsmK8xtzd2Hw3m71pRuvSf71QzgynTPjDXHaCH8nZ", + "2qhg77oAQN1WeL5CgAy2jBFJzkNXTFURxWNfvYpLaHM1VTWspU", + "2tXdE5Av5WnX6EDqtkRcJaW8WsyiyZVtDpR2Ft7sbmMLCR2Vbz", + "2tgJgkKHKaQ77QtdM8mNFDsYZA1jaMr99566k2ri8TM6QFuoxA", + "2vaxxDqnQDbfGcQKPzmmdma4ofp5zBEX9CpmVnvq6BdAVpyGzV", + "38hXSvKwB4wbjxn7h5JUEjhAg7cWksbsyZn7LtTD1GTQSpCPC", + "3WXFPYtgRbqTreHPm3y7se34Wgm2V7MT5ftkokuq3Ca3CeZo3", + "4DDrzDxqa1QyB3jnEpDdJ2xmMJ94BrBAEbFr2nWHQ1miM8JDV", + "5Rzxipi9zRooqZS88hqSZhB3pAGknsr7agjjNkcXnEZyCUP8r", + "6TiVaW1k6UiFqU9cwn5PjeFEhUCJFHX2gqf36hDr7ybmC2Haz", + "7tzD3R987j2JP4P1K1ETvtXuFquNCcHStftYxEifZEAeTkMBh", + "8x8ShU3o69HGCB8UbfsagjCoQtFn7UCiG5BbVfEfYixPzD7Gx", + "AWYLuTuf82VJPerkLAwu1wKY3EdexGYWFbAdCpsTyx8Poty1g", + "CUVq69qdJY3D9aWzc8nuyzKFJbREJz2Cz4R3NVkKJMQYc1AoQ", + "DVMHj8Xtwpf6nn4Hap3AZbgWmva3jUs3k5KuzixdorW2eXNaj", + "DpufaDkK7hUFbskyZnzM4LLXjs6eLFBAXZTskppfNHFR8wQPm", + "EBTdsArGiC8Ac8KMvqTdBZpJ8QrhpEaqf8CERCP1g7uQLqn8y", + 
"Emgjjtv5c6uTpAQjLsy5KkMADX6xBr4kkpPYrAzbGBNMDUuQh", + "FyZACp1WyxRjcTHujqyRztBGZsBCRUegxxMPsnyb7JZxxgGa3", + "GSBQCSGDBBXwQUtXFcjjWgJgSSvBewyoQ5vEVHysyr2w3YzL4", + "HRfxa9UJyA6CyikAJEjKyVbNsjP6ZzrtsyGq6tk6ZUBr2xSrT", + "HdZ2EdKizZbeubZU55DEJZ9miMphvfUYX72kwixB7DnESvPjf", + "KVZNAwDxnbhSa7ttEVCiXSGwXJRpeMDk1incuMuyYjF97dpHF", + "Kxgo4yJ1hGX8Lx1AZDPGBKTXLMDukPGaBEPd9r7vJwaE5XHhT", + "MHEJw7cjsxA568CiHYxmgQRRSuWUHBzverPYAjB8QcwYL4Jku", + "QM2FVSFp2WfuDjTvbhhTyuP5ANYH3kxzx6RirstAsG6yvYQm6", + "Tw12eGYUUhZHhUX2X9mg2N7ULLkide33CdjD877w1ub2E7c5T", + "UodCmwwa8deSz1XD1TWJhMTj9ppZLEpEctB5YUmZNb6z71vwF", + "V6F217VqFEg976XniW24skBadrZf8LWcMngYmMwex9qLhdJAR", + "VML5orUvch5Apdhq9Kh8Rp45scraQTKxYAMQ27RYnQf7Wy4RM", + "VkHSPHvu8nFYCQQh2PPp36eyvYAr7k2EUMv54qwToiPWArdPp", + "W67thhNc43CbsopPds6UwonFf8vHNqpcFLuJyfj6wuCSvft2", + "WAMcqVC25wx9C6qVz7WZUAXnft5LAHj8tm8aQ4NLaGKPtM4NK", + "WDGMe4S7Sx1HpN84iy5fanLycLG7dRd44DuhJFRuGASA5cT2y", + "XCtJ7mvw216yU4ibjEHJ1wFhPenQP7GTpgsgB8ZBB67x38mHX", + "Y1VJAKqwjdvJZTzXiQ69XJedZgBFijQxx9MwQj9qTuoQyL47w", + "bcw6vPCwh11XRaSQAfXDNFZtScZi63hgPoP33wkRzSxwqXEJg", + "d5PHFZDid2p2QRJ78JCHwsU4gu5qkwESGBYwB9SkRgPi2n1NM", + "d9JuwJU8pGWrq4zbr3yUpFBKriaQnqbE8EnJC2XsgHy12TdT8", + "dfTYwqCJkCa8cv2C77kAb53AMKNZSaM19ss8z9pzC7mM1NjR7", + "dmceoqHMteJTnhSYFHzfWzYvEcbHp1fLcxBYcarT5AmgjCyEY", + "fw6ABwp7NjyoCuYdd4CTKPLrnsArEwCpKRMsoqeU8xws1DW7w", + "fyreXQj8rQbQt5pnLu1T1Gqf6Jg2ZdGTuc78uurgFpkMCQbnH", + "gLk84YhajHPJKvTkp1X9Pm3ScLj4ZXtTHwu1rnLsQXkX2ga9W", + "hNqgkW9FXXHr8B9NESUvJ8H3oFe5trjG9TfLAfcrSX4HqHaKf", + "i5fidM7y7T9uQdjjSy4ZRWFrbCiJHyDKEuqkFfGzs6SBkDXeW", + "joJFtcQ3Uc3uRSt15LLUuhk8DuvxYr2PYeZ7B3t7r8cr177nW", + "kNac14sb6M8Rm5sn6qadTB416xPE1kcU5Ed6K4V3BuEnaH1w4", + "oX8eYJUNDacpr6mFHUqnQbhMzp8vv4Mk4JByj6SKFM1j1Ajuv", + "pAbzroSdcCfa4XafBPiiH7zm9jYkyGvKpP65ooXx5XYpTV44V", + "q3XXPoVkj4XaRs8iwvTGySZB2gWDjUNcXBxPDU3S9DPpVSFw", + "sQ1EuhYAJjp2pAZJo457FJEFzdDUr3McmNYoh7EFajUPFxLyf", + "tSMeLpk1Ag7bHzNwsjnfC6xEzcDdYw2TW3bLy8y8324SBBQdK", + 
"wLpj7xFcnBvdMcFucuVFDXC6jaLWCgc9mfLfrhfXfvcvhtfpm", + "ws9g9bVCNnj4ZAGNFpCcpv1QXKqxDgdmfjVXGnwiMbjf2Ukn", + "xx2jqP66m3B2zijGPVnMsHeyxXLBg5L6LShx3p5fRA3m5EL4t", + "zryWwRG4P69ZV8CoLkUPvxLxkS8XjrU1eEGpSkdEE4TRNUkk7" + ] + } +} \ No newline at end of file diff --git a/genesis/generate/checkpoints/main.go b/genesis/generate/checkpoints/main.go new file mode 100644 index 000000000000..05bb0b4c7fcf --- /dev/null +++ b/genesis/generate/checkpoints/main.go @@ -0,0 +1,121 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" +) + +const ( + fujiURI = "http://localhost:9650" + mainnetURI = "http://localhost:9660" + + maxNumCheckpoints = 100 +) + +var ( + fujiXChainID = ids.FromStringOrPanic("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm") + fujiCChainID = ids.FromStringOrPanic("yH8D7ThNJkxmtkuv2jgBa4P1Rn3Qpr4pPr7QYNfcdoS6k6HWp") + mainnetXChainID = ids.FromStringOrPanic("2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM") + mainnetCChainID = ids.FromStringOrPanic("2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5") +) + +// This fetches IDs of blocks periodically accepted on the P-chain, X-chain, and +// C-chain on both Fuji and Mainnet. +// +// This expects to be able to communicate with a Fuji node at [fujiURI] and a +// Mainnet node at [mainnetURI]. Both nodes must have the index API enabled. 
+func main() { + ctx := context.Background() + + fujiPChainCheckpoints, err := getCheckpoints(ctx, fujiURI, "P") + if err != nil { + log.Fatalf("failed to fetch Fuji P-chain checkpoints: %v", err) + } + fujiXChainCheckpoints, err := getCheckpoints(ctx, fujiURI, "X") + if err != nil { + log.Fatalf("failed to fetch Fuji X-chain checkpoints: %v", err) + } + fujiCChainCheckpoints, err := getCheckpoints(ctx, fujiURI, "C") + if err != nil { + log.Fatalf("failed to fetch Fuji C-chain checkpoints: %v", err) + } + + mainnetPChainCheckpoints, err := getCheckpoints(ctx, mainnetURI, "P") + if err != nil { + log.Fatalf("failed to fetch Mainnet P-chain checkpoints: %v", err) + } + mainnetXChainCheckpoints, err := getCheckpoints(ctx, mainnetURI, "X") + if err != nil { + log.Fatalf("failed to fetch Mainnet X-chain checkpoints: %v", err) + } + mainnetCChainCheckpoints, err := getCheckpoints(ctx, mainnetURI, "C") + if err != nil { + log.Fatalf("failed to fetch Mainnet C-chain checkpoints: %v", err) + } + + checkpoints := map[string]map[ids.ID]set.Set[ids.ID]{ + constants.FujiName: { + constants.PlatformChainID: fujiPChainCheckpoints, + fujiXChainID: fujiXChainCheckpoints, + fujiCChainID: fujiCChainCheckpoints, + }, + constants.MainnetName: { + constants.PlatformChainID: mainnetPChainCheckpoints, + mainnetXChainID: mainnetXChainCheckpoints, + mainnetCChainID: mainnetCChainCheckpoints, + }, + } + checkpointsJSON, err := json.MarshalIndent(checkpoints, "", "\t") + if err != nil { + log.Fatalf("failed to marshal checkpoints: %v", err) + } + + if err := perms.WriteFile("checkpoints.json", checkpointsJSON, perms.ReadWrite); err != nil { + log.Fatalf("failed to write checkpoints: %v", err) + } +} + +func getCheckpoints( + ctx context.Context, + uri string, + chainAlias string, +) (set.Set[ids.ID], error) { + var ( + chainURI = fmt.Sprintf("%s/ext/index/%s/block", uri, chainAlias) + client = indexer.NewClient(chainURI) + ) + + // If there haven't been any blocks accepted, this will return 
an error. + _, lastIndex, err := client.GetLastAccepted(ctx) + if err != nil { + return nil, err + } + + var ( + numAccepted = lastIndex + 1 + // interval is rounded up to ensure that the number of checkpoints + // fetched is at most maxNumCheckpoints. + interval = (numAccepted + maxNumCheckpoints - 1) / maxNumCheckpoints + checkpoints set.Set[ids.ID] + ) + for index := interval - 1; index <= lastIndex; index += interval { + container, err := client.GetContainerByIndex(ctx, index) + if err != nil { + return nil, err + } + + checkpoints.Add(container.ID) + } + return checkpoints, nil +} diff --git a/genesis/generate/validators/main.go b/genesis/generate/validators/main.go new file mode 100644 index 000000000000..5f1a8565c5fe --- /dev/null +++ b/genesis/generate/validators/main.go @@ -0,0 +1,63 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "encoding/json" + "log" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +// This fetches the current validator set of both Fuji and Mainnet. 
+func main() { + ctx := context.Background() + + fujiValidators, err := getCurrentValidators(ctx, primary.FujiAPIURI) + if err != nil { + log.Fatalf("failed to fetch Fuji validators: %v", err) + } + + mainnetValidators, err := getCurrentValidators(ctx, primary.MainnetAPIURI) + if err != nil { + log.Fatalf("failed to fetch Mainnet validators: %v", err) + } + + validators := map[string]set.Set[ids.NodeID]{ + constants.FujiName: fujiValidators, + constants.MainnetName: mainnetValidators, + } + validatorsJSON, err := json.MarshalIndent(validators, "", "\t") + if err != nil { + log.Fatalf("failed to marshal validators: %v", err) + } + + if err := perms.WriteFile("validators.json", validatorsJSON, perms.ReadWrite); err != nil { + log.Fatalf("failed to write validators: %v", err) + } +} + +func getCurrentValidators(ctx context.Context, uri string) (set.Set[ids.NodeID], error) { + client := platformvm.NewClient(uri) + currentValidators, err := client.GetCurrentValidators( + ctx, + constants.PrimaryNetworkID, + nil, // fetch all validators + ) + if err != nil { + return nil, err + } + + var nodeIDs set.Set[ids.NodeID] + for _, validator := range currentValidators { + nodeIDs.Add(validator.NodeID) + } + return nodeIDs, nil +} diff --git a/genesis/genesis.go b/genesis/genesis.go index 6a61b80aa559..e25088a59a12 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -553,7 +553,6 @@ func VMGenesis(genesisBytes []byte, vmID ids.ID) (*pchaintxs.Tx, error) { func AVAXAssetID(avmGenesisBytes []byte) (ids.ID, error) { parser, err := xchaintxs.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, diff --git a/genesis/genesis_fuji.go b/genesis/genesis_fuji.go index 27c43f79fd0b..06cd2dd143ac 100644 --- a/genesis/genesis_fuji.go +++ b/genesis/genesis_fuji.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) var ( @@ -18,7 +19,7 @@ var ( 
// FujiParams are the params used for the fuji testnet FujiParams = Params{ - TxFeeConfig: TxFeeConfig{ + StaticConfig: fee.StaticConfig{ TxFee: units.MilliAvax, CreateAssetTxFee: 10 * units.MilliAvax, CreateSubnetTxFee: 100 * units.MilliAvax, diff --git a/genesis/genesis_local.go b/genesis/genesis_local.go index 5a76aa25cfcf..72f180ce3445 100644 --- a/genesis/genesis_local.go +++ b/genesis/genesis_local.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) // PrivateKey-vmRQiZeXEXYMyJhEiqdC2z5JhuDbxL8ix9UVvjgMu2Er1NepE => P-local1g65uqn6t77p656w64023nh8nd9updzmxyymev2 @@ -36,7 +37,7 @@ var ( // LocalParams are the params used for local networks LocalParams = Params{ - TxFeeConfig: TxFeeConfig{ + StaticConfig: fee.StaticConfig{ TxFee: units.MilliAvax, CreateAssetTxFee: units.MilliAvax, CreateSubnetTxFee: 100 * units.MilliAvax, diff --git a/genesis/genesis_local.json b/genesis/genesis_local.json index c8f715cfe0d2..be8dba4edbf1 100644 --- a/genesis/genesis_local.json +++ b/genesis/genesis_local.json @@ -48,29 +48,49 @@ { "nodeID": "NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", "rewardAddress": "X-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u", - "delegationFee": 1000000 + "delegationFee": 1000000, + "signer": { + "publicKey": "0x900c9b119b5c82d781d4b49be78c3fc7ae65f2b435b7ed9e3a8b9a03e475edff86d8a64827fec8db23a6f236afbf127d", + "proofOfPossession": "0x8bfd6d4d2086b2b8115d8f72f94095fefe5a6c07876b2accf51a811adf520f389e74a3d2152a6d90b521e2be58ffe468043dc5ea68b4c44410eb67f8dc24f13ed4f194000764c0e922cd254a3588a4962b1cb4db7de4bb9cda9d9d4d6b03f3d2" + } }, { "nodeID": "NodeID-MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", "rewardAddress": "X-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u", - "delegationFee": 500000 + "delegationFee": 500000, + "signer": { + "publicKey": 
"0xa058ff27a4c570664bfa28e34939368539a1340867951943d0f56fa8aac13bc09ff64f341acf8cc0cef74202c2d6f9c0", + "proofOfPossession": "0xac52195616344127df74d924e11701ca5e0867647ae36171d168fcf95d536e94061659b3edb924fffdb69dd5aa5cb2d703f8920c825c8f7b74dd0112c9c27814790bfcfa3a08e1d9358da1c54e1f6c0b4d9772432f79d7dceaa3a95c3a7e6adc" + } }, { "nodeID": "NodeID-NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN", "rewardAddress": "X-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u", - "delegationFee": 250000 + "delegationFee": 250000, + "signer": { + "publicKey": "0xa10b6955a85684a0f5c94b8381f04506f1bee60625927d372323f78b3d30196cc56c8618c77eaf429298e74673d832c3", + "proofOfPossession": "0x8d505f7b53960813f1e007f04702ae1bd524cce036b4695fbf8a16eb50b35cdbdd4eedec2b0ce281f35ae36e3ac29fb40867c94cabe2ba0b462f177dd8c3d293b0586b92d392e8278711fb434fe2601ae1b2e0867cfd128180c936a8010c5552" + } }, { "nodeID": "NodeID-GWPcbFJZFfZreETSoWjPimr846mXEKCtu", "rewardAddress": "X-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u", - "delegationFee": 125000 + "delegationFee": 125000, + "signer": { + "publicKey": "0xaccd61ceb90c61628aa0fa34acab27ecb08f6897e9ccad283578c278c52109f9e10e4f8bc31aa6d7905c4e1623de367e", + "proofOfPossession": "0x910082e2b61fe4895b4a8f754c9e3a93c346156363acf67546a87e4bc1db7bbfa3239daa5292ad9bc30a11f60e59bbd30b375785b71fe45abd154d717b6471c2406df2534297305ae93d6abeb38fc461170fc0b74b8aa4550f30257a264c75b0" + } }, { "nodeID": "NodeID-P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5", "rewardAddress": "X-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u", - "delegationFee": 62500 + "delegationFee": 62500, + "signer": { + "publicKey": "0x8048109c3da13de0700f9f3590c3270bfc42277417f6d0cc84282947e1a1f8b4980fd3e3fe223acf0f56a5838890814a", + "proofOfPossession": "0xb034e0d0ec808b7ec456a6d88bdad7b32854794605a11139d70430d81fb93834a3f81d8969042952daff335fec51018016a7ecb19d38597c5743a4eb3fb945ebe28a4250dd7bcb7a192c98d2fcaf15320a9bee239c66ddf61bb24f87c6e91971" + } } ], "cChainGenesis": 
"{\"config\":{\"chainId\":43112,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC\":{\"balance\":\"0x295BE96E64066972000000\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}", "message": "{{ fun_quote }}" -} \ No newline at end of file +} diff --git a/genesis/genesis_mainnet.go b/genesis/genesis_mainnet.go index 3808174ebbd1..94eae11cb2c8 100644 --- a/genesis/genesis_mainnet.go +++ b/genesis/genesis_mainnet.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) var ( @@ -18,7 +19,7 @@ var ( // MainnetParams are the params used for mainnet MainnetParams = Params{ - TxFeeConfig: TxFeeConfig{ + StaticConfig: fee.StaticConfig{ TxFee: units.MilliAvax, CreateAssetTxFee: 10 * units.MilliAvax, CreateSubnetTxFee: 1 * units.Avax, diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index 35781f37661f..679fc05be914 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -356,7 +356,7 @@ func TestGenesis(t *testing.T) { }, { networkID: constants.LocalID, - expectedID: "4vzpz26oNFyGnMCeFFPSx41Ek4dBPpPPWe6Zq2bSxdCSGbkC2", + expectedID: "S4BvHv1XyihF9gXkJKXWWwQuuDWZqesRXz6wnqavQ9FrjGfAa", }, } for _, test := 
range tests { diff --git a/genesis/params.go b/genesis/params.go index e2ae45c697e6..6d4f5f4d978f 100644 --- a/genesis/params.go +++ b/genesis/params.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) type StakingConfig struct { @@ -33,42 +34,21 @@ type StakingConfig struct { RewardConfig reward.Config `json:"rewardConfig"` } -type TxFeeConfig struct { - // Transaction fee - TxFee uint64 `json:"txFee"` - // Transaction fee for create asset transactions - CreateAssetTxFee uint64 `json:"createAssetTxFee"` - // Transaction fee for create subnet transactions - CreateSubnetTxFee uint64 `json:"createSubnetTxFee"` - // Transaction fee for transform subnet transactions - TransformSubnetTxFee uint64 `json:"transformSubnetTxFee"` - // Transaction fee for create blockchain transactions - CreateBlockchainTxFee uint64 `json:"createBlockchainTxFee"` - // Transaction fee for adding a primary network validator - AddPrimaryNetworkValidatorFee uint64 `json:"addPrimaryNetworkValidatorFee"` - // Transaction fee for adding a primary network delegator - AddPrimaryNetworkDelegatorFee uint64 `json:"addPrimaryNetworkDelegatorFee"` - // Transaction fee for adding a subnet validator - AddSubnetValidatorFee uint64 `json:"addSubnetValidatorFee"` - // Transaction fee for adding a subnet delegator - AddSubnetDelegatorFee uint64 `json:"addSubnetDelegatorFee"` -} - type Params struct { StakingConfig - TxFeeConfig + fee.StaticConfig } -func GetTxFeeConfig(networkID uint32) TxFeeConfig { +func GetTxFeeConfig(networkID uint32) fee.StaticConfig { switch networkID { case constants.MainnetID: - return MainnetParams.TxFeeConfig + return MainnetParams.StaticConfig case constants.FujiID: - return FujiParams.TxFeeConfig + return FujiParams.StaticConfig case constants.LocalID: - return LocalParams.TxFeeConfig + return LocalParams.StaticConfig default: - return 
LocalParams.TxFeeConfig + return LocalParams.StaticConfig } } diff --git a/genesis/validators.go b/genesis/validators.go new file mode 100644 index 000000000000..4e7d9ecb6550 --- /dev/null +++ b/genesis/validators.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "encoding/json" + "fmt" + + _ "embed" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + //go:embed validators.json + validatorsPerNetworkJSON []byte + + validatorsPerNetwork map[string]set.Set[ids.NodeID] +) + +func init() { + if err := json.Unmarshal(validatorsPerNetworkJSON, &validatorsPerNetwork); err != nil { + panic(fmt.Sprintf("failed to decode validators.json: %v", err)) + } +} + +// GetValidators returns recent validators for the requested network. +func GetValidators(networkID uint32) set.Set[ids.NodeID] { + networkName := constants.NetworkIDToNetworkName[networkID] + return validatorsPerNetwork[networkName] +} diff --git a/genesis/validators.json b/genesis/validators.json new file mode 100644 index 000000000000..7c4d76657ead --- /dev/null +++ b/genesis/validators.json @@ -0,0 +1,2529 @@ +{ + "fuji": [ + "NodeID-11112jHXqNowaUzgEwjMAdiKRm57b5cM", + "NodeID-15uUYVJH5xM396PKH5d9GTjGErkciLdBx", + "NodeID-1REAt4C9SQUo17pseGmHL2oNUdpkBkrB", + "NodeID-22hnv3WsLkax27PmQdYJrVRDJVm17XCCX", + "NodeID-23GCcht221hDaKdtH8BhSLpUrugsfRAY6", + "NodeID-24WK7qiKXAumya1kKEktwj2ubBbRyq5UW", + "NodeID-24nupT8dLucgRn2HuYGzqUZEbkfKRaUnX", + "NodeID-26yNpVkHNGxboUshTXtFAy76yYLmWa7bU", + "NodeID-27n6vtWe35N6JmbpVQKfeVqy7KJ874tFU", + "NodeID-29tGiEe2rxuoHJgjEZ4sDytQAivwjJmaC", + "NodeID-2Bxm4MQUKqq7yMtHLWsfYpKqRAfSterR2", + "NodeID-2CDAbZAYzHv4imG3sC9hBx9WGMzJdAJeX", + "NodeID-2CVgPTjtmaxSSjKuZXUwrgG4cPapmEgmD", + "NodeID-2D9EnSs4qEtZVPB2kxUX2xEATUUgXX3Nh", + "NodeID-2GKxWMGx19NvTSorxibapih9DcuRx6WzV", + 
"NodeID-2GXYggxghp56hz5zkdSvtVXEQ3AZB1gGM", + "NodeID-2Gp8ZuzK92tZDzLknZxmsm48RvppFYSqc", + "NodeID-2JGdGihFqGFcKEZEUswXDSWwaBJcU797P", + "NodeID-2MrvT4zf7GNN32eeGizM34s6UaX9SuZsx", + "NodeID-2PPDjKZvz1RTRiKQaojA4vsrno6N8mWuh", + "NodeID-2PfNAhAG7nypsyDRHzPwZaZsDcj1jA6rU", + "NodeID-2Pvoq8Stniirtie17djAh3ys7rUxn21F5", + "NodeID-2Ty5YxnuVTUkPX8SPMcS1b1qfE3sMbcRV", + "NodeID-2bRcmz6BF39s7bMoG7wWPWST73LWcaAUp", + "NodeID-2cU7F76JcCHXcsY6L8pMeHQBxwXqMumNC", + "NodeID-2cu2JVaQDucg81tff1W4PBeZ5oMUG3vj6", + "NodeID-2ebMFnGycbMsnxJ3VV2WLhJSQPLQUSJgk", + "NodeID-2kwc3kKwtfgTQ7aPahndbHAer236maSBH", + "NodeID-2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB", + "NodeID-2mMkmzYVHw4YWojaAuAHpNoEz3mqyEbBJ", + "NodeID-2n8rK9kL8N7y1P6hyKu5ZwrMFUX1NEXG8", + "NodeID-2pV9ukRgk7eBJR9Ab5DSZqU3uZSfA9TvM", + "NodeID-2vtBaWPqDCi1rzQWdmcSXG3R2hJbnpMvg", + "NodeID-2wejWSTCbLboLdsgu9YAUeT2h5bnpzcNc", + "NodeID-2ywA5crdZzPPV41rXRPkPktyRKHHSCdEZ", + "NodeID-31NFpxN7VniDZbDDNkvvZaFuuNhDJyYro", + "NodeID-33PdW8ge5VAHD98Bmxy6kYhm992Mhnu8x", + "NodeID-345wyACGatmkic8wWga8Tr4qxoeuoiZvQ", + "NodeID-34qTBTrvhsF3MQDuwdNUDHBvj982Wm6NV", + "NodeID-34sJYzSaqAD8GSoK1WVguqLgdg9uVotGH", + "NodeID-36e7GjwMCYr1d9tDUgUyz9swewgNhKArs", + "NodeID-36ktMvm5GFTkfTAd5pPw7zpAXZG174Fvo", + "NodeID-37fc7hgUGsXKxPx3rMYyBdHatNpKRb8z2", + "NodeID-3932bao9oUnjfwGwWgsBk96M8rBhgQdeG", + "NodeID-39V1962v35f5Rs1NsaCskg84uaMqyxeAs", + "NodeID-39w4saojjUihKVNHJiQuSwugYbiGraXGU", + "NodeID-3DgkJ24cK9scEWQnoa7BwgP6YpB9ibccJ", + "NodeID-3FQZUAfKMJVzV5opV1DC3EpEYiZ5aYJQm", + "NodeID-3FYxUrCrACjqdBYFc8SnTzJ7Xq9fY3i2G", + "NodeID-3FaY3uSPkatMS3HUWXDeq6WuYyFPrHoj1", + "NodeID-3HNtqQ9HQftuHD8gApoDTLMH78z33P4Ze", + "NodeID-3KSEXMHijPpZbUHHgpqubFvjA2N4GJ6MC", + "NodeID-3KixTZkLBUqrDDeaaEpXaTTiU4Qan9xdC", + "NodeID-3M7L6w9a1G7o6ywAuY47HTnuGaMPZKQWx", + "NodeID-3M8ZqnsX6DDFkxVUnBzcKgVxce8vFLf68", + "NodeID-3NghhwXzgEMZNnjpJGiYrrpfCMNYCtbsf", + "NodeID-3PjPmpuCghoVT7AJRb8qTjyZ6DtCPuURp", + "NodeID-3QKhsmp2cj4eJ1hYecqQ6UAPYGTfZhWfr", + 
"NodeID-3RGiNDT55RwAQARCjrKgFc1hjdjVVchDR", + "NodeID-3RaFvh85MNRXdUmsjMPjvfNHmdbrSn9kx", + "NodeID-3U18hPBaGyELh3ZFEBPsvidFv5gP2Pv38", + "NodeID-3UhYTVhNbwBBALPyvwhRiGmgJts3obFvr", + "NodeID-3VWnZNViBP2b56QBY7pNJSLzN2rkTyqnK", + "NodeID-3Z6K13A3BogE9oMPg81vWgfitgvN1kCRh", + "NodeID-3bqNW8gEe1uwQus6dr8uux2g227tnXB5A", + "NodeID-3d2VDPUTpXd97ukrx9Faepmb79wzFicQZ", + "NodeID-3dahoBAqDyB1vskabnMHPy47zg8gJ9tD4", + "NodeID-3hdUZJo43ZTmFe6KdYxaPS92tqbR9woqw", + "NodeID-3jFnB85bDiJ8f1G8EgptsZMENJfGCeBVD", + "NodeID-3jbJ1eh1i8iqQKsrMjcG5WzAmPhkFU1Ds", + "NodeID-3kuuAaJKZWFYp4dy8rcrBWVkV4LDb86hE", + "NodeID-3nhpqPqAz8og2A2DBeVjb4fmFK1d2rRvQ", + "NodeID-3oFxfDPzWo4YhvYzswCMzLrcJwx2UKkwV", + "NodeID-3ry1arUtntKBQ4shFYgF9VB6AbGRHnPwe", + "NodeID-3tJhpEC14rjctSoh7ZN6TXAijmrMvjRNf", + "NodeID-3uK65P4yQGns5wVB9Bv955dHP2rRiXzFF", + "NodeID-3uRhwbfSajPyvWGaNSJyjhDKGPACGqHz4", + "NodeID-3um9mPx53CkfV1KSDdYkdrv6tEVR2E8vg", + "NodeID-3uq87hQvpuXWoqpmPhLRWavFpqzGDZbn1", + "NodeID-3vtMDZ9Eqs3AzwNbqudMoxuUXyi97MKut", + "NodeID-41cr9VeX852rtHrELWnHthUV4Bbm75WBY", + "NodeID-468yx2KFJV9M7uNiTXJrbqyFGeQxgiBfh", + "NodeID-4B4rc5vdD1758JSBYL1xyvE5NHGzz6xzH", + "NodeID-4CWTbdvgXHY1CLXqQNAp22nJDo5nAmts6", + "NodeID-4DDjS32CM5oK7tbba8iNnXv7VWJsrynDu", + "NodeID-4H2NGT5zSc5Tt5KHTciLXVHW3CVofDmuG", + "NodeID-4JwxRwRY8LgeSdDLNargL9NBEBUuvTk9v", + "NodeID-4KKUrWvuKKEwNQWvZj7x6NDhUEan1m44C", + "NodeID-4KXitMCoE9p2BHA6VzXtaTxLoEjNDo2Pt", + "NodeID-4LJNqmxdSigcw9wnGcd8nRP7Z4XZrEMyt", + "NodeID-4QBwET5o8kUhvt9xArhir4d3R25CtmZho", + "NodeID-4RApHGzbpwtrUEbL9BpkV8AfdCzr1ciCj", + "NodeID-4SdtN4zCP8h3ZEymEfGxJRocAu5hrwphC", + "NodeID-4TJyt2f7zGuusjc1yXcBJnkNvDMAJ9py4", + "NodeID-4WP36nX46eS27wcRsW1agg7bzpNDonEf6", + "NodeID-4ZrTre1sS8qxndZvsMNzBGm3MxxWBpbJb", + "NodeID-4aAK5LmtRsrAGnZgHTWXcK8EwrtxEk26Q", + "NodeID-4aQfCERvSk5sn9SJfzGZtb4ea6UQtxXNf", + "NodeID-4aa1L1AKqdG76yQ7Y3eGYV1wProfTVjc9", + "NodeID-4bPd6dgavJ9sggrbTMou3jkjcJ6Ng9L43", + "NodeID-4bo3ptnJcE25b9YSZvzUYxkXipqptKGer", + 
"NodeID-4dNvbt8gNJWXtTMA8BARk8haL8agc8emC", + "NodeID-4eruhJFiLfkgmeKrG9udZthPCvGpyyw9d", + "NodeID-4jkdfkuyACE6K5YV3YtbCieZazm6Gjjat", + "NodeID-4kMv27g3A6x2RMkiJjiK73zmAxvPDmbct", + "NodeID-4kSPhc76z8Nxfta8gjxHDCHcNx7TrNSLX", + "NodeID-4syxSZ2LPcCeE96VL1HsY3fx3AYnnGfsi", + "NodeID-4t94JpNNhRT9v6uoUWFY5G4jKmNqV6BQh", + "NodeID-4tsmKt9tHvMocmyHe9oKD1PkAa67iHn4k", + "NodeID-4wL6VGogpJKy7EB69BxaRi1R7sg4npMxV", + "NodeID-4wnwAREi6cY2vYccTktFC1gCEJfxwMJrU", + "NodeID-4xKQvNdSepnCAUZ5JMvqNxanaUQ4Qn22k", + "NodeID-4xpmwNTMoAxA4XGPyrBzas5Vnu1Beykmw", + "NodeID-4xyPSDPrx6wecMnPu754XG6hnb2HYCYLz", + "NodeID-52UrdjqkYxmkasCeLaH2jkHMXEYayFn61", + "NodeID-53BzXzD64f4CrJQZJmn5VWhwwys1cpzGU", + "NodeID-53QuyRVXP99F83s85cp7Fe8Aax8KAFQct", + "NodeID-54RagM4VF5VNeKWoVV5UNHJfM6ccHtBob", + "NodeID-54WkEbocc1ACvV6N6Qu4NVDGHGfpFhBzr", + "NodeID-59HRzrFCmhEmUJAncPdXZ5H1NrreBQR6e", + "NodeID-5KtwUENNLTnfjAeVWpED9jq2TeCPZjTxU", + "NodeID-5LtxGcaXocgCfx5qn2uMKiLnoJZi8Q11P", + "NodeID-5M11QQFMk3iqV8g6QoRiDNSDwR2XsQSe7", + "NodeID-5RHueEiTPCFb2T8YuDid94rJtr31UvnYW", + "NodeID-5T4381mMgVqnW1b6y9ddyHBfYAqaB8C8b", + "NodeID-5TcM4MkRoE7Vhnv9kcawSwpVSiTPDDBKw", + "NodeID-5UVT7fNKF22XkWXMxCDvGHZW2auLgxprr", + "NodeID-5Vd1XuN87ZgjBzpGL5NDhVPbmaqzd4YfU", + "NodeID-5WPHvkgxcwSE3XhuLf3rVNU3crqoF1BJ2", + "NodeID-5XgY2PdtB5DdELR3CzSt4U5gpC7XEUNVp", + "NodeID-5ZKjaekNVNoi6xaEEuKLQ6ZbYMQk4wA2u", + "NodeID-5aXqEaEkJH58WjMzuw6h8i7b16SNHq5uD", + "NodeID-5aqkYoNRpwJ3EhBvc8uF3626mzfiUsxdG", + "NodeID-5bGeKYsDjDfMwPAipVDPk5rYX5b8vc1aN", + "NodeID-5ewkSj3RxqMG7shRumy4USF5ibHMaFAJY", + "NodeID-5fvPxB3edzXX3Eb5x5r7tbknoxqKyELtf", + "NodeID-5ivzLGz96pAJWd3o83x4JWHTpWZoHJvc9", + "NodeID-5kGqjBKnQ9gEm6gBXieWvP9oGZoFXPA8e", + "NodeID-5naAivQLhjbpVw35YGaD4HJwXTwuDufXA", + "NodeID-5oRZWHEqipzXDnScB5UgPP5cUqfgPa9GR", + "NodeID-5q6drRMuNYp3hfZokxgB2m5NxBoFZXc6X", + "NodeID-5rLsXFYvpZqjWfXkPouCUAmXc3Xcvebvn", + "NodeID-5rZ6LbEm6o3baUxVyV4GV21sPD4QqjPk1", + "NodeID-5tqDUvtVzpChaPgXcrXjuRRS5EJU648en", + 
"NodeID-5vCs6XJWRNTLpAkQP2HK8fwprGb1v86Q9", + "NodeID-5ztQ9vcGysCVPY56bfht1GsxV6Xce6tJ2", + "NodeID-653BnnF1HfEQY9APtAu7V23FNEmRMBGDW", + "NodeID-678Z9gkWVfFoAAnRJBEtgSm6Th6TUhsMa", + "NodeID-67tGW3yXGqnDVnEwEAwVv84yZtm1AiZPA", + "NodeID-6DjE25mD9X5AXWf8xpfgJ5tK94SFq11Vw", + "NodeID-6DxE723urFNy3H7SyMMs76ZLRYx6bDJdz", + "NodeID-6FgfGS3McpD27X8hdC7ZgQVhs6DoKvBzq", + "NodeID-6Kgqv9SV42oRBk2KNSRwFfJwsLBia49De", + "NodeID-6KtZFnJUHfu92NNRdgdea9ZA5mjbADDBR", + "NodeID-6LZ6k2VRSdaF4qLHgfjER6gjVjrjDEAmm", + "NodeID-6MnjianNNDMpLNhCnirZsw47aGGkkfCZd", + "NodeID-6N15qNZphbUdxRSfThfDbbFpgDdJvoHrR", + "NodeID-6NHaJnvJtHSbdtJHzdDesKdj3Wu9mAuNh", + "NodeID-6Powgn3qZ8HZGmHUeQ7b1acypxzXhA9Lq", + "NodeID-6Pq5DKB1kzJjvnc8KPNSDYfMCsUGJxCyZ", + "NodeID-6TrmPJiRneiG2ztDxBPWQAxAqv976ZJmU", + "NodeID-6Uh985Lx96QQZ6LmbqJF1j4CxLu4ZCcd7", + "NodeID-6W4LCoXP8T2QYRGWcAcpNftMYyHcVuKCF", + "NodeID-6YPFxT3358wbWFW6zHTnVtUYYedASEnVu", + "NodeID-6ZmBHXTqjknJoZtXbnJ6x7af863rXDTwx", + "NodeID-6edspGWTTSWm3N7Zot3kGrEMHPtLrK3VW", + "NodeID-6f4T9BgmXJVJBNTwAj2giidTU7WiomG8G", + "NodeID-6gVjCexoCvwXqApW3YVLnQSjsCUjbSDYx", + "NodeID-6iky1tgQsZupgN75GcSjFyJxC7a3rUc3D", + "NodeID-6pMsJc16h9YF2mMY5VkZu8rDS5Qco23fC", + "NodeID-6qzz3HV9nEmVLrDKGo9L5k94BZksKgwXL", + "NodeID-6tWHHU9uVWAf46qTYTsnTnVZJeL65hwgR", + "NodeID-6tkEtqzyUzqkPZ1KGH8XS9ZTasYGk68iq", + "NodeID-6uRAYA8LDmiD4se1Ve9fBSRrjNi5TZjw7", + "NodeID-6uicgS3fAizqshZgZPrNqm66YieUeZkbj", + "NodeID-6voVgdj8tVSWrgprYtrQYyidN8myXQUXj", + "NodeID-6xEvEA7hcEAnqaXW7R9RFZSkdsAniHPad", + "NodeID-6z161fCLZDJjvLNSPXCK1bAxkK6gjiDXc", + "NodeID-6z3AiSaoYsmFgjqsc1w8rPx4vGaA3Bni3", + "NodeID-77ebv49H9yRLLiUC46Hm9H371wXYNZNjL", + "NodeID-79idCdyu498QoEFzvqKBbWFsXECKSYbpb", + "NodeID-79u7mCRHvc9q5piSciiTphM8H1TDEq4Ab", + "NodeID-7A8w1M4GpWHy8UMwQkARRoB9a67Rod7yR", + "NodeID-7BPqDUFt9F5Vs9eqZiBYXmnaeC5UtZ2U", + "NodeID-7DYhQRawvQ3hoDTfytAa5KHeMTUM7buBN", + "NodeID-7EexMiaXNY1HYVV7gWdXtsiKh8j9gW1Fy", + "NodeID-7FXaqpRJgAPGWAUPs1UunagASkxPZps2D", + 
"NodeID-7GM3ww9CL3uXtZuK6nSpdKJmEbiiyYgu2", + "NodeID-7H5yeX5kkfdUBFCHLbDfDRxqWCDoFrCcL", + "NodeID-7HvCZsgMXsCTjddz7cYNpKrGxK1i1SxMr", + "NodeID-7L9oKJx6AsgGfG2GR8qQ7ndbKBVxqV9At", + "NodeID-7NenT29nACMVLj8K685mBLodfp6GehVy7", + "NodeID-7QjQJHgVxMFBZzNG6n7ULTeaa7dAbd1U9", + "NodeID-7SKSvUU5xyRiTJxsKsoeXnLa6idNEtZ19", + "NodeID-7Uq7GYJHYNh6LxFinWQpyB4ZitqDkUC6E", + "NodeID-7XRaRs6shZG5VVVoaKeUxdJfbPmuVU4CB", + "NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", + "NodeID-7YJB8mcHCRmzsrrkxxpvqjyQH4cCEBXQq", + "NodeID-7b3i18ygt1A2gPpXKeEfAuC3ekHB5J4DP", + "NodeID-7cukmtqdCLDWG6N8BJswwKidxe5iGyVvm", + "NodeID-7dCZydYmPM846KqjhL9b4gRT1C7MDFFxG", + "NodeID-7dZ86Yv4Md2dwhHPpJNyuh83LjKNKvw44", + "NodeID-7eR9d4bjMsTgpwCbvXz4ynETmbXiQrJ1X", + "NodeID-7epJ8SkPeY9rL49JY8ELYJSduMt6eEnAk", + "NodeID-7fi9HdQSPQbnDrSjiVBUAAy5YfisSweEk", + "NodeID-7kVsskXDhAQ2grucq8sUwJARmtqsTmw9a", + "NodeID-7kfKeW5gRRVj7XgsssdWaKNU4AvC8dLiG", + "NodeID-7peBGoPt8rGfPUEeH1VCqnKyMFJg9y6z", + "NodeID-7rxhbsWmUqQ74N3FqoeUo1jpEspkEPeuL", + "NodeID-7uXxUx1VAs3NUs2J472MdQ4V9xVNKZHxN", + "NodeID-7yTMMzMTd1Fuct5tTeCB34HoZ9kLdKCpF", + "NodeID-7zbpDafC5Rxm6ZMXqSrz9D28ZdC4WV7Hp", + "NodeID-83qSEo3NEDrRoNYzJJY4RUBM79cSz4zVR", + "NodeID-84KbQHSDnojrmyuZjWM6hrM2vgPtMWt3", + "NodeID-84KbQHSDnojroCVY7vQ7u9Tx7pUonPaS", + "NodeID-89GQ3qi4bif58ovQnQAehtccU8Jbynozk", + "NodeID-89UCR1CsPzzEHuknxhJHKxuFPNCyPz7Bu", + "NodeID-8Bh3X1LH6uW1q2USKQeu11PAozaSMgEjr", + "NodeID-8CgGmdahNXvmuhyK7NziR9zTEMiArgrrr", + "NodeID-8DLjsnc2R3mm64NJ5puhrgisrLXHXihKc", + "NodeID-8F62HdqXVhqGTySVZ7HyQbEyFDMo33iKd", + "NodeID-8JC2wNQkExELqhtzoGnyBcC7b5Su1C9ju", + "NodeID-8JtmUUx6NHGVACFTReQbFxx1mWMJTdsWb", + "NodeID-8JvHG2u3R6DLSBpv55rHhNvj7MCbeWRGL", + "NodeID-8L6pLG9PQ3XUhsWHT7E2VGuW1S5mgs6LG", + "NodeID-8Lf1apT2CuoLpsqF3sHd5DQCRQ4cs8oYX", + "NodeID-8LjNUkzYVge7UvJkrJmaCYyWvmRb4oxoA", + "NodeID-8MXE4VNHGfx5d5NM7zVeU4KU8kZVs17VG", + "NodeID-8PvUVsNyrXj2SpTyrfxM7AcgjWU4xCufs", + "NodeID-8WJr4wGhcXChCXKXQpSidvTW5RiCQzX64", + 
"NodeID-8XhZkWckQGKgzhv3XSmUeM3NSrskD4bew", + "NodeID-8YHqGprPt4vva7TqVbzTtVFRv38BmpDUD", + "NodeID-8bVFU3U2PpHCwavsvzfMTE54cir79cmPr", + "NodeID-8boFLW2GXq6DxBRTxAguJ9N6dBYmBd2Ld", + "NodeID-8byCCMYn6aaNs4sQmwemcQfMwrnxBDA7U", + "NodeID-8eZ8eW3Q2Kf6YV1wEVCaFpEsRMm2vfmqy", + "NodeID-8efhLH97RLg7fodqvk16qQbscnGQtV5CP", + "NodeID-8ftKdL6NkAz7K2yUJwNuhZJVLrNiz5qjN", + "NodeID-8gYpz72F1GBoTwufD9FfjESwKAnRRYnwt", + "NodeID-8hqGf72EsTsRTWpP1kxGJkbzeJjnosiDM", + "NodeID-8i9eqbVxzf2KtQkBPiX2BvZujYza6fDe9", + "NodeID-8jUnxLxkowHFhDrEZ1FmucLDC62c3xJVi", + "NodeID-8miY3PqR6f9C6GDr6WFqAwpqqVFheJhrP", + "NodeID-8nCutCx3GgDYrNL1ancH4ZsE5tXHRYj2Z", + "NodeID-8nXdZLTjc3FLaPGbS41WXt3Qwa3JvKaHF", + "NodeID-8pDV7WK6kEp4mw2kupK1bSXTsShiZamE4", + "NodeID-8qNPJ4cmRiqaM7LA9v9tishQvkqqHLCeY", + "NodeID-8qTXyZZtMFdVqfEvr1nhnCJgh1i4ZGqqz", + "NodeID-8rLtFc9Ds7Liw6mF9vz51fmHfxFqDuCvF", + "NodeID-8rcfydaHitUhBWcUuDxf5p24p4qwfvucw", + "NodeID-8tpPNhuBrXw8KKqJjrsxUwVqsgdgbuoc5", + "NodeID-8vGhAbGtfHhi1nNg8A1Jof4ivgLCRdU8M", + "NodeID-93n7QZ5J8s1hej9LEVKX8g8yV8g9FvqCY", + "NodeID-96B6wPgw4HKySZk2C65c33YzzNx5VGmE4", + "NodeID-96ugvCFSQ4Df84uPhPwqHQNRCxVvkpxep", + "NodeID-97jPS9hQkuWC7DhEchgwC2aUN75oto4XL", + "NodeID-989RjLGtpnUjDHGD9mqxgyHLcXfnsaw4H", + "NodeID-9B3HLkYWERnM5daHNb6XaX8on8WP3XiGE", + "NodeID-9DQ6UMDYYWZccC5MRmT2xqE5TaMZUDbLN", + "NodeID-9DnRjykQ4ZdyebJ3FeoqKBBjJcbMC4nsV", + "NodeID-9FKZ4X9iEb3aBqmqzBHsu9kXmhn6aUhzd", + "NodeID-9HUatTvqyEsDD4bUaocsDGpVeePh7KEdE", + "NodeID-9KhiEguiCdK9KGCsfwkWY7M4VLvM5dbuo", + "NodeID-9NsPTteByFVHbn26rvU9NjjLjRKKCR84g", + "NodeID-9QhM9oKKXbAV5xtfARnyKXM97L7nwvGSu", + "NodeID-9TCq8np31pHjjhGaHtLjs6ptYYPEt3LGb", + "NodeID-9X4SMAESCQz8H32PMq4wrSafx6sAUv6oN", + "NodeID-9Xp5yzoLDUuCzUdaU6uFxXTjUYjwoCvPp", + "NodeID-9YzkgDFxmnRQxV4M8E7cuWPd5HukCaQkt", + "NodeID-9bk3GQD8qBvHfFTFaBCBURupAKLQEYg6", + "NodeID-9c5whzpb1QstunxbQ9PGqKQLanAU32Wnu", + "NodeID-9cQwDnPkD4Km3rQFvyJuWD5GUCMYrPHkk", + "NodeID-9dqCQXBAv1AdbNPrR4HCbRDgDjMebHk61", + 
"NodeID-9j9p75hBtn6rMES3TuWrGc7Q7tQZrfWf5", + "NodeID-9jTrqaVKZpevKsGoiixjVSAc8b2Md18RA", + "NodeID-9jpUT6Zjmsi2MjU8xBh5MWWR2rmTHuurM", + "NodeID-9yrLEmUoyFvZ6MVwrtsXr82LpFExgsfim", + "NodeID-A1SAHmXVC3D7dn67LPmWgd8bMvoNKgqTj", + "NodeID-A1YeC21gHYrEf4HBdRvxFheviXct4MUQ", + "NodeID-A29C9b97WqpgPBZS8Kh4NEvf7RLRm1cdZ", + "NodeID-A2Z8m7egVLhKf1Qj14uvXadhExM5zrB7p", + "NodeID-A3Y24F5vNuDy3L2pLwmvZG8s976nCQ5Mj", + "NodeID-A7az7ffBywSGhgSGvwC2WbUgNxcQSiJD8", + "NodeID-A8VrzZuMjMGkFce5zqSQ4nbSwUMwNmnHt", + "NodeID-AAvXsy99V6dtcjwvweVC6FRWRk4fxwhjZ", + "NodeID-ABCBD3w6GosyujRHjEAXfbRkU14YxX1Af", + "NodeID-ACQt5KkctKfdLSNB6bfu7dVzmsyqpm3z2", + "NodeID-AHnXYg7f19R1MMmtt8D2yNejuBwha5i8Y", + "NodeID-ANZVxnoYnsWdzBUQePUsXHC1pVSdsGNca", + "NodeID-APKTqeD7UsfjYqfYBtDGaDCwLpr3ECVBN", + "NodeID-APhFyzYExri3n5GtDz7ytXCgbt8x5NAud", + "NodeID-AUFGUsjF8DR9cPfh4kLWCjtwdKyop1Y6H", + "NodeID-AYWEvPkJB7nePGujP9zWkHjrL76ppQqYK", + "NodeID-AZsVs7FHdKmNfjDDesfRY5w9J3bJd2e9j", + "NodeID-AeQY8TMBdDVVUgvsgRtZjFkArRybvbANS", + "NodeID-Ahtb1pwX6ugZfmb8ioJrBRWkPqg762Ed4", + "NodeID-Amf3N6UZZ5Q4PukDPttqqMDrkjYyu7Qfq", + "NodeID-AnJkYoj9mvGqv1A7WBujia17Hq1rDZp4Y", + "NodeID-AnZdc4zkKTSkYUDUByvX6u3HJP2MU53Pq", + "NodeID-AohNLSFbhRMHRdF9CPe72epbRtYPeZwjB", + "NodeID-AqXr2RXWtX9FcPndqPZmEDHqfd8Rm5cxq", + "NodeID-Ar9txpfoVW1LxzfYpQ1itPWpjq8b3c3s2", + "NodeID-AuyddbmFnAXCgeN388tMWzvAYu3nMzi4m", + "NodeID-B1SoG2eCceT7YhdRq8EdcuVZgcXeWB5VD", + "NodeID-B1jRRwEACvv92YJWyZrWwQn8DaAhEQA8K", + "NodeID-B3KbeQXjwDat2ySJrVppBxRrTxb7RwRjy", + "NodeID-B3si8iQ5MxJDRnwMBJyVt6puzici5tufY", + "NodeID-B5EdUCfcqnnHKgYzZfoVE3BBabQt165y", + "NodeID-B6GaYh7RRopBAkQJGiMeMZAsAbHXyYi6e", + "NodeID-B7R5mup1BGypqosat4fvCxWXGgziRaZgB", + "NodeID-B8SLANqFYZnUqfnWMuJjsjfzcN8Fxs9rp", + "NodeID-B8vH6dfELDJgPjYVMK5hjrRQkUMFFW7i2", + "NodeID-BBt17ZXGPvov135835zT6BBAa6uTwriQi", + "NodeID-BFa1padLXBj7VHa2JYvYGzcTBPQGjPhUy", + "NodeID-BHM9wtsHVPmqKbfmqqHTvTCiM4YXoRSvs", + "NodeID-BJjQb2PFLPiHsvejL2Q3yX28nqRxGxEqA", + 
"NodeID-BNYwWBYAidDqYiJYKhRahibQxPuG6hGvp", + "NodeID-BQTWT6W3Ke4PUweoYHThhuQgerhuAFVZH", + "NodeID-BSE4qod6VYM7Xxeausg2UzgxZqMmTTWnU", + "NodeID-BVXicWEU3ULP9aBsmRT82PLJMCXfdXDMp", + "NodeID-BWhTVK8YDrRmK6NeoLDn8oSk7pGu9386L", + "NodeID-BZq3oJcE1Unw1rYTB86B1q56x2VYQ7PwD", + "NodeID-Ba4X4hJ4bz51Hm1L6M4rRhu9JwniJgyUx", + "NodeID-BbDHoP1Th9LSr8iEuoHktCF38MQ6Wx5Ws", + "NodeID-Bd7EUL6WhVm1wF5bVNefWxHYsg8q2ttuh", + "NodeID-BdhYSwf3LW3bBqt3235WS6Cw7P5GYDFSV", + "NodeID-BdpT6z1FRz6vjEQp7Cb7nRFUgvcquBXT2", + "NodeID-BemmZ7Y5HLGHSz2kUjmPUDD7bbJDebVd4", + "NodeID-Bevezp7VTbNs4wGiiB115f8NrBwuRa79F", + "NodeID-BffXkmzM8EwrBZgpqFp9pwgE9DbDgYKG2", + "NodeID-Bgskmf8PocRPgtHgnAYKUgyifo9qD3Y5g", + "NodeID-Bh1NKhN5t6MBcHvY7XuGhLK14q7FwQjmf", + "NodeID-BhmBgAcV8WiNyrVUdDYJ9CVi7TvjPfJV", + "NodeID-Bk8Etvn1q5cEbZcn4y9QjbXoEAPwsHxB", + "NodeID-BmkswdTypyATT3xP9as8BeUCyq2sGScBD", + "NodeID-BrYXghQSu6KKGjuzhs3nrkcB46Wc2yYHy", + "NodeID-Bvr4dL7pK3FHqXvPs3X21y6QaZS86g4z4", + "NodeID-C2Yoi9vTGPppDhpFMx8J1XvKJv3CY2x8n", + "NodeID-C34t8rtehJ2BXMkok68K7ZuUZEk9ryJrA", + "NodeID-C7E4ym1BsDbraGxymND3zGYHmo8SJfMRb", + "NodeID-CA74JxFwV5BeRMCesYQruxjiKw4ofkEKQ", + "NodeID-CBy6ZLEwBBuYFtVDDvz8Wra2AfeAd96fs", + "NodeID-CCDSFhaQAuXWpiLviESSUPJmWUUTVxdGj", + "NodeID-CCPjVQc87Ka7nbXVBN5cVtGTfTzPVnEyb", + "NodeID-CET8PwdAu6Q2Cx7iqHrfdSyo4WEnkoxFr", + "NodeID-CEzYD3rFjsGtKFu7JLGpD7rMcGRNJFLGB", + "NodeID-CKZg819tz5CXMxMtrRfKS8XxVURFDuHVB", + "NodeID-CMSgVPNXC5R1kL19KHCNgbbRTeStUWPJA", + "NodeID-CMaW3TiXtNHzPGm7xzjkJvE8fthGibhAv", + "NodeID-CNK8vDPGbKfS3YSptRPPN4VYUEpoNH4Vz", + "NodeID-CNp6HLV3F1RnbBF977E93KReTXTkZ4t32", + "NodeID-CT2Fqk6d8eYAx627aYB8HAr7VjWHZYPQc", + "NodeID-CTtkcXvVdhpNp6f97LEUXPwsRD3A2ZHqP", + "NodeID-CU8Tq3pP9QniPeYWagh3TKDFccbvfwDkw", + "NodeID-CYKruAjwH1BmV3m37sXNuprbr7dGQuJwG", + "NodeID-CZP8JG4BKdDyiktxg3D2ZYgREHtFhqzjH", + "NodeID-CZmZ9xpCzkWqjAyS7L4htzh5Lg6kf1k18", + "NodeID-CaHFWxHNLv1R4R9oGHMQR7n2vKy95LTei", + "NodeID-CafHoeCqCjg8fWufaorRqFe3iX8s7z6A8", + 
"NodeID-CcS3n2j4CEaQCEnY39qaBDZajeZaXkRGU", + "NodeID-CduWdu3Gv7bsqAxnsdTCWuuMEyLYQqchX", + "NodeID-CehvZ1zrpo5HAb7K2sDiQcrks1Cn4d5Ew", + "NodeID-CfN57fb8f1jS3NWKsYSxfsHf2Lo8HUFSY", + "NodeID-CfdN2fVKKjDBRjqMLsxZNSaSKFdBgpMmr", + "NodeID-Ch8GJYTFsdPRzMVyWgVJTVV8MsHvbS4h6", + "NodeID-CipRaLt4Ym8yKegs45dpeEscA2x5w29Q9", + "NodeID-CkS5zL9rUsJkYHgSMtoYF7yBwBCV6Gn3X", + "NodeID-CkWijvSj2dEenuJeXu5fyJcxc5Wtg1feo", + "NodeID-CoKgGkWBUGouNZTqKCYLBNJ9dw4yJXUEW", + "NodeID-CpLDjTHrAuB8p3G42uRtGhnGKJ5YzGkFa", + "NodeID-Cq8kbEVqteYEqfNNW1CJSkrG7S16GiZ3u", + "NodeID-CqDq4nGVYJoSgKVpZpyutzkZPfcugTdjG", + "NodeID-CqGy1ZUkq4ZTznvsvzwwXtQiu8gBT3eYW", + "NodeID-CvFCEpXymDz9A2EftqnJmCNi8qVHxwKXq", + "NodeID-CwcD65rnKwedLNcsc98MZK6RghPMptEyo", + "NodeID-CyQc7xjJ2gJtCQNL6RVMEJf1KysJXG6QF", + "NodeID-CyS27tGGmcmxeKhCuCwhT4GwiZGBE8QEb", + "NodeID-CzBZTXGcxZqfB7UiN9ALzssfyHMWbMKH6", + "NodeID-D3BkKn3N6chukRSZN3yr6zQvu4DgCSvp2", + "NodeID-D3x6QzTaTdXxkMXYFx1MDc3T224H2Pmh6", + "NodeID-D6MxfVN4395J9jSmuPHDueQJue4Gitwns", + "NodeID-D7jdvPnW8XX3Yz1k4YYKCZxaY1FF7x6at", + "NodeID-D8bHw3BUjyGycV3dDTknNp1p6w8LHkNhh", + "NodeID-DE5iNyHAPisnXpaf8e16diJWL5y33VB7u", + "NodeID-DJHKfhFKe4QjX6AY7tyj2VTjZrpxUWUyQ", + "NodeID-DMbbEVaCyas1EXR5ubg2naKgEBmVerPne", + "NodeID-DQ4PJvysogwgzru7R8oDiRLv6qoF7Wwxc", + "NodeID-DSCwZSNFHcZFg5kbWrbbMHJNdpLUAuWB5", + "NodeID-DUNsbAcHScpFE2uwBQ2o3SPZ46C2svMqq", + "NodeID-DV4fFyxuvwUUUQbTKuc9bYnRibVixtSDs", + "NodeID-DVQevoevYh5pYSNCwoGpn3QnqewfibCJP", + "NodeID-DVZ7ffBkVkXDbpjGPEqwyYQgvcHhuTynj", + "NodeID-Da1yBnfJmUsS366wMN9b2Kzw3EL3WMdkC", + "NodeID-DdhG9cNQmDpPwWSpEW9u6T7PNCc9GKv2S", + "NodeID-DeeVioFXeoXx718FkbAk2unyikBN8iLJo", + "NodeID-DfWyvyBF7XF33B9kkSiXYcJyzxRBATXN", + "NodeID-Dh1JnVoEMU5zEQtvR6LHPVsyw6AGqbLuG", + "NodeID-DhQSUszPR4zHoxF2Cicx3pCUnjrfsewnp", + "NodeID-DnyNinbTZWYSyoekJ1xMmh8TxQCw9kHdz", + "NodeID-DonmTGabi4TZjWH1ue6W9GnHB9ZSjTrqG", + "NodeID-DpwRt5xkd3rKici9ikuo9aEH4AMfzsT7E", + "NodeID-DqogihFFxTyKaUTqmjDt1Fh82hWwkeq9v", + 
"NodeID-Dr3k3Xb2uFHqztcbVtAbbfhtrwYjsRysu", + "NodeID-Drb1UM6uo69KiAy8tyD2QXv3JUGU9exaz", + "NodeID-Ds4BPVEDoEPu9wHT5rk7ry8vWaHoY4cDo", + "NodeID-DuXPSVSZUeEJA8QP9QV4Vt2G3y3WbPvRx", + "NodeID-DxV8FsH9amd8cjQhEboKZerAPsBR94maA", + "NodeID-Dyd6GY1EnBTGi9WEMvFo7kkjsZBWWRDo7", + "NodeID-DzyJouyxaQRYYyfYEvTaWrB4uaPTTKn2V", + "NodeID-E1tni7wFcu9CizXdckfgxo9dhUwR3243v", + "NodeID-E24tDGQ6uRbTZaFCpqnUgZdWq2Y9YX4qy", + "NodeID-E6CBZzykpEJSqKyNjDdFeLvDN1AmZccWY", + "NodeID-E7Cm2d6wVhaA4Q7AnuRg2S3sD6noReX91", + "NodeID-E7e5wZQb8QYEKYd1BrRrszjPxUaAQNaY7", + "NodeID-E8dfbZCzX8VXgXYmjnA58GorajNrNcpFL", + "NodeID-ECmPviPvgqFc4iXFi3soN1xpXnUUDHjoG", + "NodeID-EDESh4DfZFC15i613pMtWniQ9arbBZRnL", + "NodeID-EEuQumHSHr2hV4C5S2FNL8LVofD2Wvqyi", + "NodeID-EGzKzc9RU6HGCzBe8nK2qY6mVs6WqroBg", + "NodeID-EHV8d1ezFyoA8DMzTU3WZXfVR95R7v1cv", + "NodeID-ENMu41b1avXe4yRxmF1uzymNJP9Xf4jcy", + "NodeID-ER95YbfDrVih5Ja8F81ac1RZ3Bk8h6dTj", + "NodeID-ESYYcZZMJkLzC9raPoRvH4rUeErh6YF7P", + "NodeID-EThtNJN89zd1cGjVPyThdEwHknvV6XeML", + "NodeID-EXuaP8HAE4anxXGR4QJX2em2nfEiTR9tz", + "NodeID-EYEsr6Srmjn4XM8DijPFLFT3Hf7fsDMHA", + "NodeID-EdSG1Dy4kqqaDJRn3kSAdM7RVSdW5eyyz", + "NodeID-EdbD6AHYPYNqv4X9KMUA4RBkuHnd54E9a", + "NodeID-EfGKSr1JxRXofvpSo1kABnyjuyJWLr19K", + "NodeID-Ej578hCdtGtaSQUMQFkPkoXVHHByJJzZM", + "NodeID-Ej5WQFmSCm27bZuRPoK6PeFe5tMURCgdG", + "NodeID-EjSCX6FJvzVA1gaja4cWqfaXEkZeAgnQb", + "NodeID-EjvGBJ65yHcJMwV5RhDJBEcKb1QP4Du1L", + "NodeID-EqJ1JGnx7zPAEvGy9JRgytDDTrsPtZUet", + "NodeID-Et4ic8Ew2HhDqKzb4Zx68SMbC8mCLUWzi", + "NodeID-EtBgBdH22chVR1fzZZPky3jzUZDTigCbV", + "NodeID-EtUZs6wf6j5kZMUuABEzUY7Z16Ja9DtoM", + "NodeID-EuQ17UhJAMCCPKo6qhEkeiEgeyNQk3vLt", + "NodeID-EzGaipqomyK9UKx9DBHV6Ky3y68hoknrF", + "NodeID-EzN4q9mU6TVFkND6oghbdLAUqDacE9Czp", + "NodeID-F19vhTXhznYNp6pAvaUmUpj4VWq7qjUyP", + "NodeID-F3c4DfFS8crrJUyExRhgaMiXtZrn3TuP", + "NodeID-F6AdvbZj7E2JZDfG9WhafyQeFAqiU3geT", + "NodeID-F9kHeXw8eoxEMpKVCi7yCUwYGry5zPURy", + "NodeID-FDXS1jxwngimMxWwdysC5N1dNZVPZ9GQ2", + 
"NodeID-FGA3erWUCbD9rUBXy1Ea2rzp4xas5zLgW", + "NodeID-FPZ87ZH95TeFm1su82QJRX8PCJi4oBAoJ", + "NodeID-FQ4TUmX7RFf84qxGPBdmPkrUCzCVoWuVR", + "NodeID-FQuy4XRiNwdHunxKvAEsSG7N2oTFvcKiJ", + "NodeID-FSDQT7qCdqgxZZq4AnFDcMpxukPrXUoyL", + "NodeID-FT27zhjAL6HYVkfBHoWk8FQ3pCt4xyVsA", + "NodeID-FTtpSNGEGH4HiQbi99qcgwGsgHthwMUGZ", + "NodeID-FXLBpf4W1QenLua7FMGTKqvk2Anobbi4m", + "NodeID-FYD28utCsypYL6THXnyT8bTW9syP4WQrm", + "NodeID-FaADDvJH51EAP4vhJEKCEfCtgpUUnTNX6", + "NodeID-FamFBmBfwGwjn7fKpbaWjzcnTiGNfmgWN", + "NodeID-Fc8JvkNuUmo9oSy8jXiNXUczUM2q3jWrS", + "NodeID-FcG6viphauzUYhxsCtbaJUKqirYqp8YbK", + "NodeID-Fcz43yNkFRu3jL97vjLi3ZkpmeAcVR8La", + "NodeID-FdcqnJ1bwv976Ft4RibXmD5DTeWSnMzS5", + "NodeID-FehvGhfc73qH6s4zTdiKrNA4WmSMbCW4X", + "NodeID-FesGqwKq7z5nPFHa5iwZctHE5EZV9Lpdq", + "NodeID-FfiosnkKYLrMKoUpNTuFZz6gY5kb9cPb2", + "NodeID-Ffr1YgeWw3h2Ct9dY7V3u79ScrAbhSUUz", + "NodeID-FfxNCp6tZxTpj1JxA8GGVTtJWowqKDmvY", + "NodeID-FgvaeMrL2GqxSBV9R2hkLL7jH29vhBeao", + "NodeID-FkbkPTVMLbZZJKFHYUKZTt1Z3quNUurV", + "NodeID-FnQRH8p5JrhLRUT9wK2rtEc17EtN8Tcb1", + "NodeID-FnsrF37UrgFtzoEKNxyAV7wN516SwqFVN", + "NodeID-FqAXbqQUVaXS6z6jTaLPZGivbLnHf3G7q", + "NodeID-FsBU6yEVf9ZVEhuFy5sRixYwio3LRN9Ey", + "NodeID-FvMWujr57hToQPSfGsd2exqaFBdZzJBTk", + "NodeID-FxgVB4Wwgg4dZ5WTkkBp9EbwJhAgwZgWd", + "NodeID-FynRLYTNZroGso229UyxpxsjqmQqJgAQF", + "NodeID-G1MJv3mVMjo2SueWzr6rvn1gWyAvgTXrQ", + "NodeID-G3axaqp4Grtc2S6LD3qhFR3wN1NEZRuGx", + "NodeID-G3h4X9o4Rk71ijtpEdsQpjWCM46GNCXPk", + "NodeID-G4Za1ZuLLinkM5skf1LZqAWqKs3P5xs7F", + "NodeID-G5p6Hn8XFSQJKH1YzA5iS53rgDfCCE7a6", + "NodeID-G6g8fYiCrp6ZNryP3Ni2fJKysDxHbaafU", + "NodeID-G7fjDTJ9DH2P3ckC4VqKvH9q9Zqsv9b6b", + "NodeID-GDZpG8cVCndUcnoHiub4XPM5WR2hF7tmk", + "NodeID-GEC1zVazREQmLUpj1fF2qvuATpRvMgf62", + "NodeID-GHvQsCAUehhZi5psMeBxDfKHuoVGfRoDt", + "NodeID-GJsdSRahtgdsTBjr3jbew5uqTXLcjJkG9", + "NodeID-GK8GSuLrw6jk9CVZ7ryAjSTm3pFEmeVF8", + "NodeID-GNJqrL7nMYPaYXcrSSfsY7ffe5JAzTRGh", + "NodeID-GPc1Si7SQ6oAEfP8MzQ3cFN3fsAtkwC6i", + 
"NodeID-GQTbQ8qJYwBEhQEa92V1yjHzeVyjTDKFP", + "NodeID-GR5Hc9oEi2AvNBAoUaQDZtWEYz73jP6jq", + "NodeID-GTgPUiVqjBX1UAFmdDmBoyHuiRhTPrZen", + "NodeID-GTx2HdjpdzYuQEpjrKnuuvXDzd8XPBxBG", + "NodeID-GWRSuAdKF4LjKSgEYFXR1crtjov8vnW4a", + "NodeID-GXtJyspnxHxzLWWyUqJ3xgXNidoxwAtdw", + "NodeID-GYXpBJh1SYjG9heFSV51NVhJHh5LindGx", + "NodeID-Ga88DP7f7F12ePN3LEYUGge9kjKi3qHP5", + "NodeID-GbUcqW9qSUuKZPMJ89qL5YJWDfAuUjdq5", + "NodeID-GgDJXjd35ewugJyG9tonbjBEKVZambFAb", + "NodeID-GhLyXd9aVH4xujx9SzC9oy2JcQQ5amXuC", + "NodeID-GhsAAVi8nXhvLS1Xv4ast3EbcL4BDU8NK", + "NodeID-GiAS7J8Aiuj2iBAnKT2wmnxWWqp1QLd2v", + "NodeID-GkiKRa6ww4zJqXKQUcqHXnGaRKD6JNoYx", + "NodeID-GoVFrxwF41mzpVyt6Pb8K83ynj3q2U1CV", + "NodeID-Gohrw1ApuWhwFwUbCLKjS6hbijukerjuo", + "NodeID-GrL2hD5CdKU4CE8fbN97kVH5DpztHMLPX", + "NodeID-GsGzZFRypbZdhosjortCTbGCHiMGAifhw", + "NodeID-GsoJqaJe9SBrouxVCtSriYE64e2cQB4NP", + "NodeID-GsufzM4GKTutEFEAkDunS2wkhjdUPUvCu", + "NodeID-GtJaZR21ZmzB8CcPYXVA2Ks6Qgb26upj5", + "NodeID-H5Kcdpnye7iumF8ad4EHEFxvYzPAg4o89", + "NodeID-H5yv8taAdDWeCGNSLzdM1u2FA9cavk8ka", + "NodeID-H6FuMzUuGxjcko4hoBhPaZ56F4pheS2Sj", + "NodeID-H6Y7aEKkKz5HeWp4nE7nKHR4PUksjBuke", + "NodeID-H7Wq3A1RJsb5skBd9iy8zY37yW72zrNLy", + "NodeID-HB5cY5e67mzjNwrcqucGAxh7SyNMWLY2z", + "NodeID-HBkBmNgoiab6dDBZ3d7Atiq8ja9achxwV", + "NodeID-HD9qhGtz18TdEbLeuMhn4ZXNCxvdvQTwy", + "NodeID-HDcjsGMHLaLgJFoQYmi4CBsbjMKPacuP6", + "NodeID-HEdbAzR9GLht5p6b47J214UhdshCDX9TP", + "NodeID-HFEef35BkgqRBYiQ2ou5JhsyB8LHRtyuo", + "NodeID-HGZ8ae74J3odT8ESreAdCtdnvWG1J4X5n", + "NodeID-HHXxANboEXufRMuSFBHgc5dbpk67L7XPV", + "NodeID-HHcVpHweLyANYAB3UoDYrBQLDznFeS42j", + "NodeID-HM73QztvPuGikaADwnjpaJ1iKWAABuQon", + "NodeID-HME2ASYZmCcpRRpKQRsCLPJ83ZkWDBkQ5", + "NodeID-HPpKmAyrXsouQTaxMJR2YFgSDVV8BA5xP", + "NodeID-HQ4mk1kjeNvfsan5qrmynaMhQLCsQ4pYv", + "NodeID-HThbCMfRbs6nVRR1ySihYtWLraLZw8x16", + "NodeID-HTkPf7hwzrLyDDjAZAqMNQh9oF4B9iebY", + "NodeID-HVYnT9MUfcK8sFV9tRirmkppWiNmFWQya", + "NodeID-HXdPaFvKy2oaDKcd1ufP7cjUuRLk4T3cQ", + 
"NodeID-HYDf2HUkHFDiUKYmort7tjUFLq3253dHc", + "NodeID-HbH45yH52gVWeFe91A873QBG8a1uGBD3S", + "NodeID-Hcg9iX24FKp1zumDpJbXF4vPzkbcHBDhw", + "NodeID-HcrXuRruPjFEdc3snCR9wqxbh87sNCJWj", + "NodeID-HeWWhU8g5KQBNFaVdwnj2TZLvBVuke7kB", + "NodeID-Hfm8gpD4DpCz4KTzt2osJPfFvu7az3qiD", + "NodeID-HiX1MqbbJLeV96jrYH6uTkuWr7ofbiAHy", + "NodeID-HkyME9tYN3G6DR9bvrha6XWNB3kYaDJbq", + "NodeID-Hm1uax9QMFjwNrrAmriWVQVcChyfmrqFB", + "NodeID-Hn6ji4ie91SEhmpgs7PZzDajaMZoH3eZg", + "NodeID-HnZNJY2KscixbVi58em86faLMY1BbT5Zf", + "NodeID-HwYioQttyXRmGzdGAfNcZ8cAkrhfCqSuj", + "NodeID-HyWNPsQZyve88fbwmxgUzq8FnNfE6wR9E", + "NodeID-J3s61uikXhXGVxC8ffsgbctVLV1Mbvxz6", + "NodeID-J7EzGdbSy8EFrQM9bBJXdLfwCoRHtXok3", + "NodeID-J7yqAsDbJ9jXTg3dbszqUoJMgUwDgnmyn", + "NodeID-JAyKNPqtqsmjizdEWgTy8C8dM1tgZ7XNz", + "NodeID-JBWVRVMqqTR9sYFQqESA1V9YQNoi8oa1b", + "NodeID-JBiRdFNc9he8ivyGkdAzH9WaL4GKHoQG4", + "NodeID-JHQyndxosLrdixA6TjB6bexisQxCuQkgR", + "NodeID-JHVmPLfE3YrKdBozJM5MEn1BzgMJzUyPz", + "NodeID-JLw6tbjnhJZLhEq5jnWsrxRgP3AEbxAtT", + "NodeID-JMoiLcTY2t9P94nTXfm68pYq7LMQ4fdFt", + "NodeID-JNRTA7Q6t5azWZrT3qaVeeXnT4vH73zDF", + "NodeID-JPvPvJLYDc89thb7UhqrqhndNCHGggxbM", + "NodeID-JQGMqAo4i7uXuS49vgaZDeJedDRp6oyuH", + "NodeID-JSNDtZdthVnHjSt7NY3HWd1ub4gw1pKez", + "NodeID-JXw6V9Bwidgc7c2aCZEebHDRT2FuWzBLu", + "NodeID-JYJRG5ZYgd6kuQ6RvqHMs9CGh8RkyEpvv", + "NodeID-JgQWdp2K5yq7Bj2Z3TfGDP8GYAmeqtUQX", + "NodeID-JhWkuVZGeJ42FLktoMVoiGuoaepWjxyqx", + "NodeID-JjvzhxnLHLUQ5HjVRkvG827ivbLXPwA9u", + "NodeID-JkadBNex6QYkFN6MRMjrRq8LHn6jp6qKk", + "NodeID-JnVy96b53CKBAPiqJTRTK7FnPxRk15gZ", + "NodeID-Jni7c9DhGu4aMvU4axvH8jfnKLmi5iaCo", + "NodeID-JqPCvrvnKgNEKdsKfCnhVcaP9KJ3GWPq8", + "NodeID-JrKGcbdnCzXUijeLiwzJZQNX48Cyrs9Yx", + "NodeID-JtgzXwsBLsJh6HWQGWfszrCCxpCnnzdwc", + "NodeID-JvyHfY99GqXXWUsSaoeEEZZ64LQpMCUMU", + "NodeID-JyE4P8f4cTryNV8DCz2M81bMtGhFFHexG", + "NodeID-JyRud4d7Sp77cjoXiuaThBsUVrSFEHgMQ", + "NodeID-JzcJLa8bXBPBg75VVXUHh4fqgmhx8N83h", + "NodeID-K2tnczAaPNak4WDoHs7thRxztKDc3LgoH", + 
"NodeID-K7X83JX64HEzj4xSLF8MZWNNtUbQmwnn7", + "NodeID-KEkCHDs3iiF5U9xaKSVg6xzDU27oe6bBp", + "NodeID-KEkeqcndUcaWVXNjNCXUUbX3gUepXqvCm", + "NodeID-KFerpW79qQ1whckFbjUvuP7eLwtXEHndV", + "NodeID-KGfpnGi2LiRyTFfeq866B4skCpo5xuwFp", + "NodeID-KKMxwJXjz4iB6G1VopYYst5W9VJ7dE5RH", + "NodeID-KNGEydgzXRZzNcibfSDGfQwZBUHaA91ij", + "NodeID-KPi6dGARXc6A8vMjKuA8troteNsArT2nB", + "NodeID-KRibmCkicP8FkeGM5Xm9yykNcrED2sjaK", + "NodeID-KRvfLaiAUDLBEixkmdTDf43nNppas96Mg", + "NodeID-KTnhPkucgchw74sGCrkpuCUmv5y2Nwnnz", + "NodeID-KUVib5Ye7nrnA7RdKWz9nv8q7gZuCZwp6", + "NodeID-KYr2Ldd1YHibJx38nkQ1vfqzRmsc41Gzu", + "NodeID-KcC4UA5WXJtgRBdhtEnECqxyNcxh6bsno", + "NodeID-KeNHPP5bXbGV25Ujjp6cYfExVzQrmCfkq", + "NodeID-Ked7WpBzVYe4DthczsGV3CCYpEvj2F2bD", + "NodeID-KerQSdj93DReMZFcDxqCVqs37jwEQb1Ji", + "NodeID-KfWrRxNQACEGRqPw7RNkpn2AYh5kPbCFt", + "NodeID-KjKVvgJ71tiwHGwqBmhQLKGqrNeQBpqeR", + "NodeID-KmzXj1CfPzG2WoTJiXr2DACTksgeQARJp", + "NodeID-Kn1JzoTMNzKS67fuEtcLU73dUTCdjoomR", + "NodeID-KpdyakxPD8mdLHV7XDYjrtUZ6BG4rXRKu", + "NodeID-KrZPkFHQnZWqdSC76pXRvnPVXBC7PyWWZ", + "NodeID-Kskhj6CSMxyA3Y6VsjrjGUDGZdMXTMjjv", + "NodeID-Ksmvwp6BpSszz917AjDWG9mHqi2cdXVW2", + "NodeID-KuvxfizxywPLkup6RtzmaJcCECjBrSACL", + "NodeID-KyP1LyscPyThYCEbN1KXekaeNgCtUgFhq", + "NodeID-Kzj2Jr4XWUdVhLkRsaYC5WH3tKVKyqX8x", + "NodeID-L1UD9fwiT1VtFUVX7en9Pnz4EcLTwAvwP", + "NodeID-L2LTo1cbHtyqkZfgKpPdo861Lgz8WmJzj", + "NodeID-L2kZ947yAuzugQaduZPJdysWRqxxsT2Ak", + "NodeID-L3eNEFiZPrdiToXvDvcR2Yepk9p23NK7X", + "NodeID-L5SLiTDbpJAgt4VFJ7CtVscaYcLk4L8jT", + "NodeID-L9Md43FWvJz6gQJ2eAS1QVigXSecSM7LC", + "NodeID-LCXfgTWLmmxazX8C3JuRbvpzmzqjzT3xe", + "NodeID-LDKGrAwCoz5Tdhu4tVDjqFFQsR7nKqNHz", + "NodeID-LE9q4y22nKHCv1gHSJfUyYY1qXcziLkFx", + "NodeID-LGGrrgqBdzT3smcojPh7nTKiPRu4Q8G9g", + "NodeID-LJH9JffagSPthZ4adGRjHByXFytPdrmmc", + "NodeID-LLH3HuzG4eaxJYYMiZTpQPVJNuEAZEaa4", + "NodeID-LQE7F5TjyfGV3HqrzAwYYAjdeUZzp4Fzh", + "NodeID-LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C", + "NodeID-LRPi25wApvsNUuUFwwQtUMyuUNgrhmZPG", + 
"NodeID-LRZMF1NjDo4neYWtbA1vRD6P7rmLKu3RL", + "NodeID-LSsApoxv5refjrydswTevcdtddK4LU392", + "NodeID-LUMxSYFmov1nDn7KZE1Z4RmT1EYgTCu3v", + "NodeID-LVSzcr2TXAjTTNbEi8Nxfjwk3GQyZo2s9", + "NodeID-LX75T9BwhW85T9Vz9mxvUEQhFqR1688Qn", + "NodeID-LZMzeFFuZvg2fL3JYoAGd8sChmBncgPA2", + "NodeID-LZYJZxAqgn28K3g2crJtGgr5mW9mo7NFc", + "NodeID-La8YHdyQZHuaaoEb6ww2BznRa1N4c54J3", + "NodeID-LaidvDiaEiYSxKhqPsJx3ST2PQKFidmY8", + "NodeID-Lb7Ez9LWTXUeKRUMgHX4JJtg4WNh31JUS", + "NodeID-LbijL9cqXkmq2Q8oQYYGs8LmcSRhnrDWJ", + "NodeID-Le4BWsZM3dAuMXTDL4rkcRZBTyHpC9zzq", + "NodeID-LegbVf6qaMKcsXPnLStkdc1JVktmmiDxy", + "NodeID-Lf75GiFUq2psqfd3e6GkMNuVEYx9BUuuT", + "NodeID-Li2RswwproDXoFXPtrc4LvVWLxC1osx1C", + "NodeID-LkER3oCznkUE1mTaRsugitboTtBSxNF7o", + "NodeID-LkcSuyM6DZa1ZMWDpybe6VJVD1RjNF3JW", + "NodeID-LkdxkfYhg6nSw1EEUxDUSYPXPwmr2cUet", + "NodeID-LmzaWSmdA8jM4hfYjsEZ9QJnHZP9K7nET", + "NodeID-Lpb442HkX5sNFDeKcf5zXZJ349cuqiWdg", + "NodeID-LsgAS4nCNrjL4gxPmTSDmNu2ywWrPwvVK", + "NodeID-LtUqKXZBqZj37HCNQGqjaor6TZVBVF51x", + "NodeID-LyaNgPqgHTrKSiLdEpp1JPF3Qkv1k6n1B", + "NodeID-M1745j4YUixszHoRfPq52DpyQ4TSiM8pL", + "NodeID-MEEDgr35ZZm61jCzguAYDqFipBGrEnMtP", + "NodeID-MEe8K9JFZHtovv515bNciv3YGC3jci3yN", + "NodeID-MGkAX1DhKPPUudKPgP9qu2bCh3h2KY2ea", + "NodeID-MKaR86DivZF1UA77gwUtRjHp6ddUHBr6T", + "NodeID-MLHMrE994WwHxKqF6tf6E6DF4SuQyeP2Q", + "NodeID-MMiSVwiACfxkyURcAYV2EXUShAYpmpkWJ", + "NodeID-MMsdXcGMAN1RgytS3EfsEFzF9opLeEryY", + "NodeID-MPw5DtJq8yeFFuEj9LrhiM6p6hhdTXBsd", + "NodeID-MSF7z6rWEhimnDUJwwaGTiuWxkQEjT1Di", + "NodeID-MT8bs75x9ZZYTCC2G3FdBfSiSPmW5nste", + "NodeID-MViNUVT7MHKe7jS91EtfJixZG1nDowv9y", + "NodeID-MXwySzKc2JDDg2GLR8NsmHXDQ3XJcWFtT", + "NodeID-MZxyHcXmSRh6uLruUmgBEhXRxYge4rooz", + "NodeID-MbibpFmGwGXAL1yPQVDvJfjL5GgbGqDEJ", + "NodeID-McsVE9skuHHM3ebcFwWLhzXvhpT2Dc8Z8", + "NodeID-Md59LZThQ9xsihEFmbgv4SVktSuQkXnN6", + "NodeID-MeDZKLP7y9ydHRVixZ54yxZQzWqS8YvG", + "NodeID-MeZyN4H2yUfvu3KuiTFDh7KLctHWni9tF", + "NodeID-MiMhJNePYcQNGrAVsXRYEKXQ8AA85Nhb9", + 
"NodeID-Mj7LRrnqPK2UPrAKYHY3aNSDpshsM9tXP", + "NodeID-MjFgGwGpHSsvyJPk7d5XRhWWyihttVLNU", + "NodeID-Mn5pQf8vkLQtic9W6beXzE592Gyctr4qo", + "NodeID-Mn9484xnB1NnnrfPfeJ9UrdakNXdD1B85", + "NodeID-MnAL4QmHAqYihdL8Ad61AY5NuKAtYjz9j", + "NodeID-MnBVormBXdmpzuFUVFX7XVdwoMHEbH2bV", + "NodeID-MnNiFH3NJLWYsH1MU7C4U6fXdzhqtZW6a", + "NodeID-MoW5hERQWaaaZDvy2g3LDHqUNqkf7Bjo6", + "NodeID-MtR3HLYP88Xmsd8whwWHRvijdAuWsPsBA", + "NodeID-MucxfzaqiKNQCznykW8XPPM7JZFPVEbhF", + "NodeID-MvxNcbGGUifjjh41nnrm7RRsTqmU75mH1", + "NodeID-Mw9wURaGBAXkgbpX5fkipsmoqnWXXxzdF", + "NodeID-MwBo6z1rpGZqbD9xuo3ByvcKTw55D4393", + "NodeID-MxsyPb1TKJzaTxyVvEvYqmkYfAH2NKED4", + "NodeID-MySLmc7236dztdt3jA5znemeaSHWGCg8x", + "NodeID-MzpGqPgcjmT5ALuCnZXbociRiWJTQyncW", + "NodeID-MzqqzQwU5BGCTTTj7FTEG8eGK35Sp4jrB", + "NodeID-N1dQ3zWTgcpnxDqejNYLevSQQPGNZSgvg", + "NodeID-N2vaQwWF6MeLWTT8Y9pBKV2oGjKVxZ6K2", + "NodeID-N59JVgQCpF5cRt4e6zKQUBgDkgyNH5GAa", + "NodeID-N5gc5soT3Gpr98NKpqvQQG2SgGrVPL64w", + "NodeID-NEeaczgsLrh5DKparo914eAz4SHCNrCSn", + "NodeID-NF3dhwiiGHc1MoT85T7MwWk2xLF9zpgeh", + "NodeID-NGCgVZpeUaJXdjVq2moRRha8tM8tfYEcc", + "NodeID-NHkrKzwkkva75HTqd1RDWBdH7tRe9FAFo", + "NodeID-NHv8CEWXzT8z8A4xDU9qDVp9yGfM9SZVo", + "NodeID-NK38Y3Y1gg3duKmwEmPURY5ngYLYb1yAq", + "NodeID-NL8ivcdpDokN2c2XB36iBbqTP2dmtDN2z", + "NodeID-NQxiZFkiHvPMdFnVQer5K3mPJPxctBefD", + "NodeID-NRhg5SrvPn8CvtisAdgFXA1QkLoPsqRdp", + "NodeID-NTELjonQCqJzT1QFvRdvMSDtJ2qwnqXGR", + "NodeID-NTvmKq2wmTvqoccqpxW7fQd1iGCiGhAsj", + "NodeID-NYrHxv8EfFjM2wnsvXSickbq6XEqwwB34", + "NodeID-NZeTWb3Ri5eR3tim5hAntmiwEWoLVdkDE", + "NodeID-NZfe5jh36pdgmAN4yX3MgafwUFmvMTCXW", + "NodeID-NctFDTRfjWrT5cfC831NNBA12XCLmoLTC", + "NodeID-NhqNSzgKY5HahcosTxbcB887htZgzcrjS", + "NodeID-NjPTsac32vTLnYBmz95TLBmMZZyy68Y8t", + "NodeID-NmGx6qD19PwiQRPwJq5fxfoKc4QUaSYPc", + "NodeID-NmUiTVDryDrut41qnwaNrBref4FMfn9Wg", + "NodeID-NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk", + "NodeID-NtyTVeUSdkNrAEQo3vPLzrqhQfrYg8f7Q", + "NodeID-NwkffS9VYtwYH1pDJ3xMD5ZKKt3ZAqCW2", + 
"NodeID-PA9LANCKQbk9Un8Wv4SbBXoDCW4bP9yLk", + "NodeID-PB6nGgZqX8ccEvyEutDiasS8ndhnrakdf", + "NodeID-PCuSoYTWTcZwQUZW41HpW3VaU5ogsAGER", + "NodeID-PEdsT2FKxpXiemSctvHry8gbZJC6bJrF7", + "NodeID-PEsmPuBRtjpBRnaV6gTrTxTFehVUtjDf7", + "NodeID-PGhVwQYM5eidnACK7jQ5ZkL1CnFpKTAm3", + "NodeID-PHiFH2SQRhBFnaNsLVZX179np4y8gjmk5", + "NodeID-PKSwUUGdADabw5br1MosKrSPyyeYdRKAu", + "NodeID-PMYYkxDpE7RPrtmACDaHi6RESLwhoHcYw", + "NodeID-PNXWeCUduveVZYuHRf9Mi6b4suVMJsxSK", + "NodeID-PPCdQBpQ5THZzWrbKAHTdwj5fcx2x3wBs", + "NodeID-PQRNvZxSZi598GNxbH6joNaUPgxaD5Yaa", + "NodeID-PWko2VpWPb748ARPENzdAfak6NZ6pYG1D", + "NodeID-PWnPrE1Sckzuoj4Di5zBwrRc5q8UYGm7j", + "NodeID-PWxEYBRBbKai9U3t8jgfb2SA8uAECKvju", + "NodeID-PYXgapX8Yi28W2Xib9oNDxuaaLkZ1gZ42", + "NodeID-PZATpKoYGFhHosyUfecJXUAzbXiMVEWcr", + "NodeID-PZCSrsorCsvyTxkt2aq1yCMXPxEUm5PGF", + "NodeID-PcZGZ4CzwJ8NEGsL7ccQEgAGZ7AU6jtBc", + "NodeID-Pd7rHrBk8hJyD5uCRcG3uWC3ZwPGtTohm", + "NodeID-PgBvqpbMQzJk61yoN346xxrnrHad8y8LB", + "NodeID-PgXSBGwW6cNMwQv8P7kYXVr5AkJWs7i5R", + "NodeID-PgheFFDtZftKj9mNpEvH27zeQ6c6qvoep", + "NodeID-Pm3TC3M1pWDFhwa79YRqHGzG9Sjvgcdtd", + "NodeID-Pn9jFroHyLVfDmsEruHABa73mpbnSU5RN", + "NodeID-PnZAZuzfg7xchQ3qbCMAtg4bAW9XSx1Jx", + "NodeID-PoyWPaU4f25ev8h5KQYWrnTdGMiN9scHK", + "NodeID-PpR5kKTZTpF2VstAXn4BwNKHeNEvo3LiA", + "NodeID-PqKQwU5B1hzz11kW9BE4G642BMB6xrdnz", + "NodeID-PqZJgjNLDbeqmSg7hMWU8R1a5HCEjbBP9", + "NodeID-Pr6ZsMxfFr6uyMyZeN95ZJ1yHqGrDhqRP", + "NodeID-PrZZEFnhrsgdbWr7hZYBw1vTpLpkntFLv", + "NodeID-PtHsgZydMzwYNT3GcUXmaKptmScFwJkZY", + "NodeID-PvST69voCJeusKj2UGoFvK1UJxuZYWjmZ", + "NodeID-PzPoTUQaErAzPd5EBdRzJeNSFr1kVMX3h", + "NodeID-Q2J1vGj5FvVz5rDST2nC2saG1BeDofUbY", + "NodeID-Q5Jk89SmFTSnxonorozufMhqnyY1RiUQx", + "NodeID-Q5XuPs5dhsQRuGJebnzStvyF7GKWt9JVp", + "NodeID-Q6uYpvGDEJUUqgN1eiiKNkDCfbPCjB11s", + "NodeID-Q7e8iAC8sdAhnhPuegkFchun2yHo7UAC7", + "NodeID-Q7y45kPeb692xEHNUXA5rCdZgDqvQtHtF", + "NodeID-QAnYYbuWU1PfEqVFgZQz9EHKEWd8fky9z", + "NodeID-QBtEYnbWGX5PtbsD1cd37Jx4eATeJoDuA", + 
"NodeID-QE8ipEQ1Sc56Fa15G6HdCfUFk9SpFRBjM", + "NodeID-QFG9ZiXr7boyFonB2D5B4MLfPHRgQDwLd", + "NodeID-QGGJXjdsv7EKVKy1EG5Hq9GzeFcNFMbMh", + "NodeID-QKheyKgFVkDyMxLcnoUHregsKHFuzuR6h", + "NodeID-SCtrKtb5k75f8KUnTPj2gBsq6yrxpmk3", + "NodeID-SvkCVFNKhd5VEo8Hgdky2TyxZ4J18NpV", + "NodeID-TJvKKLYAycYi23bnFiSFpq9pZBNktyer", + "NodeID-UCaJK1qXng91C24jExSCkRhqE8hSCXWW", + "NodeID-UujvYYJGbxFXh357EMppUFsXzPKvU3sN", + "NodeID-WqHQbG5MYQAYBRzjJV3qqy263mbz6iko", + "NodeID-ZYrkX7KzmDHfRzUhtVu7B1EYy1dCbbQZ", + "NodeID-a4kVGB6zfhfHgz3JpoLY7Yqr6aXM9BRv", + "NodeID-bQd33gr3fvRae95TLYkF6LcuyqrJeKK2", + "NodeID-bpSs3H78Q6kdMPHyy1WAYGdVXhVMmuWg", + "NodeID-c6NmA1sCyFvKV6NeKBndEykD3a5Y3Svc", + "NodeID-cCZxQ6nBdNcPMpKdKHHxm5kjwfR3caTk", + "NodeID-dFaWdTjZH3FryPCs2MTiivJuia52711h", + "NodeID-ei9CM9pS3ePsg2UYUrdRRALKr61jyJav", + "NodeID-fJcYnNFYcnf9Tn4sLVPSyKnpFr7yivMH", + "NodeID-ftRqiSfcMstsADKTtMnHGX8svv95tPXn", + "NodeID-hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb", + "NodeID-hpZLLFfwLit3Az5SDfXAtgCsErQfdQ9A", + "NodeID-jRBYRTWyowM2FUvfyZkg8cmhtKwqRYb3", + "NodeID-kCqLujZC9JuZp7sqe3vhffxgMgVwq7bu", + "NodeID-mce4CCkKp1Vx9ck6FwtjQVbTyUSvKe1V", + "NodeID-mwGVBwDPG11rSWCjYwYyqBJJxM3ZEoPj", + "NodeID-oRUBcQ7Lsrxi2S5bmWGD5CF69e2x8mct", + "NodeID-pCZeP48M5cMxycWR5QFfUumPgwKEXPFY", + "NodeID-pQCdCxnzPybSowJ8aNuu3o1w7e7QCVNH", + "NodeID-tFgE4qqgLiivtexeoojvJgSk7eHv1HRU", + "NodeID-tkLPZudEk3RpVG2DLq5jfZS4KHLJf27E", + "NodeID-vDc7LhjzpkqnPoFNMisr4BvijCQKe2oQ", + "NodeID-vZMp41xfebyKTCjNNT8wq6bm2u2xo46B", + "NodeID-wvLAxGEDoLcPaTWzEkmV1n7ipXFiWAA1", + "NodeID-yJAXXBzvB3sTVciXpoyku7uWuvhPnnqa", + "NodeID-z7b2btT1NfrouM5xCJPescQW1QxWLq2v" + ], + "mainnet": [ + "NodeID-12dyQ7nhRzsNSiFzEoW1RWK819Zkssf5g", + "NodeID-13XrVPjS5dVxBKaweeG94SjY1Q6yeM4EJ", + "NodeID-1431VdHCWJgM15Wcg3zaSBYbpXGTSd7Hu", + "NodeID-158YgSNVidjeFx76vtQ5SxWq8B1F3ai1E", + "NodeID-15AuPME9h4AS8CAEDsokNE5KMjLovnphr", + "NodeID-1A1GTyMcDcassSiTBZFVuoXs7AS5HcHu", + "NodeID-1Z67stQzn6v2hi1wD1Zd7nooPoqJE4es", + 
"NodeID-1aA7BtLfTX4SRXaWR8HttP4z2UapE1R9", + "NodeID-21TUfex3GmhmusTHmp5SDUQTEaCkaXwH9", + "NodeID-22jB4hF45iUrVhqiHvbDETRgpHKJdirY3", + "NodeID-22k7HZj3D9DSAD7ujyvvuYn8XzWuwA1Hi", + "NodeID-22wjev1roSt8jieZ4SW8rJLG3YiWyvbV3", + "NodeID-23NsVPT7XuxbAmisVH2LFwXAN1JsGvCjb", + "NodeID-23Po1dKrXFmpQaRafo1ZEdv8qbHC7i1yi", + "NodeID-23Xaok4Hr7SgTcCjjMnVsYJaN5cSZLR3Z", + "NodeID-23jyZenUu8Fm26ebdyXEEVH6PDoEXJ9j2", + "NodeID-24gzuDUUhGEsAGbxhMZNJJ2x5G1wDVHEo", + "NodeID-25GWqcvqc8m2ZT5ULNWfan5SEJNFBZMcw", + "NodeID-25RzatyYmpsm3QbYj9QdSWqdFQLKxdMR3", + "NodeID-25vUwC2XTURgqbtYripLhP7V6HPrkji96", + "NodeID-26VN5FUvCmFWEhHm2k85hHn4rDqyKsV3W", + "NodeID-28wbL4a2ozWTgYXt7NekGT12h5wp6UyKg", + "NodeID-2A2CytyzFR1r5m81cfUPL4BR2MAAp5U3g", + "NodeID-2BSvxuZFVVSpjrezuABMPvC6cLMwH8TT8", + "NodeID-2Bct3bqbgyNctwUahjFDCF3siViyVBiuJ", + "NodeID-2BjnggzgeiPAgrSmpzf3Hd1r4DTan3Rdo", + "NodeID-2Bq98e7Q94vVWipc5tv9jCL4aMxL7pjxr", + "NodeID-2EH9c6NwjHLZLWC2Ah3vekv3XUwcup62L", + "NodeID-2F2gRAyyVkaNC7jtPzJ4zgcQh64QyMkvx", + "NodeID-2HvkjoRiMzEcZgpWKNAruLKNv9r7Lb2pv", + "NodeID-2HwdM25EpGSt2M2BDaYREmGJrbYNudMma", + "NodeID-2KSietvSEq4mX2C8p1DAT83RTJPykxUqN", + "NodeID-2KfgS6P7vf9L55fMRTbHPgS4ugVSDW3nj", + "NodeID-2L2wFDTAv9Ti1wpqbFzqLbTa87dMw7QQM", + "NodeID-2PZiqgTALZcdU1TUvQeypFARYEgjkQZPg", + "NodeID-2Q8DS1hgPgsaMB1y8VqmxeworYJ2h2Ubt", + "NodeID-2THa3uLP7oEvBj19k4E4jm1tCpT4muhYi", + "NodeID-2UXMFjdXTw96iQHNL23PxH2hcLJUxnWic", + "NodeID-2VJfSPqaevBmiJKSVDnw7GBwsdfgA1XuM", + "NodeID-2WfvcQumS36rdyubDSioc8B5YinToKAaf", + "NodeID-2ZtHY1RPNrq1y5YuJLaCShK617g6CjRsU", + "NodeID-2aCXkeemxpBXiFui8BX1tVpyVsvhoZCU8", + "NodeID-2aSKmf99roSwDq7MChQ5pvmCKoS1qbcHS", + "NodeID-2bRK9nCjngVig5fMFjLhVgqf18L7dbTj6", + "NodeID-2cH1ggEZ8Z6CoKoQsEeZudUy5kEihW4T2", + "NodeID-2dsDfPgPdjhqMfDHskgiwDcqkqhhG6f7H", + "NodeID-2eBu8RjbvCFTZMcxu1oh2oKtfHT7V4zyM", + "NodeID-2eJ2XJDpUagguhE9wKFR9JUxYNaBk49Ko", + "NodeID-2eraCsE3WijaEnkcFX77hbK494TneHXkY", + "NodeID-2ff3RLJaMK5VdQ4jnXELc9yas4iKoKuLG", + 
"NodeID-2hoNSdthtVU9RE6g4VJ7Y6tGdWm3s1PKN", + "NodeID-2iVvryG4GyPR5XBQebYMMVTFYfsNKy9oB", + "NodeID-2iWqUM3VWvrcTLyXi2KgBLVhunMvFW7vY", + "NodeID-2iY9tRvYLGjeGtjnUGyGbMz7uoaBdZR58", + "NodeID-2itoUKTyTyhGLyN1sYKNiyKHG2UkDGjgk", + "NodeID-2jnJ9jJrW2EULmCCNnZz88HHCAQYo1Bja", + "NodeID-2mWa8ytKEtviCNwmHfSPmDvkTGPLH3kf7", + "NodeID-2nfH8Mmj5ZY6Tx5oD8ebst7prrEmbS4Ex", + "NodeID-2pC9PZWbWLdaxTuXX3zvh4xZ4WobwiExr", + "NodeID-2pNQqfaBqMqwWgeJiqPbmHZk1cUtWcjqb", + "NodeID-2sNYafYrpqSjsGspW2emjd6TqNTtWz74S", + "NodeID-2sspVTzGYqTkeiQeBsdFKv57gWpLg6Efg", + "NodeID-2vPh5nJm68PNyauc2ot6LJgubxBaJbBkV", + "NodeID-2vbdREFyZitz6LvSBca5gCe3eCPHZGq3b", + "NodeID-2ves7JhkWUcLfPWWtKVgh7uvJogz8HLnR", + "NodeID-2wWroHMggzJvKh6t3tdPtJTTP9DNmdc4K", + "NodeID-2xWeMjBPrnJejBo1vFiyNZzy2FcquZnae", + "NodeID-2yC7fWX23kGjCd7ytqGzQQcRJf7xaj1N1", + "NodeID-2yZivvmC5XD2M6CwcVVBdPGGgU3MNta1o", + "NodeID-2yrtwg1pRGi9jB6ayaXgpNbMTeGHUyofw", + "NodeID-2zzp8nVwqw7ssQsMXniESwjwpZYMzeFsa", + "NodeID-32JAXT6PVdHqpvDuXXHfWrFLXg8oZGqiv", + "NodeID-32aVnE9HYv1FesKKbvvUh1ZvFU3ARNtR2", + "NodeID-32dhPQyEQArm4ybXTxAvAAWAap7DkZAPh", + "NodeID-33Ztk39cwheRHZbWLMyrNFUea7y7VoaWW", + "NodeID-33miCHPn9eN8H9Yi4bCzEaL9Sc5BLeKMg", + "NodeID-33sja4uJHmJ3fpDDu5byuG6V8icdSAke7", + "NodeID-34QP8d17f1XjB778dCTCsBfjL3T4PeMAf", + "NodeID-35rq6ZWsLWs3coxC38LACEayLh99jxMav", + "NodeID-36Vywf4J2tu9gA8xJCxZjwi2JABWSF7qo", + "NodeID-36i6jQQTfB6Z8NXFPQJsY3FVukaSGKTDp", + "NodeID-37Aec6FfbDxVtjLNUv3bTsaXWaw5EGUrM", + "NodeID-37ZMQ9ZZ4e7ZD1kmRg1WSTpRrEPSQ5LGT", + "NodeID-39ZN7XxaD4si7XdTrhiaPJfQqusjzLP9M", + "NodeID-3A2diR9QLdWU77zW1sS39tAb1rSYexhSG", + "NodeID-3AUVAzMqQ66svzG1H7JVgURhUQHU8Y3r7", + "NodeID-3BmiiYjMNvspTqKKo4fXJWbjDbaaWRNXE", + "NodeID-3CHc7PuHw5sGxtSVjujpHRxgiFDSFfucK", + "NodeID-3DwhPYMEQABuocceWDAZEpi8GMcLyvTYy", + "NodeID-3DyUWkRptB3CRUVHk39Ni6Dpr6QvGWXwA", + "NodeID-3EzewMeb8MrVyWam8FhnHPHPoKsT7ERXM", + "NodeID-3FhRFK6UxSfMED5EkK7jZT84pFZy9f17D", + "NodeID-3HLAgdaA61zPrTx5yQ7Cc6waKWidsiqMT", + 
"NodeID-3HvUXQy1siDNUBGWMYxwMfjufh8mxLtQY", + "NodeID-3JSPJv6xHFBekjausg4m8e5JP7UXSPWj4", + "NodeID-3K3PUAqo3cKxRoQyYto1EsXtuTHoDZ2B6", + "NodeID-3KAxYX7JeLQgm1fwzVqbzjow6eNzSS9Aa", + "NodeID-3KX9tgCEQcTHbC8W9yY4zY32Cj3ET9MNQ", + "NodeID-3KyemFW2jou47TCfoJG4YQhtCDd6Si64q", + "NodeID-3M5i35f5u99QbBn6fP1FzeAto7NA4TFKt", + "NodeID-3MFu1eLpGRrRcWksHJLBuk516Vk5PoYgC", + "NodeID-3NJgvio7B47MB7BZWm31LHzbPWVdkEiEP", + "NodeID-3NXS6ZAzHeqV7w4saG4vXxAd4tcbpmxfk", + "NodeID-3PJY4Rpb5BRR3xSUvuU5Dj3cyHcAmzfjD", + "NodeID-3QvnmD9KrJt7BcoYytWfCsCD83TmKo16d", + "NodeID-3ShFK7JbJ2LN2gFT2W4iXp4N2NVuP4vZC", + "NodeID-3TWEGuLyjvyKYLQqXNvQ7GQqSNoSQrviN", + "NodeID-3U6UcmBu6WVCgodXYsH72VwVKpgE4XomQ", + "NodeID-3U945Ju6EUzVaiA25ea2BvaapV111iqcW", + "NodeID-3V8B3h9bS1cQerQMF47sw8Tr9jmJz4uHG", + "NodeID-3ViTS5vVn4uLoQQ5d7Qs8mF5eYvgQcLQe", + "NodeID-3WH62agC9m63q5SmAyPvhhLygsUjboQe3", + "NodeID-3YkLrm1D9MqZ6K4YTwaQkt1NBb6wP9Ldx", + "NodeID-3Zbddv2qkn6gWAfd12ysNN7N9EsDTjc9v", + "NodeID-3b2dLL4mSiGK2gx9pmML5n2Za4T9pLHe3", + "NodeID-3bciNCJdZcW5k8jMtHujT6msFbGMotkSF", + "NodeID-3dan8fxgCWWJZpNUkrBBEw1vhL8E85h4i", + "NodeID-3douD9GDP69zSq1eMzRAYfKCUVtjEEUCt", + "NodeID-3fntmpjeW5JhEgbVfKT9fy8xeaMg4tquo", + "NodeID-3fuaS8gCD4cWtgZDCLrffU17gfHUNHX8Y", + "NodeID-3iXKmHPAMgJa7z7pGcPpMC6pAmx8fjM1q", + "NodeID-3iknBWGJowmNu2d63Qv7mRM52xoQ3mCoB", + "NodeID-3ir9ZQxz7rrTVDcfieFUHcaVNVhV4ysUR", + "NodeID-3izZtcg2iecpF5rEkQF7Mwofd4puVGZwz", + "NodeID-3kGMSCRnasr5CjsgNm1D8FbZnY4C5iNRE", + "NodeID-3kTMjX1jJvdH2S94GHLi6Qxy1DKyLhuc3", + "NodeID-3kUUeZB7umtaENekUfWxa1bxrAU2eq1AD", + "NodeID-3kpT5aLGx1dFdDY1Dfn3qPH3qYkoj9dWy", + "NodeID-3mvtMQ554k7VbrbYFUaNZR4JmeCoggD5P", + "NodeID-3nuVL5JFrrumBeA3xqAk7BqTkRpu5mJsh", + "NodeID-3oJ5XYknQTmDrwTkBxLx4DCuFgFKFP2h6", + "NodeID-3oaegcGaKyncpiWQCzEVd7vN1Vcsue3Wc", + "NodeID-3p3GjN8gxjGNpFCEKyErnWoXJGENDdj7W", + "NodeID-3p3zJHazEyiVrhRXKL6jJds5esWnwzbW4", + "NodeID-3pSYSkZy99npP3NS1UfES7beUM1y84EjP", + "NodeID-3pfvvxwJWQb1w7KrtGfGhd9ZtLZhrBrBc", + 
"NodeID-3qUq9jKXL43HwdMEov6KBsnCjcvWXmAoF", + "NodeID-3rw6bDxFFNVoRnZmBAnTV4ZEXnkjooVq4", + "NodeID-3tTQWaeudKziFWpbmzbawLJTunDsQnjnu", + "NodeID-3tjM63VDvTaHkhzxWxV3zrnJjAfHFAgfC", + "NodeID-3txc7u47G8EyCyD5aSZdTxeVdhAWSbpoa", + "NodeID-3vkjzH23PgBPmHixW9xpdUD9vndf6ffBZ", + "NodeID-3wd8cyGCDmhuoZYWmNDab2FhAVpnKYKJE", + "NodeID-3xmutK2VUntMTYcAL5F8A8AYuDcyeAZ4K", + "NodeID-3xtitFQ6k29E476KCq7a6CoZtDmUgKqmQ", + "NodeID-3yiw3g5Rer7repmzoJaATJpkf7GnWD5j9", + "NodeID-419QFcweSxfj2ZgxFMR619CkMNupjpAW4", + "NodeID-41CFoAtQx8Bqivuq3tkg5oqnj6WpBz32S", + "NodeID-42kmqmMDkmMyw2q6gS1uLi4wiXdC2NLwW", + "NodeID-43AKDBv1R4hvnt9bjDFA4SEcwFMzZqpXX", + "NodeID-43LKNkQ9avhKgVj7KrHXjq1bYi6mvxQ2C", + "NodeID-45hNv286MEyVSjGrtiYLt3qqnj8G7FecS", + "NodeID-45kFbHHYtgXEXEy53LCaLMBE8CA5XEdKL", + "NodeID-45p6WjZk3E9Je9Sw4q4SvEaagbYF7Jsud", + "NodeID-47pbycgGRRWtAB4FZ5fJZApoeG4nzv41U", + "NodeID-49LTjmBTcdjMyD33u7gKkfREPqEhhfPaj", + "NodeID-49tGSAiEbwhRcTvnu2EaskxZPiPb9Lv2T", + "NodeID-4ALu7Jfcp5S7S4BXnQgGXMZ3rm3fQkxNR", + "NodeID-4AW19ZAJMCyr64UKfFAUhZXuZDtVshQ36", + "NodeID-4AWHc6b817tKesbKJ22EAEsJa4GkrDuNE", + "NodeID-4BQYSFMqtFcjNgwS1bQgLnzvoKqqMx18E", + "NodeID-4CJDfSDWT9X3hTosPsgui1ZRkBNRhX6jV", + "NodeID-4CL8XnuYqfVVLxRre5C5A1dpRv3cZN2Zs", + "NodeID-4DHwFAw2xZ8HuSu3jFzMU9cXvNhUHCkZ9", + "NodeID-4G6U36ehMMYjJ3C9gzLmYRzQFho8c4U4w", + "NodeID-4GFFG65jrNUU3X6vsZKgUhmz5iCG21nyS", + "NodeID-4GcMxoKhvXDebqgeZq2zKWPgQZF5aDPm4", + "NodeID-4GkgAWZzSWHi3hZZLjLGes5HwuJ5FXDuj", + "NodeID-4JcnzK8FQKGHGMt5EQHRWgBVxxdA4sFy7", + "NodeID-4K6rXew2J41T5LTWmTi8AjMbUFsidXGpE", + "NodeID-4MqtiVkCYGD4TmoBhH3b6a6UTGNrPhvvW", + "NodeID-4PKUhJMxeL5C7A5epar2X5TAAzXLqH44r", + "NodeID-4PTYheMTMhPZvYhJJ1Hj2FwCZSR6rSWsr", + "NodeID-4Q7EdeK1p9JkHULo5nZ9KVwgWYjDjDf9F", + "NodeID-4QRHMt4aejVDLqY1VNmSXnLxaQiZoZbej", + "NodeID-4R4zmBEWqY3dCtKHmvDd56Dupq97UAwtP", + "NodeID-4RV8eRbw9andBLK2og4rtt3W7txBmSuxA", + "NodeID-4RVd14QquiKdEXitdrnTuZiYpaBY1W6QM", + "NodeID-4S2uvFvPmHh2Q4f2To1XznG2HMsyohuA1", + 
"NodeID-4S5sLovGppHvP9uv4v6jZHV7JtASRpUUk", + "NodeID-4S6TMWiPdBW55NS9ZU95y4aHTBjUqr2eh", + "NodeID-4SkHqMZLGsjBmMXF7Dnpg7tYESgo9zAC8", + "NodeID-4SvFyvJPHPYvkMnJqBaNcwr5yuU8sCsem", + "NodeID-4TSV8FnyRHrVAmPALXfvLnGaHGspS5W2R", + "NodeID-4TaEnGcWM77nvEityjYDxB4zYdLQ6LiZ1", + "NodeID-4Ubqsj2vfwdGUUYNg1jtYpkYNNLugNBQ9", + "NodeID-4V2KBeNd58jdBXej3ohL8E5d1bNTTp4tT", + "NodeID-4WGNJ4vv6bH3FLCJgovqn3D7RCJ8rKBDR", + "NodeID-4Wt44Y2AAuuD8iRvFfh1va5K7p7v697my", + "NodeID-4Xw1ekhVqhHqzZ4pLTPtofbTZxpkvcQNi", + "NodeID-4YLHgpase4iRi6a4wH6c2jUMdWsKr74j6", + "NodeID-4YYQc96D2kyeXh2z5Eb9U2owhPXKPJgS5", + "NodeID-4aLF7SfieaFRqFtLBKo6eeEbQbpVYVjoX", + "NodeID-4abzhcVga2Mc1BJYEfypmRVtVctYwsvpx", + "NodeID-4bKCgF7VcG9ZtcfMwAVFfBJoh2xDgvz1j", + "NodeID-4bajtcpjERHRChiaYpovKU8XE5qAE4usY", + "NodeID-4btZGj8TmrycK22kwgBK5wJEFighAFWiZ", + "NodeID-4cJyBoufFfiMShtRhcd6WGTYdvUCE3Ai4", + "NodeID-4cuTK1XYjm1VMTitd4MPBcwZ7LYyiqjfd", + "NodeID-4cw926TqMXDNo7QyraCShjWMXNSCDZqMQ", + "NodeID-4cwQT5hvhnhgYM7kJia4MK5kT8XLVZSNz", + "NodeID-4dJTYbWkF82oeT3gYAjoVNNbaga2zXsy4", + "NodeID-4dNrRsASZXHs7XdZ1sPuj6kKWs5bdqHZ9", + "NodeID-4e1C32U5TBUFuQpykL6rSw46RijbjdwRz", + "NodeID-4eaj7e8pXaR1TSuCF5n7tKDCHPPemZGky", + "NodeID-4edDKb3xKovZjTSuFbhNzJZ3Y9KTAzDaQ", + "NodeID-4fK92LkTyEUzPoDW44Bo9b5YvL5kJ7369", + "NodeID-4fwq4Tcz2ERoDWDzcow2JNmT2KaxtJrjY", + "NodeID-4gTwepTF5fcacXB7gdYZLTtfSFsYh4faj", + "NodeID-4givb8yw6262YETrNnm4hTSRPeK4qEBfk", + "NodeID-4iabwkRVrx3KWgYgFuDAR23EZna6rjKxc", + "NodeID-4jvscnKzJyMUpbsZzfoaZVHd2ez6Vj5ad", + "NodeID-4k9FLLYj63sJNrrGycj6MTRm6JqyacEav", + "NodeID-4kCLS16Wy73nt1Zm54jFZsL7Msrv3UCeJ", + "NodeID-4m1TLqY9ob5u23YrJx9zWDj11dz6DmV19", + "NodeID-4nc9Pi32BZwWxcM21U95mVHsbExbPSmkq", + "NodeID-4o17gdHmyD1dWGxwez5KqKdyLDfiYmPX9", + "NodeID-4oA58fddyvuKuRtqanZtp8V1Sz9mbr6sS", + "NodeID-4ocksMuoXQXnBH9XPRnH8j5x48cLLpFmC", + "NodeID-4ofBXitDMQ6QZi83yvPjCYn5LG6HBwqSn", + "NodeID-4pZqGB9JdyanFMVQEeq5VZ6YG9yZNsCF8", + "NodeID-4ppYtgngzEnrgBZ7JUVw8bTbaBD76efcR", + 
"NodeID-4qzj4sLxsLnmhhktyTyR3BWXu8nXTABnY", + "NodeID-4rqntan9eh1AmkCW7RNKwzVVB1r6hhnxc", + "NodeID-4soc4KzELnfnTLLw4FTgpEefZpE6aSQbw", + "NodeID-4ttQXnMSk443JHiMCRaoHBjjLArwEWbya", + "NodeID-4uHXbHesQbAzcDBRqaFyJ5kpgovHpU576", + "NodeID-4ux8p7PdNGBzrcwYE9U3gHF8ehb882VGp", + "NodeID-519R4edrXr2qJBZnFX7X9dJvo2JjJcesT", + "NodeID-51wXjrXuQkpHqgnyZ6pgQVbiBwTWgVQZi", + "NodeID-5517ZjCrBhnLs1aqw2YVqRopDnAu5z8FT", + "NodeID-56PBmfRBSmT2sYsPLiBPKVq2fQUVGEg9g", + "NodeID-56RnA8AJBddFSt2rrGu5WmsZF8qW4zNeP", + "NodeID-57VYS3UDdnkurTnK9WVzfpNT4H4V2fCmT", + "NodeID-57o8JLPG29tydZsDVGuD8iBZ3rVorYeoB", + "NodeID-57vP1ZP91HztWSRTHurGdrMSw3TWHYumo", + "NodeID-599kjqvHr1rEhyvT2mFA2whFJZS2Yoexg", + "NodeID-5A3vbPTyhDj14bLRCBs75HvsjMJ1h32FT", + "NodeID-5AzNzec73Pth9Acw6jVd2BYZWyY5KjV6y", + "NodeID-5B2Uyysf1nWcRbiCYdKwXzevVF5a7sN6T", + "NodeID-5C4VhhoWhTJ2LewfxQNvM6AKk1o5oHS3o", + "NodeID-5C5QEUprYutWMbxYicmQd8dUFhc4eh6TW", + "NodeID-5CDfrTrcGndVpY6qtuWsKWLm3j93GXyey", + "NodeID-5CwgaeHgQgWbeaMB2ERYZ1ntrhKCmSs5D", + "NodeID-5EuG66om4jxQDK8hRFyq89SsMqfdvESew", + "NodeID-5F6HiZ34F4oRtMX8PZoJcfvQiH7K9mGSs", + "NodeID-5HkCsXm4TQUb9uXxM6Cjy1WzvuDSspPui", + "NodeID-5KGMoDk3pwPz5LgQ8FTJ5jJXJFnJ9CuRc", + "NodeID-5L14VfdhVJvi4GowA2zgqj78bGriVMLxr", + "NodeID-5LGFWx2kfwMZXpyG52FkRtrfYfVRimjh5", + "NodeID-5PMCtewYFfdxWmE5gcxuZeXAQAxHqMhu3", + "NodeID-5PMGUqdapvGYEATmbQ48hMJTwcYyKFNDg", + "NodeID-5PeQVPLrJH9xiSQZ8hcmeXWtUzhcArEhQ", + "NodeID-5Pg156uQvovbZQ3F6JKUiyAg5MrdFseMP", + "NodeID-5PvfcnfPd3MLnpq4MujxvBQHPwtdjYk8s", + "NodeID-5PxpmHkfB3gNh3spZWuzEKis3DJ8WKxLt", + "NodeID-5Q8GBt1GY6fUToWZ9txSGzWr8QTDUXKJF", + "NodeID-5QzeGmasNDHSxzxBqiUQ9TBPqkkwzJWGe", + "NodeID-5RuEXmu7SJpf8bwuY17UpMpoEsGZRWnuj", + "NodeID-5S729stbM7nFyWHdsoBeFzo5NMUXaTrjR", + "NodeID-5SmDUGU8WwZkvxKMnjwKAYvfa9w2qEe1U", + "NodeID-5UW1pHAXDJi1tRFfGSEgujuExMztq6sCc", + "NodeID-5WCpR3DKFt9665Wj9jDCTdncLNX7AbzfK", + "NodeID-5YX9uqiPm6hmJEmDy2fUyvFEjvSvSbiWE", + "NodeID-5ZXpg581dpjG8AdoJgTDeXXLnxrQc9Wtd", + 
"NodeID-5aCHiSvLejirNRt8Xgw6SNzd1dDq3XviL", + "NodeID-5aQHgP4cTuyentoPz9KK3y2anCBYyG7tG", + "NodeID-5aiXb9CdCDJ6vRmJ6reLPehKFeKvdnHxy", + "NodeID-5bJYpPDsUq3JpGJrFjVRtr1GybXQDP1M1", + "NodeID-5bNomTz89SyKffJWnEZGXXdwvSnZanVTS", + "NodeID-5cL4dSEdWWKnzZxvg1rqen4M31YeZhFkf", + "NodeID-5dP5pp3AQeD9GbRPbhuyXfFcNsU8RsgmM", + "NodeID-5eFuL1vXb4NSxW4ZF16WZsJM3gyjS7i2Z", + "NodeID-5eVCCzU5VQXhj2iqpqneRxTUNSD8aCJki", + "NodeID-5eogZcASu4bDhWxMuUF6h7VFK5SCt3msp", + "NodeID-5gcdejFBQ3wMPFpo7qKUKBREJ8w3PRTM1", + "NodeID-5gmMRqNob9UjcgroCKH67bQb9PVwtnoeD", + "NodeID-5h3GjwRs4cndBMxxfRpuDMyrX9SF9dPoe", + "NodeID-5hNGcpQHUrBDd19vV5QcugACUAmwiWU3D", + "NodeID-5hSVoN9CgKTxGVj2M3pXBDBYtfZkjQF1D", + "NodeID-5idht8398Yg1AqYyBkTQf2kN7zFK1J7Qn", + "NodeID-5mmVqScHrzTiSa2WVjo99g2yzL83EBS6W", + "NodeID-5nWWPwUZPzdTJYGXJoWxzWj3Xm63vLL7p", + "NodeID-5naWZjifbRQdBoW5bDtAcvuFjKr3k6G2o", + "NodeID-5o4eWuhvE9cmScEoZHr8ngGebxa94rFLo", + "NodeID-5ppnn5JSeWMAznssuPMJujyJhzHkXHN8E", + "NodeID-5qr8okF4mnAB2JhAvaJuwqMGXN3gi9QXD", + "NodeID-5qxuSbJfb3xqgNQ2pFycD4ybuEH3ATg66", + "NodeID-5tUAsBWWmhHrLnjmieom6twf8YREwfosQ", + "NodeID-5uYQ6R4WF7kmGfraM9LtsUJG2CDmh78Lf", + "NodeID-5wCTAXLJSc5i9RpksPG5fFZXemq4dm2A6", + "NodeID-5wQr9SafAyQ6BKMjtTomLB7Bc4tg8iYD4", + "NodeID-5wUKLtxUEckPyuzLQSozcPmqVzMRx6v7j", + "NodeID-5wf3A9TPUALDHaRhufq3Ry4jfH3cA9SEe", + "NodeID-5xEL4zcSuMjZCEzY3WhQ81Sb1Pw4E5VL8", + "NodeID-5xhFgBpc99AjBEXqfzNAG3W8mX8vnmXWZ", + "NodeID-5yYNsLs1TZoj93f27GD1CBr424tZJVpVh", + "NodeID-5zENHPe3oP2SdU13oTAvxZA2cEupejYhD", + "NodeID-5zP7p1nk2KnAL4SHvgFWcF9Xei3Q13CC3", + "NodeID-5zWQQHoMSd29NfKUMziBuccvtt2aahnHH", + "NodeID-5ztBWB4caxbw121xfsXc9JqsknrwxrRob", + "NodeID-61nHqrF65PzDx4SeUmaxrdbBLHNxGfv1z", + "NodeID-61rJbupdhGqaumerueZFVVJT4Sbp4fciN", + "NodeID-62XYpHtuv92hHhJJXVd4wxJqPFv9rbWA8", + "NodeID-62hEBGiabBK34vV52kGT64T6S9QVcdMVN", + "NodeID-63JGDZZtRyCMUiGmqQcL2rxXx4MrbNzNt", + "NodeID-643Sjdbh4n7krQXPSNbSK491xfVbh3hqy", + "NodeID-64Y4Dq4dwfnhTjNnZEFjCexmc2Wwi1fyM", + 
"NodeID-64Zz8dh68ypXYWaUcfkXDm9UuG9VNXrk8", + "NodeID-65KSgMC8R7CjSQoyBc85UMjW9YZ1cTKke", + "NodeID-65zy7gGi1v2US2tkBFfjpwuDoWVPjPsZW", + "NodeID-68ZngAKzsRbzQMq4CpuZ41J7AgCDEijPp", + "NodeID-69vkfdsq14i5HZSd6m2kUwRAu59ePbeWw", + "NodeID-6AKFYXuxWErAD6nqqFkPnbwHzXB1JC4VW", + "NodeID-6AXAfsfvp25QX5Tff6Ag6ERLXQ577wurM", + "NodeID-6Aed6k5Zj6vffgNdvr29Z9VcdDxk5PoMq", + "NodeID-6CGouDLcYyiuBY9xEi4rEaHCoBzH6UQrS", + "NodeID-6DX3ppjVpWsV8EUihpbRh73K5dGXBSmGr", + "NodeID-6DqFjxDUK7nh68zrFDE7iHfVsXvoM5yzt", + "NodeID-6ETncdUXndB43iT4LijRYLHF711gWeAgJ", + "NodeID-6Ef219qf1xYjFL5SCFQo29ZYKDrcNi6xT", + "NodeID-6F2LZ5hykkwmnRYYabqHH19MnzknJ7rij", + "NodeID-6FSSZuuLEZ4sHJZGfpY4Wuz8M3qnK6ec5", + "NodeID-6FWuMoDyCe1gsmeQj2RsGBmYv1hoGUbzs", + "NodeID-6GEno5sempCQdCZvTEuPZpDLqVvpN2JEB", + "NodeID-6GuJTV6P89ZgzzYM1CJbsuMuVTioUMmuv", + "NodeID-6J3LY7ojkK7WCZzmArvxEozaESDMb42hX", + "NodeID-6K9dR5Dx8YkPXzYeNvDRBionRmEo6HGE3", + "NodeID-6KsZqkvobK4vUHJzo2VdwxWMsbHtXCZPb", + "NodeID-6LHDq7hq3PS1xBj7cFQuvxozaM2hJXheF", + "NodeID-6LRbs1K2iPGmhn1EBbX8u2udyPbUt5S5x", + "NodeID-6P9u3TyWn6jh381HSZP2uTyf1JdGBLqHP", + "NodeID-6PS1ECxRbQ5x3wj31utVWecd9Xkgcp8sb", + "NodeID-6PabAb3cYifZm2i5Trr2BCaYyy9TvYYKJ", + "NodeID-6QD1KNQkx6wj142Tv6demd3FYbghQvCM2", + "NodeID-6QpAUeTWNKyVo34r1LrCYpfNtUwUqXNMB", + "NodeID-6SwnPJLH8cWfrJ162JjZekbmzaFpjPcf", + "NodeID-6TG73ofz7EU4keEwv6jt2xLrpaFzqXnYR", + "NodeID-6TwryrqyRkE3evDcGCN1C95TaWzkXicQM", + "NodeID-6TzpKmXTYp2f1ok4rEzEAgP5b87FSQ9G", + "NodeID-6UMb71Emubx62ZEsNBPhhuEzZdqgGpKZY", + "NodeID-6UpNHJdkRQM9TRX1m1wb1sfNX32Ze7ZhA", + "NodeID-6WwTZqp76ms4iEUepaagpeEsf4N4saDGb", + "NodeID-6X1FD9gZjzSfGP7E8v9cxXBo2ofhHLfGq", + "NodeID-6XwiaBfuKwKG761Bc28jDAsiFJ2gDuVgQ", + "NodeID-6YNW6QK3JDpCaEnVNsG5wsNA1SvxT1dhP", + "NodeID-6aZ3KyGBPTcn6NF9K2jpvxTjhhDjks3Ev", + "NodeID-6anRg13mVkUeZmpjf2Mm41sug56BH7Jof", + "NodeID-6cRZQD9AsdBxSPSoHcBfqXeSAwzKNYsBe", + "NodeID-6cem8uPoUwQ6o45VYS3D5mmFFzTuNC2M7", + "NodeID-6eLQBH9yJUPLiZGXToPxQPj2gTWv15Lf7", + 
"NodeID-6eS6iDVjh54ww1pGHmCAoKiDhkNGi6xTM", + "NodeID-6edspGWTTSWm3N7Zot3kGrEMHPtLrK3VW", + "NodeID-6ff9kZ7ct7e9PvGKBfbMHJ3MJaMPCmyTE", + "NodeID-6gBKuwn1xHG9SmLZKprpSjWACGDUp5k9r", + "NodeID-6gFsNyzYU474fgL7t7x43Efjbak7Vx7Wp", + "NodeID-6gRx5vFhuMTDtaRYP82ZSGuBq7eR5s3jw", + "NodeID-6ghBh6yof5ouMCya2n9fHzhpWouiZFVVj", + "NodeID-6gi5JXkZSZyoZ28CHU6TDtJFVRy4e21W7", + "NodeID-6iLoGX7rEPJW2GZMekiNPEDNZKmsbn3iU", + "NodeID-6mSXCB3r7oeP8Suy1AWroDY2KEF9hT9Mi", + "NodeID-6na5rkzi37wtt5piHV62y11XYfN2kTsTH", + "NodeID-6nssFkEQTjVRoWGdMfkkYJc6wMNYdRg9W", + "NodeID-6o65ccWJmdcMSDkDphL2i9ajLCZRsvAfj", + "NodeID-6oBNaAUr67MFY9Gtu4eis7bsGY1RswvAR", + "NodeID-6p7pSJ2hoYnpRAXUt14cr1tsA5kEm9vXE", + "NodeID-6pCQAcsrwzFfT8sYZeT8cQ7f7aF8S87TM", + "NodeID-6pShVkG6mZsinZNWZr48xvQSuSnmnyh5o", + "NodeID-6pXaVajr1G1nj8Z4rEXisGUajXGcFPh14", + "NodeID-6pm7qiab6Vt5n47MdeNGuzKx5Jy3Pxwu7", + "NodeID-6pzGb1VVyQT4RLChXUQYfhvjLrqEmWiNi", + "NodeID-6qPNz3b5VZpMWkVNjmLmpBrL1sYxr38bs", + "NodeID-6rJMJqgEbTGJCv3hBzDaY3axQ3Fq98yFX", + "NodeID-6rjd7h5dPJwVEytA9zm86ZBfqEfBuSjKi", + "NodeID-6rtV2pPKXyf2Ek7nWnmuzdiXvk2Ma3ynn", + "NodeID-6sLdnpLBDDMG2eEzgdiGjTkne7a1iETEu", + "NodeID-6sywKjSw7tS2qgFAmLSyJR5D6oB2BHT3Z", + "NodeID-6tkBfGVTgfpbaiv5HFX8LpfKdu386tfwr", + "NodeID-6upkG4FRNCoUQ8nz2by26m5vdVSthXCtK", + "NodeID-6w8o4bbNdVLscimLv4X6BA7QuSqX37TFL", + "NodeID-6xPTxaGM5hVHVKKFuC2MjpCL7ZD6xT3wN", + "NodeID-6xQ5oYAQ348ntvsshCMMSd7uduReey2wC", + "NodeID-6xai7EXhzyxCUyHDrtLTYT6uRCNpUfRHB", + "NodeID-6xjnXHhLrLDafewh5t69uRqtHea75RSGv", + "NodeID-6yS1J9HFdHgdX2ruW6XMouQvECDF2Fta4", + "NodeID-6yf4tK8VmAKhQHC7tdQwy5FTw39i9mXVV", + "NodeID-6yi5sG94i8wSHhqFuuUip6RuZGkKDHzxy", + "NodeID-6zcpV968BV1e3BrZoPmcKuoUNRcP6vjxt", + "NodeID-6zytvHVN3xs7VRFG8RBhjWVmcvaYxWHDo", + "NodeID-71ZzUv6td3aKrZG4LhGDDAnjsEcEchwDe", + "NodeID-71taHoSLZsm1Aqe4spXKJvNXw3o7982Hz", + "NodeID-72D6UYvQjfgKuSrSFtMfyCtHq1jZZ2gN6", + "NodeID-72ncx4NEgf4cziN5GPDRhn7i71rgwH1wV", + "NodeID-74oLbJEptxh1cVzvh3QTwaykED3FXjPYK", + 
"NodeID-75PnenZJE9PQbQycTys2zonb1atGLsbX6", + "NodeID-76GDFDnZW2ihXCsum843HEDmrvcVuMDZ5", + "NodeID-76KcZZBhWfiWuFqrx21KyeBHdXxsaVyX2", + "NodeID-76nqFaqDGakCDcm7kDsgP7GJn2VSM33xh", + "NodeID-77cf68EUcw4pSLJcvjBNNgxUtxNoTHjNX", + "NodeID-77kWbGy45j1Scbvgh5z3zhoUBL1S3qjAi", + "NodeID-7843EeyboY1mZSmdzjqdJodxHycNn7Kv2", + "NodeID-793wJG3RHeLFVCXkNTQxsM1pSUHNyDyMv", + "NodeID-799wg9bHu8gGe83SHdioqQ6hUg8QCYRv3", + "NodeID-79NUUnbkpDfvGzGtq3Q9FArGK9D1Wzpgq", + "NodeID-7AVurguoWJggFMe8WBEBuTzZjmN8AmN8y", + "NodeID-7AnPDxUDnyU4p5YgCpooC6xx1tCfrMHhf", + "NodeID-7BvjRu2P26PSUXWzCSh8LLxLNdSpHEq8f", + "NodeID-7CQwXe5Vnm7hzSQeWDRXqUsGDc5vcPBfZ", + "NodeID-7DR9yUKc8S5d9xwGoaT488hRn9LiHhKTz", + "NodeID-7EEYbCpedmA4bgHnnvhx4zXgMUpKe5Gkd", + "NodeID-7HgUf4o1UiEGh2Z2tWp2erbkn5JaXYYZG", + "NodeID-7HsxnZQnGRWYG5jFVWDX2L26zEAYaUwAP", + "NodeID-7JijGwmqUuditkaCwdcCAr4Xc6AAMy89A", + "NodeID-7KKSDmFVJ4YXsdcnxjDnV4girXR32yRni", + "NodeID-7KvAj74Y1Aixv1CRWfNMGj73S6qsSGzhg", + "NodeID-7P5SGZLFp95WbF2yv3A9WbQaQDzqfsAFF", + "NodeID-7PUdh6Mzq4NfJR6wpng6cRAYDraESgWCe", + "NodeID-7Pjqo8qr8Gd78Wmzgo5xqF85qcafKQ7W6", + "NodeID-7PsjAayJQ4g5wiQ4QF3qokruC7aSUMqDn", + "NodeID-7TepNNPHs1pGnMS994Pm77YFdSKvghvKm", + "NodeID-7Tv2ubT5xnAXWNRf8X61Eyw7VcU9J1w5b", + "NodeID-7ULxX37k6SSaTX3oF5RdrxL9r5aGZu75M", + "NodeID-7UaxuvJw9C9FytVkUjRCQ3csmDiNzujGW", + "NodeID-7WDMJNKYqJ1yNbPLJrBZubqf6LQrtTw3E", + "NodeID-7XrrK8iL25SJfWrrkyz3gZSo2t5iEadvL", + "NodeID-7a1GnsoFxviSiL8gJyFqrkazv29xY48mr", + "NodeID-7a5FbgWRUJeRWPzyidAyL3ENC8wQDJ1eK", + "NodeID-7aBosQAfHykUE3rmubktp5zZtZmavTzow", + "NodeID-7bnk7Pi7ihTpoyKndMJw53PVM3DJ9QKZ8", + "NodeID-7c866Md3Geveoz9G9XvP1TroT4LKdz5hL", + "NodeID-7cwvfriG4LFWV4iF89E7GhYfLB14hFjMa", + "NodeID-7cyp41vXvj62jBRh7y2j6VjLXhoJ6Hoab", + "NodeID-7fJWXpi4Bj2XgeVXG7Tj3JkNNrGnnJhdp", + "NodeID-7g7qvfKvRXmPZAxhBMfj8XCGuWVVHexHe", + "NodeID-7gLtM9D45daj6WJfqrT6uUje4muznwKwQ", + "NodeID-7gqKK3aZ6m81v2138yfc4fsAdzFHsB3xb", + "NodeID-7gu4gthY5YhpmpUpQvuAy5CDDvDs2yEVt", + 
"NodeID-7guwrZEeTwM8EZ4eULA3pw1f4ipfYFiLn", + "NodeID-7hQFksPPW1Y5hZukNcQn6bTQjuTZPnj42", + "NodeID-7haZar18iKUdnps7YZEYbJCSwnFw4KMY1", + "NodeID-7hnK4EoWzm7V5qFPqJvpcRQti59au2BD8", + "NodeID-7iFGXSHq1R8MvEM88EkS64rW5z7MwukYk", + "NodeID-7iHpsauHYyQoCjMSZsXJkqXodq7J5eXTR", + "NodeID-7idSkp4X9MwUacdNn32NkE9SXo1XWveLj", + "NodeID-7jqH9PXuyYhoTaaQDAF1eVzLtCVSgmSpz", + "NodeID-7kfCqUsmvGEEfgas81FXNFx833U9xZtWL", + "NodeID-7o1ubnZAYsiV5DUD5R5oDYXkQQvWo69i", + "NodeID-7oHSQLPBV4EzZurguKU4a2FUvPxShuVzf", + "NodeID-7oKBFksJjAcBCNEY3ZcRLQeXLX7iA9dNE", + "NodeID-7oaj9Yf8Uy6PRhqjHyv9oUyLzFMt8DgrM", + "NodeID-7oyvSG1pLLp3zf2ouSAT6rUeehDuQcNtQ", + "NodeID-7p8akVY9Z56XkLykaohLpNGp9sU9PLTvh", + "NodeID-7pDhCWQKNrpCjTL6tB7uNvBUnBQ5amgob", + "NodeID-7qZBqVK1XAPXPqA6jPeDwimDqMTo1ZudW", + "NodeID-7qrESfShxxqXfa35MfBC8D62t6enjm2RC", + "NodeID-7rE4BjdFpwUk2igXEeauKaZT8mCjkt9Hs", + "NodeID-7rXi72Jm76kAKUi6BiyJChnAm9xuKrud9", + "NodeID-7sTpQZFCZVzhLTugv8PKhNE8tdQ394LRo", + "NodeID-7seuWZpGnhEox3dL8T363sghDtCT5UNp4", + "NodeID-7sitbRzmAsrFjKRitC7bub1soEaHXsgbA", + "NodeID-7stntWreGt1wPeFuY9Z8bNURuHxW4JN3D", + "NodeID-7t5yL3rJ5JgME7vxa8em5sUNTPeYfmMop", + "NodeID-7tZXydqQqbjAQaNDK9MNEZFMsSFCp4xJW", + "NodeID-7ts1Uhry9k5qkmpYZNfdivuzj2eBPemKa", + "NodeID-7v2CCw3NVjU9UxBhGzN1jhSmatHKE2mXZ", + "NodeID-7vWb35jCnXGxAhLStVtKKZhWiw1ByhBdT", + "NodeID-7wMUdN6T9awYChEoRgzQ4eg8qZj1BFrne", + "NodeID-7wSVhYSmrwcts5o98Yg863PoHhJWWQcbt", + "NodeID-7wqgzXaTtJLobuBUxbp8yGtQe75TxmaDW", + "NodeID-7wyu3EXASVzsJpRPoqhssRWdmTWa6Fycd", + "NodeID-7x5HbUJqp763SUw8Sz27yU3bKWCdQiqAk", + "NodeID-7xCJtVM3wTprzgDBhXeXCXEUy4NEuqUvi", + "NodeID-7yBDXjCcqHCnfqC8LfQ1oL9Yyx1se2Rj9", + "NodeID-7yUhoXtjqGABwMeSgQG42cWX56r1DkBwh", + "NodeID-7ynNyRtZx8re55eU1CwJ1Ga5cEaRMPJ5s", + "NodeID-7zA1m9YUevKKyKdyJK3snpSiAnXhUG12R", + "NodeID-7zQLzhnMhaj5SbRXVpUUsMP4c4r8yQAD1", + "NodeID-7zVSqXr1f2wcEnMtMRBzMh1VczoEXsu2", + "NodeID-7zeJ5VBLvTF7LvTn1VRZCJfdt5EsWUg3e", + "NodeID-81U2wpq95De12KchbFhatf2fBt6KTFmAr", + 
"NodeID-84RAL3MngQcrLpLgTgJwtLobhryyt6TiM", + "NodeID-84XTAh1VM9QmfdewNnqFVhz2kbjrFvK4Y", + "NodeID-85QtYSfGWxkvQjbiFJzv8R6ajAutwFS4F", + "NodeID-863ca1dX7NxyiWH6Hf3Bp4ipNokKdzJbp", + "NodeID-86HV89ZFehy2HHmnkpgBuUn6vRxg25if4", + "NodeID-86LjyKDK2eKVUpdesMPWJtvo9gadfW9m3", + "NodeID-86oHZkq55eBsPNFwrFX7kV5w97BPHhtgD", + "NodeID-86rtyxneP9grH5wuqRcQMSZw5mS1vqrKm", + "NodeID-88fsGoe69sPGtRjqe21SK72KrJNezzz5u", + "NodeID-88ik4ovrpTx4dTExqhXhSQ4JwAQnHbuGA", + "NodeID-89ucgJfiRvHtGXynRbta7VcaX6MEcJ7K6", + "NodeID-89zA8HBgsSce4Ut5hzWgfMRKshqooJWdu", + "NodeID-8ALyunjeYV85zBSrD2rjJdQ754z9jasvq", + "NodeID-8DYmrGzmxDPogtQvPR4YjXhRyo2pNDBdT", + "NodeID-8E3UaD6hjNK8aqK2BBUmqPXyrfB19WJJ2", + "NodeID-8Fiuf4Uucg4x3ijMfkyVvYXdfgdBFvsFk", + "NodeID-8Fn8YCu76VUHny7MHQTkm3mPSuGPkVeqn", + "NodeID-8Gct3pDyuCEY5xH1F9RdYjvk9ry23cTtX", + "NodeID-8HHLzk7pF96soZ45bJ1tfHNcSp7moXiTB", + "NodeID-8Hg2J3s2MQVf1GtFLcRpfWjvJZkMesX3z", + "NodeID-8JYJVijgzBsSTK5EUsXitrMEYNGkqeba5", + "NodeID-8Mk5Kpvp3oTpLbRSnwmCJmPbvRwsR48ra", + "NodeID-8Nr7TDSiK215oiNDY17SaDgF5nfty4KUi", + "NodeID-8PmuHd5fRb8VWpTTyNhytQuv3sy83P2jT", + "NodeID-8Q1uX2BfyLbmCCor1t2h9qvn414WXfQ92", + "NodeID-8QKheMcZ1jn5n9RVi5MFBkzGrKjFJNNAX", + "NodeID-8RRm8QkFnJvyuNk9GtXCwniWfHs47b8ni", + "NodeID-8SHAaWWn5qoBCFynwzXdRtFCcMvEkTjAM", + "NodeID-8ST5juiAQzVfXhNAgER1UapEpNowZyMoj", + "NodeID-8SbnHu41g4NFAk7aa8EXpgFSmQ8ZpvX56", + "NodeID-8TArWpFgH3sazEH8qP4gUjtGtFMvjw1aR", + "NodeID-8TubANvEGU3Zkz6nKeRx3ShmHyULLjD89", + "NodeID-8TviTXQLiqxcxkCikPTut9DwifYjFaMcT", + "NodeID-8UynAEU8PuuGetmjiiPcbGmvD3EyycuN6", + "NodeID-8WowWhvPM6hwXBLGkuX7HvwApmXNDCyrH", + "NodeID-8WvUBZyrk6XyF7pvcg3NJZk8jtJGi1W9G", + "NodeID-8Xt57NiwBsGKt3CjYZ1ymKx5h3nHU3vcX", + "NodeID-8Y4i7kpWyv3pvvwJSk4EsGx7frk5mU1Ah", + "NodeID-8YNtmpa8fe2dt12PqDzhDCBtwcQDTXa9p", + "NodeID-8YqEZ2ufMNUFPXyNpUAvLJWRXLKqC6D2H", + "NodeID-8Zvnq1815CaYYegXmuyBD5BVtJnurtpC2", + "NodeID-8byZLbVuVJbedEvsAjZxTTSqorsjz7ThK", + "NodeID-8cKy5FzzP2CHV1rcwdKq62vuYP9yUzYuG", + 
"NodeID-8dCZ9beUGmAWyCZPwdM8YZ5FDBRPXUxFf", + "NodeID-8dq5Vwc7PzWn5NYNFRkM8TfnYaJpLiPis", + "NodeID-8f27M6Ju9Dnh7YU5pHqpkBLhCmBq9EBBD", + "NodeID-8fLXgVYgby12VunVnSMrRrvcgEijVknbo", + "NodeID-8fq1H8oStMi9BZrEk8qiE8QAV7AtqGnXt", + "NodeID-8gndDR9ULm3ywRNbh3FUveaSyo5g7V3ns", + "NodeID-8iQxXS1Zbm6tGzfp56kpSyUboE5kSNoH7", + "NodeID-8ja1C24FM5FyQgUGjsm4qwpD6shiu71KH", + "NodeID-8mHndwCShG4xau63f5FMJC5r1kDCZNpNi", + "NodeID-8oREeth2haQyVW7Bu1iGgiMe18h5Pqiwp", + "NodeID-8qkeNyB3cbs7LqzP4AB7rHcrcHLAYVDEs", + "NodeID-8rEmSGNj5VW2HUjGYMyynpWTYJuDrMpFE", + "NodeID-8su1eeSjb4emswAx453du7ya3Pp8p2wSe", + "NodeID-8t9KLSgmSQfW668sfoLjVrrur3nDwW7fC", + "NodeID-8vuV1UF3KHF98EdqFyuFAMDJUzwkQWEA7", + "NodeID-8z8rezdB9vXDxBurBeCYnNBfNcD1kVrJP", + "NodeID-91W73TnEik6kseSpBHSAZzsNVRGk8Qfkm", + "NodeID-93LVjgmjMcBmcJSByxYiTuyPNbdVw9q3n", + "NodeID-93bNuNqHUpmiZiT9bvcm4YdRU6ADQnESy", + "NodeID-94CDXgCTFRPKPDmK55UcfG8KqVQfy4QsV", + "NodeID-955GU1MqWL8yXAtoc8AsE7FNx4nGC9JyL", + "NodeID-95rcDYyjGNKckKCF8PKuTyEV1wxkQvP3n", + "NodeID-96tBuFHLkXJwyYug768fE2GDEFmebUUTy", + "NodeID-96tjSzF58iKJUG1hDQve6HJdzjwLNMzFW", + "NodeID-972yzi4wAgqU7RGmcQGED4ueQeBXzccka", + "NodeID-98JuUPhpQBgiY9ozqFTh8rEjx3jL9D3aJ", + "NodeID-99Dk6vNf7RaX4S9e4oAAbuhubzzRnrGxC", + "NodeID-99hQvpQKJi1y7gTjD2icHKYwk1pNMHFhg", + "NodeID-99sqQME296SfcnyYWTmGrLXQJxaGUKAA7", + "NodeID-9ANDYM7So3ZPNwbMMd9xBKUiyJirXN3vF", + "NodeID-9ATz2jx7Vh64A31CcD8PgJw7KLZy92fRT", + "NodeID-9AjYPCvDWKnwzFgRiy7jeTEKmVxK42LSz", + "NodeID-9BCUhY6Q2Eji4J1u6MV9XKnPspciWWdLR", + "NodeID-9Bb6E7B6GMd1MnAuxYVVYF9CSf1XiBCEY", + "NodeID-9CkG9MBNavnw7EVSRsuFr7ws9gascDQy3", + "NodeID-9CnrQBBFSkE2Xzfcz3Tk1e8iauq8iNR88", + "NodeID-9D33RPjZwKx3gV6MVs77z4uPJcKgqy4ns", + "NodeID-9D63iEsgkSKzSz1aBLpELbh47Y7aT5ujj", + "NodeID-9DCqby2EWyUKFsKs1VNH5mX7V9FDkqUnL", + "NodeID-9DmoV3n9Sb1A2aLkYuv9wZV7uPJo5JhpG", + "NodeID-9EMH1m2oZQsbSFvrMzx5YXZWFfqSwjuUx", + "NodeID-9Ef44CXDWWfP63UaLr8XSCsmEWUFY5i2z", + "NodeID-9EizvTJhGPtcwRExBgYySaJMt6syztj2X", + 
"NodeID-9FZQBRnW2jMU1PW99pqVTnxkPHF2Srfeb", + "NodeID-9G5DhsoBgHjDA3j1FqFXKyEJH8AGPeB67", + "NodeID-9HT8KtMkcxP5aWjmxaVVGBCMZ1r6jGmgz", + "NodeID-9Hp3rH2xzTzDoZj6JgYqhbmTFGVeS1BnL", + "NodeID-9JE3zL1Ud1x5RgSJvqRpyMG6F17dPwAdc", + "NodeID-9KjtB2xC9aehpcYRuqJR4myLGJaq1tVse", + "NodeID-9Lyd45k1mXNkksF2C3nwgCJRX9YY42FT6", + "NodeID-9MCd77R8qxsNg1ajgHqFMsRYCaQ4KpGot", + "NodeID-9NEeo3Ayq5qGodBbMZ1gdCQ4bmGH59BBo", + "NodeID-9P6CxSTDfAi7cRr4LUW4bbdA4F1nK89T1", + "NodeID-9PbbhiQbQitcLLEiVwMCThVLWj2zFsbp3", + "NodeID-9PzVetjSSSY4Xtk3VnNcGzQdEjZSSkR4g", + "NodeID-9S8duheEvrp7gKW4qLLrWVCnC2wvo39w8", + "NodeID-9SQzWXUh9SF4b4qDmYy7p6LcqWSgxtu6o", + "NodeID-9T7NXBFpp8LWCyc58YdKNoowDipdVKAWz", + "NodeID-9TMnzP54UVfmYmYcxAzVuNC9rpVr8zvgB", + "NodeID-9UWtBGvW7G2RVSwcR15MS8gesUvLk6f24", + "NodeID-9UgnxmEX92MdaAFQc7FG9SAfPECeT6Qp3", + "NodeID-9Uis3D51cjQauBaBPnqsvKjKg8Byj6Xwb", + "NodeID-9W3QhxVhg9EncK9438UUepFBrCxdtFp2w", + "NodeID-9W5RaY7LA6so9hGCPGfLezBXMnWNRQb8V", + "NodeID-9YiwE69B2wcvXtEW986YcgQxGg9ctzmKZ", + "NodeID-9Yq2bF3B9p4Lg5czRpYoQULLuEn3nwuhm", + "NodeID-9YqDsggitZTgCi2WnnrKBz3jAcD1S7oin", + "NodeID-9Z7jK8jN5FsTXSqkG1fV2QNG7GktPU8az", + "NodeID-9awX4ceeXavKKmccnAuDU7uvhqGfaEDSS", + "NodeID-9azF97hpcwpzj3t7S9mD4VKY1S5eJDrcW", + "NodeID-9bU9jwHLH6KxcTu8pBbqJQqkHYR4woY7L", + "NodeID-9cEGqGSwjjX23qXbP4vEqva6TwJqH4hAH", + "NodeID-9cazV2uY7efo1f9bjbixH87FFNG5wGyts", + "NodeID-9czFqw5FRq7WhYyf9SBSfq8wsJ1f82m53", + "NodeID-9dN9NU5XCr3WQGYnFjGudKRScQ1HKGmBn", + "NodeID-9eH3Zu7LN7kNwvUy3Am8mkgvvekrkxD6d", + "NodeID-9g6keiiMr9vXhaSNP3oqdxhu42LEZ3pWu", + "NodeID-9hW5Lb3t5JMgP945DB8wHUJP73uLuDLAZ", + "NodeID-9hj2xhiHktPCpcD9koLLH17a1GKoWY7QQ", + "NodeID-9hksERdA8ypkBykmHuch4mw24RR2sg5js", + "NodeID-9iGXJ935QZWkp5j2v6KNSTr8b45a7Xbfs", + "NodeID-9iiCATASsRcosMQ811KpxSMHsGdM38rPJ", + "NodeID-9iuMMyL7LZyUxMK9s735hcRng58bSj3gw", + "NodeID-9izFTRjPnyph3GJu1sp4Cf7zHiwaK7KbS", + "NodeID-9kNp7yMjHnDg1QLQboms9ACgQ7BGu8zpS", + "NodeID-9kgmvmU19mNsTxPS4f2F7ZiSZSwZ7dfGk", + 
"NodeID-9mtTuxamXQyiePCNyheFEyjr4cAQo4Wbg", + "NodeID-9ntqrySGMSMwqYR8yFdChFdU2ABj5zSEp", + "NodeID-9oP7KmQjtxi7rK5GKmRXFNuNpPmr2vzfw", + "NodeID-9odH3Jbw7uPuRrbaVkVoYeitdzwX6o69A", + "NodeID-9okkd3AwB3eurfQxdP46mcSM5sVhD9jxm", + "NodeID-9opRroLX5zG47LVmyw8hGnQJx6dxmfGsf", + "NodeID-9pBxdzsJRoGFdAjEJzcdWbzKEEkdzyGCr", + "NodeID-9pTnTcJZ714TcKA5ibKwjc4ztbxnt4YR4", + "NodeID-9tAAt3ADXpmStdjfpUKsgKCBowaPjXueE", + "NodeID-9tJaF6mjrwczuEZk3oQRJsB2AzPk7QFHn", + "NodeID-9tLM44jzwQqnsLmQTfhv6Q2GaAEkCdev6", + "NodeID-9tWnniKF15vWEqJa6o2hjmL8p61jq4ZmW", + "NodeID-9u5MGjm2Y2BvQSZBqwDkqjUrqFKffDund", + "NodeID-9ucNhNpdx1QhUnivTY35AtNEVpjY8AAK4", + "NodeID-9wCVwfdLGJe14xss2yjAT8ntowavfe6Cu", + "NodeID-9xTBvp3kCUiUGu8aM8XpBWPf2co3vJTJN", + "NodeID-9yFkNXGSAUgPu3hB4UxkWGhbp7cXBLaJN", + "NodeID-A1rgjFoH8JTHiHWoWYVcDDWC3WKnNFFi1", + "NodeID-A21bAN3Nk96xfoAh8auwqP4uQRbk1xAPz", + "NodeID-A29C9b97WqpgPBZS8Kh4NEvf7RLRm1cdZ", + "NodeID-A2aPfvr1t99Nm1K82U2XyWtxse3CNmZ64", + "NodeID-A3azVkFN8rEsnnw7yQoXHDze3gVSSF4YB", + "NodeID-A45ZieXX9VhAjrz2jhACBGYYypn2HEjaS", + "NodeID-A49YH2FmGteFpSV46b9WDxZWDEebAnH4q", + "NodeID-A4amNjJyWX5kd1wNhxZVqDFqWKomCndp2", + "NodeID-A58AWZohmoEPEG2FDjqdT8m5XeUL63FZc", + "NodeID-A5SHWeGWKUiuvPZhRvNAXKyiEnyDhJzdh", + "NodeID-A5y781VL74i4yfFdcMLnjxr2qLbtjaAgi", + "NodeID-A6onFGyJjA37EZ7kYHANMR1PFRT8NmXrF", + "NodeID-A7GwTSd47AcDVqpTVj7YtxtjHREM33EJw", + "NodeID-A8jypu63CWp76STwKdqP6e9hjL675kdiG", + "NodeID-ABHKUic6BDS8Kg95tSWoYv71CXWeuS1Re", + "NodeID-ACB5BBC2RNLSGZVEGkRRQdWg8hqXjnTpM", + "NodeID-ACMpxS8rnwkAA4JfCUm4vNoWQaznNCqGV", + "NodeID-AE67987jikbcv1cjf612huYwFgx7RPAZG", + "NodeID-AEtF29pZDpKEkZ625bg3reDrux4wyMjhh", + "NodeID-AEtKZyYiqnWqzsrwSKEPWrciPHEAn2Q2T", + "NodeID-AFWbdZEPvWxEq6A5SThDPVydUj9FSHjCQ", + "NodeID-AFZVPKaEHwRZgWvcqRpT6mTd4k8xDubGg", + "NodeID-AFnD2mSUo1cB9HSMxGT9hQREHX6V2aCmm", + "NodeID-AFtmZMo4p7AZVenJtT9D2Tbk1od5mrJge", + "NodeID-AG9hZpGAPNtzpPJWKmJHDjyhFppqM16uc", + "NodeID-AGEnkaodYJksMTnwS8LHHD2X7MDQBSMJU", + 
"NodeID-AGurvCTxa6JFJkk3553Wx8XPtgz6w4xMw", + "NodeID-AHTFPmvMrQBt1hcoGS7HugNCuu15Q5gY6", + "NodeID-AJR4Z9bAx8tom9ygqv3PkpQtdxDV8JeLD", + "NodeID-AMMTYTe6uZR7yGL1AbEMuwYayd4N1J4bL", + "NodeID-ANeoZC7jBSmoDSYVLip5uASJW48GwWLLG", + "NodeID-AQT1i38PMJQxRqjpwrQ98QESW3EJzG3p7", + "NodeID-AQckPm8yWZiWwSAx4FQ44J41Tp4yF4qME", + "NodeID-AR44a1cyPrD8cCNZgyhv7Tc2Lvw3ryhEu", + "NodeID-ARveB1Km8pUjtoTVQL6e27fMHfUyyrjjZ", + "NodeID-ASgm7N2wwZKYc5dZPiHExfDdNcfyZQx3T", + "NodeID-AThE3ad1aiPLKsj1XfmhCsNbai8sMyPc", + "NodeID-AU27Fz35YmQuM6d2pM9HsvLtgwGT5xwnd", + "NodeID-AUeAjRR7q12Pps6Qckyy1PUzFwP34pFij", + "NodeID-AV1uDZqWGSheRS3M6RHUbQJHDGXy6f8Y7", + "NodeID-AVAXg2LFFdkWJjZi4554GBNprWUepqKxm", + "NodeID-AVAXnRLmetkBadrUoqtLSVkohnoEpeSm4", + "NodeID-AVp7QsomV2auUPqPXQesRcuKwxn4JmDRh", + "NodeID-AWLRFc3R7oCqHd6qe95QsEaoBPPTQYpuv", + "NodeID-AWMoYmhjbfpWphJ3nmgxGAPpVFzQbdp1G", + "NodeID-AWirVtsoiwvNdqhqQRwz7vQQEiEyvhq3y", + "NodeID-AX9Fkm7jKPuY9Mit6AyZoS73tFKWvTMF2", + "NodeID-AXPrBC45azzYv7UKCwLYJhidPkKat2veF", + "NodeID-AZPULN7CsYdfSq1zepSS3CZNhxJsU3a4n", + "NodeID-AZfAXX6qyLxVXBUJZPwEgR1iJYwjwozm8", + "NodeID-AZiGXCQ7UJvsVjmBpUMmXAQ12GRj1iaz9", + "NodeID-Aa6HmCaYwZzNoNXRf2xd1aqgtaWdnnryn", + "NodeID-AaVVyiopFyWeLNfshG5eztnEGSUQRDmnn", + "NodeID-AaxT2P4uuPAHb7vAD8mNvjQ3jgyaV7tu9", + "NodeID-Ac4RdT7r2vj7J8s68DB6mpoqPQ9sq6oro", + "NodeID-AcUWiUhdsfAAD4gyVUfQNokZXpDD2SF5J", + "NodeID-AcVHs6PgeipByAajqn3h7PHDk9cZXDZ2K", + "NodeID-AcZuWDkVDZy32y8YSxMVU57sLtkyHbVFQ", + "NodeID-Acigg3i7aU9j7coVrns9SEtyq9dox6cKz", + "NodeID-AdSMHubbQcGeJYMcY9nufaieoYjh57uzp", + "NodeID-AejE54hVtRxSm2e8KW4zUHpBTgHz9fpEw", + "NodeID-AgDYXBZcELb6jPp92Jk2p9qt2R2jbGweF", + "NodeID-AgfJSHN4yaVjgxanEWMfQXKGDNScwrVZL", + "NodeID-AhFT8H88EZjd7K1dhrRPfhatXKuz2iBiY", + "NodeID-Ahq7qT8wG6ufLoM8MrvB5G65SbEfZC85B", + "NodeID-Ahy3pk9UZEeVLzks1BRSqsLL6zvJt4bN4", + "NodeID-AiVA6rmmPhouiy3ERwF7k1eoLTrPErd2p", + "NodeID-Aix93q3XbWsEcoNuuWjYuQM49ZeCX7NLS", + "NodeID-AmFfzQQtUYm1RNuhkQuoG6hzaXpYYbYtE", + 
"NodeID-AoGM9c6bKC46YpEecEe9tHG5HfcjqPxtb", + "NodeID-AoU4gTzEVKVmopKx7s9hKXBbQ14VYyAcc", + "NodeID-AoYvCZojkKKkDiUjY3Daq5z3FUPrhMAbB", + "NodeID-ApohQPUqvLAFGgH3XobyeDFBHEz7vbS5u", + "NodeID-Aua1pVxQsWLvTTf8bNVw2ZCA2NfogNqiE", + "NodeID-AunfxeXeb5uh5FbxYMr4TtEpbVTPssmmv", + "NodeID-AuqWFe8yzJxPgaZShBZpa3AhEnKT8SmEk", + "NodeID-AvTYuKYvu8oB8dXCbAsLh7nTpCvih4fdd", + "NodeID-AvYo16nRcdMuFmsEps6STb7faiRoCRgHx", + "NodeID-AwBfjtuNZk2Cu8wmQjYsNipqWK6nYMnv2", + "NodeID-AwmHrKdtkhnuEVY5TvbwxVksQ8dWd4a1P", + "NodeID-Ax97guVgJnjpDYiZkHghBpjELMVrbhKLq", + "NodeID-AxSapUskp62c3KkAnzd2WSDammmNXaG53", + "NodeID-AyByRGSrfbDaeW4njEYwCzhsnMyZ2KMcw", + "NodeID-B39dR3Zj4ZtiqSHTPLxck66yqxTZ9pRmK", + "NodeID-B4EU3Tk8BsVczMpkPtJAqS9FSYxNjYNBz", + "NodeID-B4Pubug4ct7TKPRuSAWGqS4Hq79v912VF", + "NodeID-B5MJkFe7GoyrXY3MqrpjMrrTqj9J5oprQ", + "NodeID-B5N5hVfEFbTrG5QGKvXQBGqfvR9GJvSLg", + "NodeID-B6NVZDFoLAErvcUafQ3EzJeynFnGffCQd", + "NodeID-B8RQVdW7nFrVR4Peh6oMfsdK8Muuwfeme", + "NodeID-B8rRoKwovNH4Hv4cgREBqjHcGSCN5Lrcq", + "NodeID-B9dnZzwTmygKPxEP7GvyqDh1F6pQHkmmi", + "NodeID-B9o1a3g4PFp4JWyMJxRv2nvnWHsq1rSvC", + "NodeID-BAePtWtdsqk26YVfucEAJTA9spGpuLf9r", + "NodeID-BBJP3fsJ2sRRbB3umzz8UXAhzrUZJmWqH", + "NodeID-BDFwWjNUNeLYrrfGmYqBMqZSGZQtmCcxx", + "NodeID-BDnEyGooDS9w6bk2Ty7UncMyQ8t1iLLqh", + "NodeID-BFgsRa6TLCKcHZhN9iuufSqRengbXuxQy", + "NodeID-BGEvsCNRi5gr4ga83cVx1E7PTSiBZjyhK", + "NodeID-BHPTN8HM5xA9HNiwUPceNa314kh3YnDpp", + "NodeID-BHsQhBq2NYN4YYB9t9VxoxcL2xkCD4zPj", + "NodeID-BJTip339hEcVzpgyNqzERhfjMc6sUFabj", + "NodeID-BLjWtGBQU3PxgBSMvhiGxHGZ6Scn1gyyB", + "NodeID-BMaQ42mVapxbCcX2RLPRTeRW3th91XdaX", + "NodeID-BPsD5nsuqTKR7ToqUMDFqanYioFnAVo8C", + "NodeID-BQ6uwBhrSU3mwDpguw4rGPXa7kVkk6FyM", + "NodeID-BQEo5Fy1FRKLbX51ejqDd14cuSXJKArH2", + "NodeID-BQpbmskmST3wkcSbypVuFZu7YULXG6pKR", + "NodeID-BQsr3wEWirFH6C81M1LYsYagHXVxXRFSS", + "NodeID-BR8ysKTDMKpwbgH9fsfCdVQWMsniMQBWp", + "NodeID-BRRp1FdWN4PepbmQNUksWPsRJaZ1mFP7q", + "NodeID-BRrFzQrJs8nnmeSZxWwr9yCRcq4U7resP", + 
"NodeID-BSTCzJUvn8jYqDtsV3aTjRycJHBJi98Zm", + "NodeID-BSornzAY2Y4SmDcQw4Pgsf5zH4Wdv6d1S", + "NodeID-BTUXn6xfxjtA8P4nXYHiwEJpso9Q9t8FZ", + "NodeID-BTybEYu3y8aW54EK9SSivLUYn2fiCosjD", + "NodeID-BURrZL8SidbaotZ1cNxvPxNH5DGvXS7gN", + "NodeID-BVSFJU3T86CWXQxSTc2Y7cHo7nYJx1Jn8", + "NodeID-BXTBUqX8gitUDtVam4fhRWGD1SfeHGoBx", + "NodeID-BYGnq6ZKg5ncN419eBsxd2YQcxoewxfrQ", + "NodeID-BYSrjPkpQsN7YxR2v6ToGhmD6VnpEz9uR", + "NodeID-BaPYDXQYeS9aCFHv76r38gUhYrtfpzq6A", + "NodeID-BaTDuWRvZkEy8gg4FRYmNYXASWL4QKzFD", + "NodeID-BaaUYiEUD9ogi7dDikVZAzaVu5yxAHjGs", + "NodeID-BagXwfWXPpzCt2utVf1P5MTwdQsPnxeaL", + "NodeID-Baof3ssuxMwMHxd3RXpNwiWmRdskL3hLD", + "NodeID-BcfnNieXDpsvkMApb8FcCmGAoQdtyu8jH", + "NodeID-BdD2fp6PXxbSdGshiVrHDzarWpgLFEMF9", + "NodeID-BfeJfXUfFveZPbS5tFaJar7X2uQQ7S7so", + "NodeID-Bg5GpEVehBRU9HDf9e66vXmrRJ39BW1hG", + "NodeID-Bh9vBw87fdqVzMzrFrPiTQqA4irjDP6G4", + "NodeID-BhSpBoma36qXxym3XPxM2E9xisXh2hBmL", + "NodeID-BhbczAQnAARsn5Cwa1onMXLD6j4Uu847F", + "NodeID-BjoNuhod6yNfMJJDJgyfNWWRBUG38ZPCs", + "NodeID-BmKvEFG1hXNQ52rPBTXwQ3ZxEmcCmxXZx", + "NodeID-BmmaSGfGKxQNMsaBSnxvhQdLCNchGEcUo", + "NodeID-BmtQcEcSHhUJzkjKQKJKdF3gQaVtud8s7", + "NodeID-Bn4LPbETjf8D9pZE45abJgcCk5KSQCbo7", + "NodeID-Bn8ZcnL352FHc4utgVPVDjD3o1HfYwtdA", + "NodeID-BoN8r3MqeJiqVcvrQx1VubZMGDQE5pJi7", + "NodeID-Bom5dUsayGwdagVLoNNxe1t1FFDZXGGK", + "NodeID-Bp4nG1LrP7JCg9eSQpfBE91mBSd8biThQ", + "NodeID-BqCmioAapUYdqLL7AuWJqPeRrp4D5a1ag", + "NodeID-BqtSwNPmkfSi5W3Zz7CRffeLmYrUYuEmJ", + "NodeID-Br6QnUhtZGAC4CymYLPRSMCrsNyhLMmH7", + "NodeID-BrexWemEB1Vgpbpnnriy3k8e2CyRvxsPR", + "NodeID-Bsa6pnd8nUhHEQ4v68GJ6fDFwCpz1QSBj", + "NodeID-Bsgdx1HosAzQ5X1M7JmbYngULQ3xjkP2e", + "NodeID-BtcHfm5o3A4WhAmkTm6YPWRhPCChDPLMM", + "NodeID-Btr5hkRibMyYztC9jQg5T3fpKNeYf3Qdq", + "NodeID-BvDJUkrDJotTxgZi96zsEe6iYQBLtz958", + "NodeID-BvpPGydG4inQCbkwRZ3nPMp1zAGToUTxz", + "NodeID-Bw6s9DYKi2V9jRqNmaheGGg2Wz2Dv7DQo", + "NodeID-BwCLnFCE3ipnmkT3V814hGsb6sCRFqScn", + "NodeID-BwsGZ6YpSqF2j3QRRiQJDf9JC4FmCGXq5", + 
"NodeID-C1oRu2mGfJHUD3UyNCWVJnJ4T6AWXhj5Q", + "NodeID-C3aMR9tsKqTNQra8FbqpFkMgHV5DtuJGx", + "NodeID-C4emGUFHEeCEZgoeMDHZddr5nyZBgEUKf", + "NodeID-C68ijbc3Bz1cdoQv4Mh1TYxrzeGA3PnmG", + "NodeID-C6b58cQGYSqm1FeQyvXKXUgZ3R7Q9D9es", + "NodeID-C6fd6GFYMnbD44LpZR6AuddMDRXTvR5Nn", + "NodeID-C6poeYWySHhDuKWMbckvcny24gTGKEtEK", + "NodeID-C6zVv1ab5JKn2j8DDCGWGai9jgWhmpLVA", + "NodeID-C7VsWUVsE7uFmSgkRQpcjAJWB6gRdm9Pc", + "NodeID-C7YyYDva6rvbpY5GEu2Su653eLBNt49TA", + "NodeID-C8b7aGd2caAe7PgtP2vnJcWta1wUezAAa", + "NodeID-C8zmJazq2HwujDgsjKpmYJsvSuNMD1kwt", + "NodeID-C9Bcji6rXUp22qyWhvYKm4v45hJAkzvpU", + "NodeID-C9CJ3oVhdeLf2Db71JPuzvEUKjy3BdoAA", + "NodeID-C9eHQ7iCCpW7zy7Vk5UJEXd1zwXEdFtS8", + "NodeID-C9eewJtL6WMZi2ce8YoPxhHTCYemhvnKm", + "NodeID-CAy5q5U4AnPR35i225a2QXM7ftYvbZEEg", + "NodeID-CBPbPrYDbghbAP4RLSCQr7bHQqHWh1c9Y", + "NodeID-CCBWkqmiTdwUCR9ExH1mNSLzvrt9JRKF2", + "NodeID-CCPtDzjuJWjt1VuV3yiHPwQr35qh2rbJN", + "NodeID-CDjuct2YS1o4d14nHtszWmyVUmFjo6AHm", + "NodeID-CEHgpbVVJh676muJTUNKzP3jmZ6Cs2zar", + "NodeID-CF8WKgeNBHxWFmijN7tysmmRSHfJFEKD9", + "NodeID-CFoWfGyYw9HPuDK79fjor9d96S9wLAbpR", + "NodeID-CGrPauwXiEvvY3yLBxR9HM25B8HpNTVfV", + "NodeID-CHxae1CeKzs6DNxur9rHcjSAecpdN1p9Q", + "NodeID-CL3N5kkR7HsmVtKavcuofwQGgMNjppY4N", + "NodeID-CM5KJnEvKQfjFirjbUzHa7WbanH6dLHva", + "NodeID-CMFSQ9wzby52sJqTBeajH2MaJq8HwnZPL", + "NodeID-CNPUVCHqT8Jpjpix7SJfXhh184EjPSsu6", + "NodeID-CNyGDr2EvPqxfybfNLkpGWqMQHiZthJ3Q", + "NodeID-CPWtTaJjKj3uiQnNKeKVk5yKfEiE5mok8", + "NodeID-CPkJH4zTnyhERzo7H1usRiSmMVDeUszm", + "NodeID-CSmSE2veuHYG3dJD9WuqobvsAdJ8iF5pF", + "NodeID-CTry2Hj2aXeA1rfWq1s6pDL8SzaCFGLMC", + "NodeID-CVJCnaM2gVKr6WCmJxKSPjEA6GFtmBknY", + "NodeID-CVXpUPtPZnsEM4mSnJbctBpL6Sgj1rCRt", + "NodeID-CWTULAJzqphbmedDkRpcHuCvGiHARvodz", + "NodeID-CWuWBhX2WaAY1pBTAmLRTGsjX9y3EqWAe", + "NodeID-CY9BmJwkVatHX86Pt3Fkxts1QoKTovyAs", + "NodeID-CYdCC4uTyAmEHW2CNB6oivzBtzqJn55mS", + "NodeID-CZRwtJDY1KeQspzxqmHfSdwvhA8xAeV9K", + "NodeID-CZVcr8ZP7njdQynukR8B1RcCcG9qxSdD5", + 
"NodeID-CaNQUnkzWJFcsYNhdx4iyRk31McfqtKhX", + "NodeID-CadcH2YHzeVWDSBT2LR9bfqHvMVfH6bTs", + "NodeID-CcNi1r9gNk8NtaNFKShCQdahthUEpZX6R", + "NodeID-CciiJQMXRWQ5967qBS5XncuEiyor1Png1", + "NodeID-Ccx7u3SwfPt52GxWbNc74kjVJvqktTkn8", + "NodeID-CeaG1icxFWcMkTzsqQjpwt82nePJrk5gZ", + "NodeID-CfM5DQx47iD6AWMsFdHGfWUnt97Sbt9n7", + "NodeID-CfT57H9QA59QT6s3QZGi2HZz8TH9i8V3y", + "NodeID-CgXPRNG5FeVSz8qC31u5UDpZ51pJ2mN2X", + "NodeID-CgbgB1UFpUESETV1CVGDM17bE9JA4tVHK", + "NodeID-ChjgtCzFxwhCYQGsUzGHUUTi9nFYrLsXh", + "NodeID-Chk1hvm92LQPActavSVqoEYCmXfQZeFvE", + "NodeID-Chy78FPagNNU7oAj1QWzrdPFjJgZP3Scu", + "NodeID-CiKdcSyNH27re2W17ygscpZ4xG7474E5U", + "NodeID-CjX7mnJM1qasMtuyyxeuKoKy6b9fdSSbE", + "NodeID-CjooWTHxne1o2w1hetMGgDL5Et8JzKMiw", + "NodeID-Ck4VEAv4JTMgnijLQDeGCWST8Zdgxe9Q6", + "NodeID-CkMG3CThWnfAYCYE1MDsFqSU1n4NESoFt", + "NodeID-Cn4ocmPe1Hqo29RyYCC1QXr8b763Wpw92", + "NodeID-CoRQHCd3aSKe7FrhHSUnvNVZzW52xu2YA", + "NodeID-Conr75BBUMBFNXD2VP6gvEbCRihCvjfej", + "NodeID-CpAhCB1gu11mLxBZJfgGLExwwvKobprLQ", + "NodeID-CpS69JexgMfH1sy7a77fiwQgN3c5QtrXa", + "NodeID-Cqc4sovCY4ypZww5envqKgtrvd6zrRPTz", + "NodeID-Crfqdz3qPavSYvQUWSXMawWjUXxnr3Gg5", + "NodeID-CsGzEmegg1sVokBzrSKkMygKy9YCVEzkq", + "NodeID-CskPetRMvtH5Xr6gLa5cwfY4hR34UgkM5", + "NodeID-CuEMaXrtX3tdegH3BPJowHr7cMUyf7P19", + "NodeID-CuF2PsXiuZESJWa5itRLmXWQ66qL1NSE4", + "NodeID-CzW1jKjZwFXgkB2Ria2nNPvah4bMq85bH", + "NodeID-D12qpTYhB3feogxvMBzS1QPvwyKyhRK8G", + "NodeID-D1H3GmiDVvoMsp3foTPmWi9YPXweYv9fn", + "NodeID-D1rgZbjdrevpXXDF1orhAvnUmojVfeZwM", + "NodeID-D2hsJkh6oAf5X9WEuNgbChFvkeUgC99Ns", + "NodeID-D2iAoisJf33uGaaKh9uCUMrEjN9HG5dib", + "NodeID-D3onstuMsGRctDjbksXU6BV3rrCbWHkB9", + "NodeID-D47UrYCDdkfrDrv9gUvYKLUyT44TXYCJZ", + "NodeID-D5aUrNtb3vKUBKYpdapiaGEaBTmUz3Jr2", + "NodeID-D5wkgZDwkAQVpFNYtNZmGUJGnUYRB97RJ", + "NodeID-D5yA7HbEr8AJ4yzwVZXsDtb4xhKkTRz73", + "NodeID-D8vyyYopHECxjFCQ2K4X1sttJjC1B7zva", + "NodeID-D98ujhSFuA5pXc8bnKPPpbP1VNFnfESVk", + "NodeID-D99fW2bCQpU6QCN1pu4UccDLZTQdVEwMj", + 
"NodeID-DAMYbaM35qcXWvuEKt6spGK6EzVKDJUkT", + "NodeID-DAZNmxJPADAnk53993F5wntS7LUhxNH2i", + "NodeID-DAtCoXfLT6Y83dgJ7FmQg8eR53hz37J79", + "NodeID-DBB8Ve4B6dXejYtFhwcQ8ZMFbmZu3VpSC", + "NodeID-DCUpFFgVWQBseGis77N45cBAZudSbaZyg", + "NodeID-DDzbSLsiD9qMPr9eaUhxwrChfbbXW8Zu1", + "NodeID-DEa3uaa1iztrxytaMqaP3PdydErzjWEgh", + "NodeID-DFDnfBmuJWqEeCxMviw2u93rK5thcetQo", + "NodeID-DFH5SbPENeErMmeys9FcQTMvdKkYrdLU2", + "NodeID-DFUyhAzXyYi3TUaQYBvm5cezyEaREeSYB", + "NodeID-DFuM24ytt31NZquBMCS6X3EHmrbfJXMa8", + "NodeID-DHUX369UfTJb2vVuWSRdJr9s27owdMbkS", + "NodeID-DHpKTzduyCvN3x2G4svUD2T7LZDD1T7nC", + "NodeID-DJ6nZjHBeTwdQiikyuz1foaQ6dWLLYm4u", + "NodeID-DJbw4TrSRu9uovTKqo6w12axPLHWHsjJD", + "NodeID-DKSq9hmU318ceXbU4LBEcSacDPZ5oSNAf", + "NodeID-DKgP4pe8GtAwQ3oVgvHJFG3WexnLjkkSt", + "NodeID-DKxvuUSqd8xtyWejxXC2JvQfP79SAsPvG", + "NodeID-DLFjLpQupsWf5UdaKRFiGELdeY2ViJfwT", + "NodeID-DMn1E4EyXZu8GZGKHV3p1naTmpKkpf4fM", + "NodeID-DN3fqQmgTBvk5JDxKjCFzmbs2XSVXTKny", + "NodeID-DQMJfc9ArkN1gohMBh4SQi4Cmu6EKAPo8", + "NodeID-DQVzByLn57iLrib1MVzoxVsnEksS9jbgb", + "NodeID-DT7ET1yHCTuNMydS4mGod99maQjBFS9MM", + "NodeID-DTzB9kesfKQNYuRBivWhK3iB2aWsuYu6Q", + "NodeID-DUmZowZh6nxKskHUybupqNAhnXVA5yQ1r", + "NodeID-DVh35F3R11HAAxzeSTbk3eR9w2BzPrJQt", + "NodeID-DY2Tk5ZqUir1NpSiTwXVGdETCGGEV8wHe", + "NodeID-DYCS7WSBWdmMx8ioQ4Lq6gfnPtpKmKVB6", + "NodeID-DaKj4jkyyCN4B5zVW65krECqpjxYGUFzX", + "NodeID-Dbf2zdJeUXfi3bFzmjhJTSgyME2o8iEaY", + "NodeID-DcCkk2wqS6L9XpNX41DVN5RZnwgixCYqE", + "NodeID-DceQxAzA4nCGFm9BKa1PEJ4c1zwx3YLuu", + "NodeID-Dcs4UwSwQe7vKaopXi9AcM891ybKatAvp", + "NodeID-Dd6BPwCsmEQaAgx9hpck23LYe1SopA3p9", + "NodeID-DdJNZBekdB5EVLEe2vE9LusaHjbGmsMYh", + "NodeID-Ddm8dYWWMgiZv5dyNWuzgkDprubqB1wkN", + "NodeID-DeT8Fw4Tp875GUQNxfcfKoBSPjKpGi6eR", + "NodeID-DhNYzUwYHPF24Wfz9MmmVyqLBUF6tkEpb", + "NodeID-DhuG2YhqQy26zabyLqTqNr85T9XscsWpY", + "NodeID-Do5ZUHCK8DHm194KGqdcwFc5wWE4fCQ6s", + "NodeID-DoEC8uw9qRNi33EiYrPoK6Vu9pnsH2rnW", + "NodeID-DoTTJmp8cmERizJdYQGFsPVFgZPJxfcis", + 
"NodeID-DomnwLG65RUJM41RgCJ2tRdR4iZEkVCkk", + "NodeID-Doy4qhTgun22byNDyHxNSRceygqwhwXCn", + "NodeID-DpXBTEP6ZCmm5bgBbv2rb6MzdRovARVhi", + "NodeID-Dpba952dAUnayPmf52GJzQj1PLoqtXahS", + "NodeID-DpwJ1uaLSpSqKu8ZTa7XpmFp5BuBYN4Ns", + "NodeID-DqE45ELtkEn4hSyU58oukvcPg1Ua5UdzV", + "NodeID-DqqpR7tQ3goBbudggakvzYs2K9TzR7daR", + "NodeID-DrijBGc9SonJrGUwex3vXtzzEGW3cB5DY", + "NodeID-Drv1Qh7iJvW3zGBBeRnYfCzk56VCRM2GQ", + "NodeID-Ds3ri3EksrtyjtAJ3RbX8Wku19LRdKTti", + "NodeID-DsMTbudbrnZtFyexvZihGfb9qETaxFBm2", + "NodeID-DsZ2yLdYvfHZptxMZqmHKgs4T5ehRTx35", + "NodeID-DtCpt2Bi6gvmQqwgKE59fanof5F3bepnd", + "NodeID-Du1Ebao2uJG8LukNGcUrzAevtVRhHdtKW", + "NodeID-Du7SAKQLR3j5iK7tGv7NvTSxiEVTfYfeS", + "NodeID-DueWyGi3B9jtKfa9mPoecd4YSDJ1ftF69", + "NodeID-Dv4KWhKB56So4dZNxRRi3J2V74TaUXWMg", + "NodeID-DvCdC1yWp8a9NNLBR7agvTV4YNmJY7zb4", + "NodeID-DvKgKdhkDk6CUf68iqA5Kwaa44RkBa4zN", + "NodeID-DvMo3a7eJYQexjLrR1rykrc1jsNuVwnPn", + "NodeID-DvwWC5ed3j5iG6e4Y5yxAUacPkXvrxLFT", + "NodeID-Dw7tuwxpAmcpvVGp9JzaHAR3REPoJ8f2R", + "NodeID-DwP4ATkTPn52orSSS1FbjjT21TXnXoEnX", + "NodeID-DxdedkGTUuVoRS4VJA1qYXuFU6MerKKyX", + "NodeID-Dy47rtrYr4eiyRTSnbaFi8KZVnR56XrUC", + "NodeID-DyXd5SxgwWWadT9PdEpieSPtjcKxYCMjE", + "NodeID-E1c6FEFavQ1RiaNARpxGx4sWnXAGh2YHN", + "NodeID-E28yXasMLXFQM3f3mQtGWUGh2jEgJATi", + "NodeID-E4KwVKcddwozth12cNvNYApyHhVbvnby6", + "NodeID-E5DL2H2CxUUdxe6TcMW7uTHC5mFujF1eD", + "NodeID-E6GJmPWBnGKad7hPfxCK9GRwsuAKc2MJQ", + "NodeID-E6uwzybtqQ8H7aArfxxVJWVsEcaCPLT7t", + "NodeID-E8rXzDkKySREDECoXPqaPPhQA7QuxL9M2", + "NodeID-E9cBEoCDBGi6qFGcW6bfFoyhWpq6Vc57Q", + "NodeID-EA7WyeKRdVx34ixXFCvfWbv5P9DSNsj1E", + "NodeID-EAdWoKJCXzi2jKAEoETCm89CB3dzyMdAy", + "NodeID-EBF7imfP93Sc4cv3cUAK21maYDvaFMS5j", + "NodeID-EBHJ9gHUbjtNQMnNNcJEsqojFpAbLrnVS", + "NodeID-ECNKxnabxDwevTW7bLgP2Meaid1j8ztVn", + "NodeID-ED4xkYFhZPNNj3WXDQiogcTULeBpDVjSY", + "NodeID-ED9nabFu9bXzdDizCeNytDisKqkdEk83Y", + "NodeID-EEQ4pJQ8FTsFmPi5qEiZDBkKsqCpBRZiT", + "NodeID-EEipCNkeRgfvwiDHbaBdQ2WeRnsRVSH7P", + 
"NodeID-EFWjQ2CYsraxPFxWVJL87nfqKNsAP2T9q", + "NodeID-EFd4jseGxHxQmYX1ATcJUSZoCnE7z6dFe", + "NodeID-EG6gFe5McNc2EyFzARxuvTApN995FdBZj", + "NodeID-EGKLqBNabvY52737RGARiuGgvu2vHRY6v", + "NodeID-EHJoNS9sgSnjg5gkvKL4AF5vyi8c4qTvg", + "NodeID-EKH5gkuABvZWiZGHvWuZLgGar4Zts6qX6", + "NodeID-ELNdDSKVpamvTWSQvqqBWzuAKFiJLzoT7", + "NodeID-ELjBQTBvaEAMk7Qx7qJNJsEh2DYC4nxUn", + "NodeID-ELx4yRvGiDRcG8r8Tow1LUAhMXkVYVzHo", + "NodeID-EM1NUe17RtPAJ4ofro1qPc8vEfcvEhjZM", + "NodeID-EMETD23g2Bne8CuQKyR8HB15KWomaAuFe", + "NodeID-EMq3MvHvWctiQ5CC2ioXpeyKPsHteXAeR", + "NodeID-EN2aDGGjPAqm4Npzk9wCjCdGfg5hVrWgp", + "NodeID-ENc7M77QRhgtpDojDQY5nqjndjiYCWR4i", + "NodeID-EPqYJkZ7knGWY2TNe4rxRtNgkqc6spyDC", + "NodeID-EQtsYPVt3rHBSJdp2Sss3pm645cpTJkGp", + "NodeID-ERzoQo45TdZFeWHG7JpHVnHbn7ULpTRiV", + "NodeID-ESRpVRqLwts8M93Ly9QPm4ZSdMF4wEpbt", + "NodeID-ESVMWUoEkKz8uAEDfKcEpu5UUCkA6GeyC", + "NodeID-ETwxZVMHXcikZmKsKfjiZuVG84KsZDdqz", + "NodeID-EWNvJxWq2UBBsn2UH35soQugQJn8CVKdS", + "NodeID-EWmdnUhsVGSfZPnbuSodLUTA6ReJcXz8v", + "NodeID-EY4iiXyj2TfoHspEHBHFfBYrHxXESnSN5", + "NodeID-EYDcoNgbX22og1oyEn5s7iH1BYSmR5yF1", + "NodeID-EYgNPuFCT4Qy7YhvsM2s8WKTT74QECvTV", + "NodeID-EZ38CcWHoSyoEfAkDN9zaieJ5Yq64YePY", + "NodeID-EZrxYbBm2ZcoK13eTPf2sdtYuJuSHyaTF", + "NodeID-EaEUnvUyfTr19uMW8g51bvFXfi7r9xL1W", + "NodeID-EbwYUq8FZAK4jf65nZ4dcb5kk5WZ7RBgb", + "NodeID-EcoA7iNFXhc2DwrmWxiEvjvsDvDmQrEKK", + "NodeID-Ed7NMisAoxzWRjxQVrWnBvMShLNcydRa6", + "NodeID-Ee6MdWL7mFKdPAXbJTWZGvwEUsi17869c", + "NodeID-EekL2aJ7TdkbiXDzSsPo5PKyaaQuQqVg7", + "NodeID-EfnXCVeLUtW24v8gwW1VYiNuqDeRPEVYC", + "NodeID-EgzctrMz5jVPbfUEWGufKxf9CXUMLBRGG", + "NodeID-Eht1FjvmFncRbagWk1PGMyE7hVrCS9Acp", + "NodeID-EibWFSUHeLNNjWZrEznmuxH7ZWNsavRgh", + "NodeID-EiyGqDavdQ8dMUtN5wjAezDXAWwVBdSBK", + "NodeID-Ek1pare46s7nXPF9JtsoyM7PcaFnCBms5", + "NodeID-EmPwabyobnM3jYDvQuxZdLBTut5V5pq2n", + "NodeID-EmRTfxEKTLpzTyWJmYTXVxyGr79bsv4Bp", + "NodeID-EmhfRnDPSzpEMhGy3KSC4BWhPdtoxmqqV", + "NodeID-EnANSJ3hrErv6v7ZGSi8p5RCHhdhbGrb", + 
"NodeID-EnVDmm6dUgePaWbtBem6HcbzptaigBxDe", + "NodeID-Eo31HyvvA1Madeagm6U8CM376cEfgS4Bj", + "NodeID-EoNXC39QyT1eHFPXb5PgvDNigBN8tMeCC", + "NodeID-EooPQfxvcRG9XQpNmbDg85sNzaFDzux6G", + "NodeID-EpU1DGKMMvDRtag9u9G7uwZHRsJF5NodL", + "NodeID-EqBR9F3QBBrTQTN7CDN7sDEUsDGDdMYps", + "NodeID-EqauaQBwVXnpamJvLi84aeZLTQra4GDep", + "NodeID-Ess2uf91CDTfKsLezaTLKqqsWcpEHGvJv", + "NodeID-EtFG3SrbbudeFQCaWRxhwvv28wHpo8VRq", + "NodeID-EvY8p6SYho6VwicRvVasSBBv1MtanH19D", + "NodeID-EvZXX2K2eVEV7n6Y8cnmeSRgj4JhkCVad", + "NodeID-EvdPJG2AAVGCP2s3543KDx1LZ1g5SiH2Y", + "NodeID-EzKkVMd9y6XqzQsvRDLuaaiXqXK8fFipU", + "NodeID-Ezk1hTiN5bogETxygJ5NciRvHXgXvUVVM", + "NodeID-F1JWRVsJ3Wq9eSdNEHvoivQVCsW19r8gT", + "NodeID-F1K4jks15y71PthvXVNks8pgutgBwdHZH", + "NodeID-F1KpunDAMisiGKLX3R6k9uBoJoeLwzJoU", + "NodeID-F1qvuNMn9XBXSjv9iWyyMGfV2PQ2Na7KM", + "NodeID-F3b5mj8PWnMP53hFbfGNxkiXpbK6tPyDU", + "NodeID-F3r4EqguPQgupLCYrTG3SeJSCi5qtc3x4", + "NodeID-F4Q66oFkqCmRnBjDUAh28iXvWx6WxbxJJ", + "NodeID-F4SmFmnXuFBkW5fgFgh5RT2T5w8KseGRN", + "NodeID-F4kKmjMhZJWkYwsWyPSLFxtNcj7BhELmY", + "NodeID-F5sBP4yNA2YXHbD6FJUjsZJ5zhoMqMi6g", + "NodeID-F8HvQKYDbonSeJ3x42Ynb6ibxiJa3FWVL", + "NodeID-F8VWioW7dC64159mMhswhzpkHxnB34cjS", + "NodeID-F8Yq4R4Mx1LHGbQ7g8s3beNB4LGnCUBpv", + "NodeID-FBtCkPRmkfDry8GGpNenBW2fn3fD3qoKn", + "NodeID-FDTRdm7eS7sBuzAnQqMrXUmNiuNHP7VM3", + "NodeID-FDZXWAQeuMzZVq8tv53b1NdiwyJDUHGpB", + "NodeID-FE6WTTtg1FvtgQMZBjjZ1qhZpPq4WoM4m", + "NodeID-FELjhENFRLHTgVrwiKTdL7aSQbU8mBd5Q", + "NodeID-FEUSZzFmjF2JyrniKXksGfRjkQUH5R4eH", + "NodeID-FEsoMJf2VjoRPdnm1Ux7CGg5RrHnAH3VX", + "NodeID-FEtgjYGndfWGzLZELTJJoikpSobjVKTjk", + "NodeID-FFWS6gV9FrKecNWCYJQuyR67t7GY6UiVq", + "NodeID-FGRoKnyYKFWYFMb6Xbocf4hKuyCBENgWM", + "NodeID-FGt2J8WREPFbbTXgVU9YC5NjdERhoMa7E", + "NodeID-FHGwnPLGdU3FBzbwnhayx1sDjxiiyb9sD", + "NodeID-FHHEXLYRNrnQhKFneEwDPX8TZV8WtUpQY", + "NodeID-FJ7WdKoGBkqb78mMGrr3r6kSL45R5Vspp", + "NodeID-FJu9WvWrxTucYcNzEVQj1pHbbS4LaFTnn", + "NodeID-FKCbGm3jmceEEpfSdA4uQUVANpmqFLAty", + 
"NodeID-FKGhEFYnHUdFadoR2ePvdTTM5Sz8tgjHp", + "NodeID-FLQ8ifj1DwXuRWKShjck624SAgYXVpQzz", + "NodeID-FNfrZNBD1bxGj1VmdgSrEKT8oMePC448e", + "NodeID-FPwgrsmgeX4DSkG4abJ28zZQhjcqirPYi", + "NodeID-FPzqAmcWBR9RZdNggUMtuyWwvuG2RkqrZ", + "NodeID-FQKJ8yATVmwGuy3LN2ExsURDEtZNWgr3v", + "NodeID-FR1WrbXocb3QKywNmjQABt3a159QqRMBe", + "NodeID-FRTCPrh8YwxmR9L1XJWjLuMA1r9SjPrRu", + "NodeID-FS8PXhYXxoo5GoTHoki5Y3Fqk5zzdMY6E", + "NodeID-FUpfjb5sPRnT3XxMbtephYBc1MHZE2RYL", + "NodeID-FVMb2XgKVaeonehFbPMe714kMJ7DRxhpt", + "NodeID-FVgPdpTafzx8jazSNoxvQmJNxoEcAMmxm", + "NodeID-FVgkUobMuUBABuMA2KngEsm2SG9XLGvdW", + "NodeID-FXsyWrGvZZEns2iZr4d2h3pW9piH9ZE5h", + "NodeID-FXzsVMpT3ipDikUzabDtmeLrXW8JYyNZp", + "NodeID-FYv1Lb29SqMpywYXH7yNkcFAzRF2jvm3K", + "NodeID-FZHUwdFr4rndHpmSFXLK9dJwKVcijv4yW", + "NodeID-FapbxinG8eCCXzZQKCjxicKcXijmnKzD3", + "NodeID-Fd4DMc88ELLiTHHSLVZ7L4MB3Z7jbDEAy", + "NodeID-Fe3121hoArBshDTJEEeMeJTNW6HcLXxT1", + "NodeID-FeddPuWb8xtJeuXCDa4CAbjgv3n4PBQXV", + "NodeID-Ff4Fx8mcbfBVZSpMFwQKDUURzufmLjLgS", + "NodeID-Ff4s5gpeGMf5HaBKNxBX1uRxB2hxRx5By", + "NodeID-FfRKiU9BpTzMx2KGmrN4YyiwftTAXZ6FH", + "NodeID-Ffh3UfBYLouumUQgGuFbSTBbD5wLXn7xG", + "NodeID-Fg3L1eo53UWpf3J9Y45PK2ksQ1zyqajHL", + "NodeID-Fh3Yr8SejArPdA3eHfg75sLAt1ypeSpCV", + "NodeID-FhH7R1HX2ipEetm1eH7u9aKfBCtCQyuQM", + "NodeID-FjPxKjpGqPNj8NyEVVdJgGT2bXFVkNyW5", + "NodeID-Fjgs4ALe7yajXjZYmcRAvaQBGrFE61QME", + "NodeID-FjpAr4XXwanzuhLMajEVdm6bwdcGgtZNv", + "NodeID-Fk1hBt9ZyaKiTPjq7MLyvL7Kgd2YMH4ie", + "NodeID-FkHyxMuq6DGNLdQtMKiLzhQQuwjSCqwHj", + "NodeID-FoVyB5QV1hmvAxsNunHRRzonCBJGkLvtj", + "NodeID-FpA1n8w3xHmJ1QK3iDAS49vbLJokejfMA", + "NodeID-FqiffXvatKSLPRhkkc5y2MkALrZ8EFN91", + "NodeID-FrGVKPnD3xVmoMK1RjtbD6evNGk3wrpv7", + "NodeID-FrPwbz7L7qvxbxR63g6CMpWPDEyHiiqrD", + "NodeID-FrTRZzNgZZoHMGvC5t6XtJKAv7E2LvYYU", + "NodeID-FrWaLbhUVQHEzXfeekv6MCbkmLfx74HrP", + "NodeID-FrhMaTxsagqjYCetCyxLxpbcpkN3cjVLH", + "NodeID-FroHhWhFsUAh3s6Y4hjh7mx3E79mBUZGj", + "NodeID-FsqY7xFDE7Cvnxkc1BkpnRbiLXH2tAVha", + 
"NodeID-FtJRoxeiXq4D4hsM9PCAZkoerJDTqPEW7", + "NodeID-Fugfqxb9NKZ3h7vTe7XsjqqKqxYwSrTsh", + "NodeID-Fv3t2shrpkmvLnvNzcv1rqRKbDAYFnUor", + "NodeID-FvAAKfRTbxB5ANcapWNtQ2VGMFWxX3LCh", + "NodeID-FvFBsrTf6BhCcS1nFPJtRrWZ5Nosb47h6", + "NodeID-FxZHg1hxvYyBWFCHzqQPhFYsXte84UT9A", + "NodeID-FxaeyWwirpBBbBwRgpytNrtR3NY6xTiwT", + "NodeID-FyTHAQeNPFhoDetc7pznBMJ5BRj5s6FJr", + "NodeID-FzJA5i97oBDNiKeePSfHc2t3zt5cRHnjJ", + "NodeID-G1KHG6SL2B9jnfbzdTudhjRPa9hp5tuRQ", + "NodeID-G36seLQVfXnaUgaNobbSWg8anRta2WUwE", + "NodeID-G4dtkvboFCTYpnLyHYMVPq8GLxns99NPq", + "NodeID-G5AGU54ib6jCTc9B3aUC6BVLyhEZUuqYE", + "NodeID-G5QuntEfMZS7A1TPSHN4owEa81C6MAGC2", + "NodeID-G6nXgtucmeKn4nkCp8YTDdM4btVSnW4ad", + "NodeID-G6oS8Nbb74RyJatPe34iwhTUPDqJNtuwD", + "NodeID-G7aPjXRfR2XtrB4PGATDS8NoqxmqZmryS", + "NodeID-G8uVCikgvakai25byQwYx1z56N33unmj", + "NodeID-G8yv8mWQy8FLdFfAhJ4v294T5Z2DWDVEH", + "NodeID-G9aa4VXB19wnUzrmVq2SheaxnKEieUW3S", + "NodeID-G9ucCcZoHdXC6mrVpKVqKzHsMrnoVZLU8", + "NodeID-GAEZpCd2Lg5RSrPtQhGvxw6Kid5tAL6bw", + "NodeID-GBvezLieTFYRqHsd8QzWqH1JUhuwD21mJ", + "NodeID-GCL85wyq71aw2FMRTct2PJ2F4qGQL7WH8", + "NodeID-GD3XH39S9feuK74XEh8AhhNEapSt2hqHo", + "NodeID-GDmLiXrEtmFvBiE2s1ZGDYdZZXdkkLW5F", + "NodeID-GDp4P4Jo84yyVRkL3UTkJDuTNno81BBxN", + "NodeID-GE2jerBVdHtvfKkqfNHwBr2MCkDf6tLir", + "NodeID-GFbG2ARoztCgE1FxUekg5KjEaA1bwThjW", + "NodeID-GGHpri3tVbRgLUEFKqxNivRbegTKBAnyG", + "NodeID-GGfhdfGbqenvgChEXpdngjkKYAByo9di7", + "NodeID-GHBthsGgAzrPLXuXk7vbq4qsc9CvSbwXS", + "NodeID-GHZZWcg26qxY8K9uDLXu6Gc9Gh3P11dK4", + "NodeID-GHd5dmrKvYkAbxii5V7wmJw8sRf4JqYAi", + "NodeID-GHomCXMMRTSPRZrrxEULYPFHHXEhb7qvY", + "NodeID-GKBykzpxeALzYee1fGQjFn6oZiA18W26m", + "NodeID-GL87o4Z7DghVRLf4z6befUxjDoWXZUDqg", + "NodeID-GLSSsNA2oHSaebdYDBQ3u98kvHfH7zjYf", + "NodeID-GMJcCQwhNFDGQy4NAkKM563BMudjmHpXb", + "NodeID-GMoxCucPFt2LPSpTh5hPbDPAp71fiV648", + "NodeID-GNHgGvi7xri6CckDAzmQYL4YgR42WkVsX", + "NodeID-GPYurKNwtkhJ7M7UQoDinFz5P7Rsu9yUo", + "NodeID-GQZBUwNZb7iSYWEyqpyrp6HXG7Z43uhWF", + 
"NodeID-GR11oP3kLHf82Xbi3H2kewqoM7ydHF5ES", + "NodeID-GRReUAPgHbGdXcv5Btpm4ByGvLrD1NgXG", + "NodeID-GRrpJze5AEaBezaYp5QRtVcLAF6AzdJSy", + "NodeID-GS68HENWev9AWcUrhLqxYvsv2opJBZ7km", + "NodeID-GSgaA47umS1px2ohVjodW9621Ks63xDxD", + "NodeID-GTu4yYzFRv4QGXyyvcorGDUMWjzzMWuqN", + "NodeID-GU7boimY7cxeqdDmnPRyYrwLiRmwiHxhn", + "NodeID-GYAMwLv3XViragqqJUQyTNRcUtoRWbi5F", + "NodeID-GYYGoe6y5TtL8vNBVkvTqk8sPTVVqbGaR", + "NodeID-Gajvbi91pdURX1xzYQ7vzm1qUzNQJEfbG", + "NodeID-GbhKbbABkTdzExgzbbsvCdc4gZe8eHpgP", + "NodeID-Gc1hRnfx1mvhgaUnmymNq9TKwvhFjhEkC", + "NodeID-Ge9DfHbtays3Yfsyr1C2Z6eHSLpmTXQ6H", + "NodeID-Ges52zCMZimxqri3ia1J32mUAKyUsvcpV", + "NodeID-Gey3HmUZyAXuvawThT1McvEjrofAwrGi6", + "NodeID-GfDdBdRqzEKK55gCKqThE2LVxLdLhJY3A", + "NodeID-Gfeb3fWPCSpA4g9edG68NS5aJcYpXn6T3", + "NodeID-Gg8SEQhx4vU2KWzQS8ESCrSpcjzEqQ7kY", + "NodeID-GhG3ac1x2g1Fc4xPWXxXRoTLmMuT2asMV", + "NodeID-GhUSE8wcdouEgd3xeCvmYhmevkdNetfXk", + "NodeID-GiEUKDy7YfYRdxD5rA7t6CgEQxyrWP9sj", + "NodeID-GjYkDpBkCueF4vTkZVdtq3yFiMJha2XpF", + "NodeID-GjjemGsR2kXtPLSwv4oNV3JFt579oJUnn", + "NodeID-GkiJ8CRTibePNf3Y2F7LbGTinHm71HvnF", + "NodeID-Gmitibwg27b6WqHLjqmGiXZAh1kmZVq5P", + "NodeID-GmzoRvWfUdTTUZBmWFM8rswgmwMKkcDAB", + "NodeID-Gn7jLTVPuvdrx6mF5hxSbazRoKPWsM14s", + "NodeID-GnKLq2ipT4D2CMcrrGYndSCuQL58yhCR7", + "NodeID-GpDhyVHYVaL8qXFB2a1QPBsXyUMZjiXLF", + "NodeID-GpEnAaYG182MkURWh8KY4wL5KgWB7cBw8", + "NodeID-Gq6cTyqtHRGHSHELPNdwXCjQJH6vwUkBc", + "NodeID-GqRuMoELZus5EYMwBpiBVtWFDnvLmaJj7", + "NodeID-Gr9xzzL6emX5jWv1LoMKLpCebYR2V2nYs", + "NodeID-GrL4JXttpkikBorzFchuaGhnDTWc8nmEb", + "NodeID-GrkiQsGvbv9PSqJf9r3eE79fUgbZqydmK", + "NodeID-Gs4BSxTpRPxx1ioRz18VUB3kEYVBsDV5T", + "NodeID-GsQoMs5GAL8inkh88XPVJWKMwmJqm5T8N", + "NodeID-GuTDsR1tBFgQUbKqCf7J29yBo8ofudHEZ", + "NodeID-GuWMRuFkQPvPjdnuegSQYDmTMbj3UtPBm", + "NodeID-GujHWE263qNN3MjEYhEeAQdPpQdhHquLJ", + "NodeID-GvEoakPRjaB9jTKup6nAMpFSF3HiEeocP", + "NodeID-GvQdXQD6qFzfAYBvnvxiep7WaXVj2FSnJ", + "NodeID-GwcsDtmQrD8thUHiQWgrpRvhuMjxTqZP6", + 
"NodeID-GwfPzhzby4Ur8mgdqPDjR3jSTevtmfmY", + "NodeID-Gx4oY9nZmAzJrHUfobrmuK9LrJhwejykC", + "NodeID-GxUaibkyR88sErviW2WK3ukDJsT3DFpoU", + "NodeID-GzSFF2tfNMc7YdJzGeNeNxfSWjDvENJUt", + "NodeID-GzhNocnYWSCziBTP5Jj4xAKn29xDKW9Yr", + "NodeID-H1R3mBHFDeBoQzVYyHzX6RGnVV4nMTrWZ", + "NodeID-H1YSzbspeyANTzErprmBQgV2Crq5hXSW8", + "NodeID-H4mD37NM384RtLGjz5aERQoNCrtGEjgK4", + "NodeID-H94Q5zdkBByZVWvqrB5ByiX4dZdG1J45y", + "NodeID-HCw7S2TVbFPDWNBo1GnFWqJ47f9rDJtt1", + "NodeID-HDXjAjk4rG42HXey7gzQPx6dMBfpWeh5k", + "NodeID-HDmmLUSLtok2sGQEruUsYSMjBeQbUrwpH", + "NodeID-HFFiNVtYmv2YF2WHCZWHSCigo25RxgnFz", + "NodeID-HHV1onrMrN68JdRoKfQGeZphebgRrcADT", + "NodeID-HKskHZG5zu629RsEkScAaGrpfGQABqep7", + "NodeID-HM1vYJWnjWhJkJi8dErcu4p5UDxwUKbp9", + "NodeID-HMFqHmnsaeBGdeg83uXsraRJSH67DNsi6", + "NodeID-HRZAiPM3BaZJpAq9jy3UME2EGsrv1ZMaa", + "NodeID-HRmKWRK8HjqqTKWvYMBNv9hcddSYrhqnF", + "NodeID-HS1PKAMjZczSb5ZQRastn1WG6eJeNmahd", + "NodeID-HTG7W4VHoGJBUTip1gKXzXGtEBhiW1DXT", + "NodeID-HWwYYkiTKnXgYycNN5gXQfxhhA8TZdBvs", + "NodeID-HX7H9nBZX3nZRvLVUxCDBesVX6n8Uv4a", + "NodeID-HYwzPrs3tDR6yyMtKZo22fq8grJsP1DVF", + "NodeID-HZnBEWRUGocZ4HYX8EuMP2YEY94NtySWR", + "NodeID-Ha5u99aoKUdNdTdomp8bQcWHUw8PSF7em", + "NodeID-HaxdTdeEr1sWMRZvXwRN2iotd6RYd9XYM", + "NodeID-Hbf2ZY1Zs1s8KMp3rA1zK36N7zt5fSLkz", + "NodeID-HbmSiec8zTKiPWxaBwStg58a3N5KePhC9", + "NodeID-Hby7B4s5wb697sM4XFN5VusKW7eNyDMg6", + "NodeID-Hcq3gpdMqAEiVwq19E5DZhsjggbkxFx2L", + "NodeID-HdByRj5WVuR7REF2eJDndNyHA94hYhjMp", + "NodeID-HdG8EdGWAy8LR8YPA8hndoa1uwwgGv1si", + "NodeID-Hfa96c7a4xF9vYHuMh3cRsKUHa6YkDsYN", + "NodeID-Hgs8rDmSTASnKigsVfz7k3yZ3DgXP5s1j", + "NodeID-HhEvRMAeXAuXqULRysWKNLK2XZKzYwCnK", + "NodeID-Hhkq3c3UzzX3UpwC2kq9TcbJdguht164A", + "NodeID-HiFv1DpKXkAAfJ1NHWVqQoojjznibZXHP", + "NodeID-HkE9eyj5U8saYexeGWuPcQQbiu5MyRHHa", + "NodeID-HkEQ2sJogxEps4tmqFRDuVG9hyfxe8vMg", + "NodeID-HkpgaE4YB9Z7KmueQ2mz2cZFyfttvo6GP", + "NodeID-Hn7YWxiVC9JufWW5n8u1mvaYgNgqWG8VT", + "NodeID-HnCFvKLXEVsT1fJtMxCnkNj5fs9ExEugv", + 
"NodeID-HoC4UxP4UuJyda7MZs5jUkZRk8Rfj3pzD", + "NodeID-Hp82btBefk1ffBD7d1qtoBL3DuuNUv6qa", + "NodeID-HpMSfYT2ox1vkr1hFNMwmdWuq8QWhvH2u", + "NodeID-Hq2aEQ2RYhEo7BkVR83c78n6wbVLPrbqJ", + "NodeID-Hq5vdoGnZLMfasw4ykMzdM6PhUFDmp7FL", + "NodeID-HqPSHk6fxtJW1EsF9Y8RPH7KvPvdRpnzy", + "NodeID-Hr78Fy8uDYiRYocRYHXp4eLCYeb8x5UuM", + "NodeID-HrGKtuSRaRj9b4CNy2JeLfv1WAULmhwtF", + "NodeID-HrPfqGPogaKpTTcBhNBhmYfLRHZhiMWyR", + "NodeID-HsBEx3L71EHWSXaE6gvk2VsNntFEZsxqc", + "NodeID-HsSzvb9mMLFcidMWoQYXwHrnG2gZKQtPC", + "NodeID-HuPCfRaLN2cXoXDYyi8GYvjATLDmNJMHs", + "NodeID-HvmgHvMoe15ks8rbGXHyKHvJWDyN5yAGU", + "NodeID-HvxZoKdC58RkD4ZVTYwjVZ3TsiQoTESEP", + "NodeID-Hw7wL2of6tnqntucHPpca6xnnTYZCvBRL", + "NodeID-HwD5fu2NCooHXCPc2GMuNHrhfsThpsQYY", + "NodeID-HwSpUzHSq3ZQC5H6sL9xyze8r9APmFunH", + "NodeID-HwTU12fiGyZoyqaShTAFgTadY2w4DnazT", + "NodeID-Hxx8vTs8kH3stQWgPw1XdHKqpbuD2nMRi", + "NodeID-Hy2aSw3vVNwwQhKas9ETUxtfZgcefsPZD", + "NodeID-HysvJteG6zyScbEu5WKiqAetFXHNrnJHA", + "NodeID-HzSHrBKrycaSu3FMCLwd4WfPrQF2n61tG", + "NodeID-Hzfy8c2s9PyC7CPiUB1PmPckWYs7nj9Tm", + "NodeID-Hzwoqn1Vmj46T1HZzEUNG9kb633SLLHxZ", + "NodeID-J1CD2Nw1eX7iNUVw3iXjFJkUgrSm9qbm6", + "NodeID-J2RTGUXRNDSx2kBU8w7Am6Y7k3LABR5iU", + "NodeID-J7GwUKfHXhxTqgFbuQd4us6yKYdwahHHx", + "NodeID-J7PV3mKZgSJRDy3atopJhh15Y1TqYsySk", + "NodeID-J7PjfVJgpLL3mBMUwpicPqiqDFAmNWhKA", + "NodeID-J7gmHAAEPkSPbNeurMqhg6bknPXWgJgS2", + "NodeID-J7n7bUvog8Zdgj4bCNxRzjzrSKAirk7vi", + "NodeID-J7vWeKgzhtKNy14NLT82Vkz5N6L9sqB38", + "NodeID-JAkZxmCTCkBmscBpSisz5xPFDWcrBbkUC", + "NodeID-JAqrskRRwjmmU8fRp6RAf5wDoEtjXFyXu", + "NodeID-JCT5WWZVaKqcMNDXuuYm3Gpgah9TpKevV", + "NodeID-JDH5gzcB2DTfbEokaFU7EvpLN7E1RHzpH", + "NodeID-JDYxVGLrUb8SHwuu5jZBXNC9rqYWd29PG", + "NodeID-JE8niwmtgpDARKNUqTEdXPQY41uHtb1PN", + "NodeID-JFMBphfETbxfarEkQBHw1RSpfoFhdhUSR", + "NodeID-JGVF16zbVcXqH8ysCQdK2ap38DfHk8ie3", + "NodeID-JH744d6NN5DbtYR3fwuonjNGqUNwhYG5V", + "NodeID-JKCgdbWtCa8BreNP3xftfUuoWGXoy8hWX", + "NodeID-JKxZwnEvK2MAsL9SQ46fyCnTo1ZDkz22D", + 
"NodeID-JPSnjdvcPjD2NQNaTgC8YP6dhcGeKhse8", + "NodeID-JPvThCax26E5wEh4HSpjhfbfGn8LnvGx8", + "NodeID-JQb8aC4ny7Np8CVAasacpby9moWWzZXzT", + "NodeID-JR96tvzbG8L6xdoGLUWUWhQiGkCfNFP7z", + "NodeID-JR9oQgVHfNBybNVMm64AB2AGvHFCfaaxA", + "NodeID-JRCLnbo62xRaNLhNeNd3HzSHK4dW5iMx4", + "NodeID-JReo9xxdEAX82pAt3berdoi59Pn9km8nV", + "NodeID-JSKsSxV74dyp94CvBKqEKUcEEKBXyNbge", + "NodeID-JUncQvtGY9EVfsfHNHrdsiv4kRfAQSsvu", + "NodeID-JVRaZNCXjBjKqsmrd8grSR9LjFDr1zcgo", + "NodeID-JWBfuYPeFXRkuL1AeW6p2fquqoQTVJsV2", + "NodeID-JWwGze2HXQq7THFQj5zWRmBa5XaMj55jZ", + "NodeID-JYoS8akGmcoeLq4kxpxfsPJxwi3UWeXu2", + "NodeID-JZg54UPtzhHhXoVkX4r6WSTc74cAgdFjY", + "NodeID-JZoTeECdroSJ8sDK3nwpeBz2cYCEPPjiu", + "NodeID-JaS7Df16bwZUSL87wersMaXez7836EH2X", + "NodeID-JbXcSSwYvmnwD85nw8Yro79xaNuAJKxqp", + "NodeID-JbsnKEwxa9J6arKtZ52j4obF7FwvH634k", + "NodeID-JbyG4cewqSwjatqGtg7mDYbcvb299Cb8Z", + "NodeID-JcVg17s5KwKNv2yJtYfBjEFfB6GdHDmXN", + "NodeID-Je9mYMihjzv2oDF3ebvzDKs96zfaXDLHF", + "NodeID-JfXzxLA38XSz8EfYiWDUvsz6Tu8xwhdkF", + "NodeID-Jfx9FcVMQuy1FXGd53EuigXnCEgRQUUMg", + "NodeID-JgEayrUnvk1tvMFS5Qvr6LPNSE9wNYNik", + "NodeID-JgdYonZgwW2LRYGMwcDcHywQ4GmvjJoe2", + "NodeID-JgfyD94R76Wj1KBXWkGpnsSAXLM6f6dnN", + "NodeID-JkjTJNKyJwjEfxdfgQhvrUuf9x3ovpQBs", + "NodeID-Jmvu5u8svgTa716m6xBEcrB6wweP9tX5h", + "NodeID-JnREhSP5nvBcrCY1L3it6odDaHKZDryA6", + "NodeID-Jp4dLMTHd6huttS1jZhqNnBN9ZMNmTmWC", + "NodeID-Jpad4xARtJKgtzo199VoEaGbmYdBYbN39", + "NodeID-JqHkFFzz1tYoGHzPuTsueqF1yKifPiBEG", + "NodeID-JqXD8Tsj1nKXCBEp65ESmFhBrEhPL4WCs", + "NodeID-Jr2wjVqKeLpnyK23Y4EgDpCBf1HJ8XpJg", + "NodeID-JrHvS97Avz8uqX3E1RgG6mCSaiedjWf5E", + "NodeID-JwiikR8FhmnYxxGjywAZeJMFjxfpfD3L3", + "NodeID-JxJxzCNp5c2p447FF24jEDPdHkbB2mhJY", + "NodeID-JxxUAnbxUYQXzTQ8eNeE4jWQt9fTzWd8o", + "NodeID-JyCGTjJaPELmLZdtn5J1HwKHZzki6Ae9W", + "NodeID-JyomXASU2NoPhoGGzQtiVKUAHFpm3NeU7", + "NodeID-Jz2nuAFAfyvgni96rviZWsXxyYYBgSmXf", + "NodeID-Jz9EUhGBC19bzUE7kurEPyoKTRjVMgi3v", + "NodeID-K1E6w1DSaHzZky2aUM4qyQfPYMxF6Mixi", + 
"NodeID-K2fyNZcrWA4DBUNyFme4pNshAgSw2YSSL", + "NodeID-K3gyeR7GXuuf4coJXD4CxviWpRopysX4j", + "NodeID-K3unRwiJFD4p6dXDHJcdeQn6CaoFiqo2", + "NodeID-K3wvYyc46fLE7sSy3G81uta4wDKVkdKbZ", + "NodeID-K484QXg69NxbC2BUsB7Bgc511t22E6MAs", + "NodeID-K5J2aanpMN3PJLmYT7amdzSGygetejuPQ", + "NodeID-K5ecmXoCnxQPFTwrDxqL63kfdh8X4Z3oJ", + "NodeID-K6qqr2J2JcYZnSMRSqpbVv5ysZfnwMreQ", + "NodeID-K7ZsFPBybHPuD3YbzBpp1fe9QTj81WYSU", + "NodeID-K8EaudGE2PS93rgD6aayqS4dPRfrseWdi", + "NodeID-K8tzccU8StAHuSZfS5trxHKbvA17wixN2", + "NodeID-K8zr3RfHUQuupNJdPDKnbhFhwcD3TTbuD", + "NodeID-KALbip9YkRerN81YxU32ySYk5PbGrkysz", + "NodeID-KAjUQ36dNGmMpw35iavX7i8Cr5XGXwby7", + "NodeID-KB1wcvduXXXkTUw97gWchLs4oXbQK8Gwk", + "NodeID-KB5e5rq9ioGR5uicmKdmgjR5hU7iRUFTz", + "NodeID-KBcYcnQmw47qpb49rKtRr5tZgsxnsdkrZ", + "NodeID-KBvpDcnJ9ztfqA4jiAoMwARqfDaS91xqx", + "NodeID-KC9tasJTftYR2SYdNcPRF6XJGng6gBYcw", + "NodeID-KCQARVq1pjYw31EPSaW9571giKoFH498X", + "NodeID-KCX4L5dr83TbmBnE4UeFtzGeE2NSLskt8", + "NodeID-KDHVpdM7PeMtoVm2mmNNXjFfs93cABSut", + "NodeID-KFhoZ7aZMtAReBrb8KgcKG322q27akhN4", + "NodeID-KGYVGbz1pbyuJpujtJAkXhSY3AtLqBG2Z", + "NodeID-KH8r5RoMBBpy4XHHb9DMrbMLc2a1WJKWC", + "NodeID-KH9WnxfcrJLod4JWXHxESehVTAqE5DoLy", + "NodeID-KHRscCPBynyUHXSz4nXrxmFmoMgUPZBTn", + "NodeID-KHaBbtVbSMHmkqs5SKsdCPCgRbouNcwy1", + "NodeID-KHqznMCanU1tA9DcNLBwJQ7zUkRHC9yDW", + "NodeID-KJLkmhbvSYUqdMybmRScnyo7aDY6X2mYa", + "NodeID-KJfoYJrDRXdmLaaaaRMjNbNKAwqSj3pWT", + "NodeID-KK6ifknXca7VGbqB3d5ccUzKta1zJiVM7", + "NodeID-KKz1wXitLQWsfKSSUPE5JL6Wxw35S2218", + "NodeID-KLPimX5rN51nsSg9UQQahoRvKapDLD1pc", + "NodeID-KLiUDfG672VocEHwbAwGxxFdCHovg1Zp5", + "NodeID-KLsfWtM1cJLWNG21LdwPxaLk83pZo99oV", + "NodeID-KNLkh3KVKFFhBWujmcZ5P3p2fJc3BbdNA", + "NodeID-KNPCT1vLFcbVsFvLzgQqrbCS2kvEyZKBz", + "NodeID-KNm1ZqZXdAMJxggBGJkSs7A4tSwYLAjEb", + "NodeID-KQfghSuT1CskYLvgrQAt6oxUQU7ask2QS", + "NodeID-KQfmZVTmDMvuSEW2kts1rmVNmspyzUwVq", + "NodeID-KRhtytNVPjiSeZMJrqqE1vdHTCoMTHCer", + "NodeID-KU58LYgLuboacvuaNQ1T3VZMSdcTxzFfH", + 
"NodeID-KUgJ4Tiwfx9di8h6oPCkTEcrBBDiDq4hg", + "NodeID-KVB8F5Gy8adXA2zCg3i3m3qJ23NoCDDiA", + "NodeID-KVF3A8uB916AuexeZWS6eeeMdK2ep2vi2", + "NodeID-KXKwcGbqgdKb5ksmyB13p4oP1r8jmsPTW", + "NodeID-KXizASmXqFZsywyF2n41NEs5y6MBbJaa6", + "NodeID-KXuvoHe9KgzW6KfVNHXW4CUNunzWDDC4M", + "NodeID-KZLSjb8umvkj4nUVdARnY55NBKQcYfVV8", + "NodeID-KZiZi4sS44KZjmGtkeF6r1vs2MNN7p2kZ", + "NodeID-KZnxZckzgz9eQWzB3iuvM5GC2QdJLE5yv", + "NodeID-KbqaQBQVzCb44wByiZX4ZChg7CtmX1pVK", + "NodeID-KdEdrTAZpr9rdfqcpXjJxEbHC1oSWnvZP", + "NodeID-KdxMzeB5ubHBCbk8SqgesC4BAUx7fCT6q", + "NodeID-Keh97V9b6k4QvYt4JewbkabSmGoQ4sA3Q", + "NodeID-KfPXTUa3haWHCr1z91ULmY9Pit4VwUQ9C", + "NodeID-Kfqu9wo9FhcXG1jK9nVwbTrBV3d3kzmKR", + "NodeID-KhvVVixdSrBD6u9q25YYNbayxBnNsw9h", + "NodeID-Kk1UZNLJiyf57AL7vYPHntjPYLBdmTyu6", + "NodeID-KkCG4Sktn8zxkaFRV9bi5RaJxZNu1XyMq", + "NodeID-KkZX7xaSAUZ2sGBwpGWLSfLRER4pzR8Fs", + "NodeID-KqKMF5JYhxjkMVpYWQz8zTiv1N1qm2nSf", + "NodeID-KqmFvyCFfsaTcs2XDquCerCfWx3S6Zhj9", + "NodeID-KriCxu39kfZ99PrLwzX2pDUf2Uh8V796m", + "NodeID-KsSwTshRyF2xV75YDNXcEB5psLaBFHKfr", + "NodeID-KsUkk3D4hSKJGM1WBmeu1yj8Ab7h512X1", + "NodeID-KtJJDa5PVQYChXaNd1fKVmF5LSsBB8NHP", + "NodeID-KtJLagoYnWfw2oBnRki63NcguiGjuA2Cr", + "NodeID-KtKf9AuKyBvofsQE7Jn4eAzAndKHu1f4M", + "NodeID-KtSsjRubDzNuGP6FsXphHmtjWx62KCqvf", + "NodeID-KuGgrUTz4ki5oYkE91wHSdXgGL2Gb5fju", + "NodeID-KuStLqzCMZmLv3NigAtKPt2BEE12q71wQ", + "NodeID-KvBcQpCDXbb3w8x7ktMhsx6rCq8eSYcd3", + "NodeID-KvN79EtxbHdT86o7w9GBQEVeRWZ5woAJx", + "NodeID-KxLfzAQyjR1At4rPWmWYGueRUkxSfbVBF", + "NodeID-KzbTvxZgKCWpKcL62Y2HbB54uuDVNTCSo", + "NodeID-L1RcwxtQjb72GiCABuonnV3PWFZKPTV4P", + "NodeID-L1u2bQdMywnBLHVkrMiAoTSCYJTGSwE9k", + "NodeID-L2zTrwT5JitNvBTbC8L8N8i3ANQ1KyNd7", + "NodeID-L3JrbBLmihsqtaXAPXg7acKQbGa9tjhEc", + "NodeID-L3SyWrZH6uorqgExkkmGuXPx9neqELqXA", + "NodeID-L9D4FZhAGu8xGd6LL6Kn4ZqfpXuXSxTXJ", + "NodeID-L9HsJPQ9TF5aXjdx3KnQrh1eWaHJaNXNj", + "NodeID-LBA1YYYBbxwYMMAA9odGhR69d9SCbyq8v", + "NodeID-LCQBGoE9MtECsDjNLohwAP6L4rXDbSooL", + 
"NodeID-LCRMLuFKUYvcdAemtKiudy1XNM9oMN5Wv", + "NodeID-LCVUCE4bvtPopRuYvoWwTopRScmpLHTcg", + "NodeID-LCvxVSwts7BQAS5mVuXH8XYGuVJZSRtCn", + "NodeID-LDgwboNU4iVyy5MQgArg2rcDPssiAe6jT", + "NodeID-LDv41AQj1469CJsYDux9cLZepfaAWKzCr", + "NodeID-LF7ktpNDXo1fjHYN6MYHnitMPrHwN3pup", + "NodeID-LFNBojkAbg1az3ys8htBJHmyQ96hYhFzP", + "NodeID-LFg6CHJ6gQKNjcCMoz97BdDwqTH9JDUbG", + "NodeID-LGPn5jkr3MueEHLLt71wCRire43N5rp5u", + "NodeID-LGrCunBRJPWSqZLEw2QTHruuQtZai5P64", + "NodeID-LJBZ5j8beyjdcnVdNk5fwtThNn4LokYpV", + "NodeID-LKbxWKUAE7s5dtGWhm4Cr8hsosDxLsNzu", + "NodeID-LL3tKdbQ9qCDRMm4cif2pGsxGDXsNNzBp", + "NodeID-LLVMxBRYjgvczhD9u9RJDSmZ48bbXdWx5", + "NodeID-LN7tsaTsJaPDTJHjUPUX4ePk2HFBWZVh9", + "NodeID-LP9FxV6iBCayBiupHa4QiPxtqDWNoBoVx", + "NodeID-LPf42gVqz97N6bogZGXJNTWhgBABEe4qE", + "NodeID-LQP4aci528q8z4KMe8Ug3Gh1aHj4yojPX", + "NodeID-LQhwkBnuj2vjE786WcgsGneFVWcjiH6KA", + "NodeID-LRBqzoj2mB8eD4AHE2oCTBziNdssTfxuf", + "NodeID-LTM3DRAzRtWoWHVtB58s5CohXKYexiqVp", + "NodeID-LUFJGzAfqkmUaTDBVScjvdWtrruDkbFvr", + "NodeID-LUhB7mVaTMnkYLQsqf2RV2PUerJfsq2wW", + "NodeID-LUqMj414dv39Z5SNRXRb3bz9YhLJ2oWrS", + "NodeID-LVqsakFrQLyqNiChxcYqsBzKaKFRMiBsN", + "NodeID-LXCSfcmd7p7LroJzJtYoSAhbsawe47CNk", + "NodeID-LXVb4LZEU53c7XCk2v4bU2qacG7ux5dUB", + "NodeID-LXpULpbU1A4AobEzCSBy6wYLEbogwsMK1", + "NodeID-LYXB7GQj7vd4Ju3y6fMnX43wkAKJoEN9b", + "NodeID-LZMN1BDS3u6pe7T5Umjuhg1Z1MahXkK9C", + "NodeID-Lai2VTTYk897ae9uq6cGk9FbhKD1KHvFS", + "NodeID-Lb2X2hJpi6krux6aRpYSG7xcNvCXNg6M7", + "NodeID-LbAkNKxyv3kCs1C2ngkb91STbKpYHKs1q", + "NodeID-LcBGn2kscipZRgqhKqDCA5dq4WmHuuvai", + "NodeID-LcG4rUqdBPdrnBuNQ543C2pHVwfknyMJJ", + "NodeID-LcNseW1dfEL8FnXQB5HidFpzNu452vuGc", + "NodeID-Le5rVEBBPdgYEcEJpvYeKo36hRs75WE5S", + "NodeID-LeZUTECdpTLux94oAXKruxYgz5ppx7tfr", + "NodeID-LeesBWnR4x3zWaDmYTPQHmNhsMwm6KqWK", + "NodeID-Lf7b2RCtkb9mYi2Y5kZ3mJvJNc6MEjVnC", + "NodeID-Lga7mB6EfqgSJv4VTPaUwDjEtq6utiNQH", + "NodeID-Lgjda988AdujVYMYdwuEM4Tc8aopF6DRk", + "NodeID-LhLiUsJSSSpW8SrXCT319enhrYDybuG3H", + 
"NodeID-LhjS7bvahdBn2BomUUBHHFNbnzLRjri6D", + "NodeID-LhkzuEyUkHR9SW4cMxB55QQRbbGF83zzc", + "NodeID-LjxLvskdhxQ5jAhBKSzKrxU9NQD1DPuKR", + "NodeID-LkDLSLrAW1E7Sga1zng17L1AqrtkyWTGg", + "NodeID-LkTbz4orQbSz9hQsjyY743JUA2v1vPjAf", + "NodeID-LmZ33PJKkQqueDDzPnmVSyHMgFFmaADrb", + "NodeID-LmvNsnX956Es8bWDXqaVm4UTQ5wHC3uf7", + "NodeID-Ln3NxH8SbGS4CukCp9Hs3q7ijKfGqneXz", + "NodeID-LnR5bqTLkmeGjsDKiPdJNSbRPr4pjAURR", + "NodeID-LqLgyDEyMbd1uXfRa6kf8EPjnRhpF4KCF", + "NodeID-LqqUzQs77BPhtbXR7jFBsaNRxdLdfLSnT", + "NodeID-Ltq93eHazD1AtdGNuWb739t4fyvnRLryC", + "NodeID-LvKMfPzfWT1VfAPLef6denZ8hTwSAFMGY", + "NodeID-LvWcDhi9d75T8dwerYcQRoVWD6yDtywFN", + "NodeID-LvpYyT4UY4UAgP4quynTFzffYmjoxqZtR", + "NodeID-LwiNyRJeqRfAGS5phAQvvKhX9xj8LbQ86", + "NodeID-LxrJtyQXpdweZH7LR3ujcEQZJ9VDTW3tC", + "NodeID-LydNf31SmNLztbKL7iNsHXPhnKxz7g3Cw", + "NodeID-LzEWm3dhVaGJeHA3ZzzjaRaAVWVeL4Bi3", + "NodeID-M17yakji8RzPBFJNPXnksjoeabg7BXUJH", + "NodeID-M1SrBrn1szYR6jQJkyrJzd6YXsbAeWSnj", + "NodeID-M1Zsz7o9AiDgUqA6KFE8hrCkvybN6EqcP", + "NodeID-M33qUGE35HbVRppTTMY1qyyfK5T52id2a", + "NodeID-M3VrNcHHd3LGpwm6U6Gw1vfuJHoJzfG7g", + "NodeID-M4AhBqhjwLxGixtjxaLvenUpTe8mxNfhg", + "NodeID-M4FXQatkpbrCWRwdf381tfifDzXz34XUS", + "NodeID-M4ojoWJoEmsVD3vf7KxZ2BnfR5Ue2wr41", + "NodeID-M4psvzu2dS7hQ8gHgCUzXfipVKuFEsoFU", + "NodeID-M51d2sDyE92NjgRaiTnSjSLaG3U9eJ3BD", + "NodeID-M5wUvCymq18DUFjXk9s2UCZmyWzgo9ZtJ", + "NodeID-M64wor3rGStFTBWuzvkGPYYsWUu2dX7S2", + "NodeID-M6tt22qVLnGoaYY4aVUXTpKCCCEiupNHA", + "NodeID-M77VJM2GfeAzueJ6DjumoM5enZma86Zop", + "NodeID-M7GC8ZPn3JACK7sxBXqzeoNW6Q2XZeCpM", + "NodeID-M96ypSznK7ECF4EGd5KfxcxCiboyAZnAQ", + "NodeID-M9LeDgEaTfEN1qnEtLw3CXBwjh2VcGbeG", + "NodeID-M9Q4Fc2StCtFCofjppsy6DC7JDGmtRfUA", + "NodeID-MAg11ijmoZunT94rEdvxhmQToJzgH4bYj", + "NodeID-MBuKQwYxNAttaiugN4eMt1aV7vhLUo7Qh", + "NodeID-MBucJmfSNuMy7CiXWb9Zzd1Gf1yCoKwGy", + "NodeID-MBy2Rde5LJyWunzffJ9FfBj3BVNg1haib", + "NodeID-MEnWKkj5iUC3Z2rNhKuDTm5EVDu2ZUZZo", + "NodeID-MFp5wwuKzSfCNESpdD3avbZBkZ3KMkEeK", + 
"NodeID-MGJcpxBEXDn2Yk94uTXQwxgmYtuX6Ky99", + "NodeID-MHBrAomZpJhaYjqWKWXSS3YusVo7afBn6", + "NodeID-MJvk5Su2t8dFhcGe6z58Fx8A2QAAZTMGd", + "NodeID-MK6HYMwXj3k3HZfzjdegdnd9p9LQ1dk8x", + "NodeID-MKfE4StupLg6ts5v7u6Drs7HhFf1rHjEj", + "NodeID-MLRgfkWbezKNtcH2ghSyE5J3C4Rcxg5Sy", + "NodeID-MLkE5eQrTGa4BoQSR9QLEZKQB9yPJEnuN", + "NodeID-MMemzZih83vtU6TRK5oiJt6bJpNSWm1uE", + "NodeID-MN3pkk9h9anNMuCpUuXPVxGKw1rUw177u", + "NodeID-MNm7zKhCHwqQ3a3oqqEqDSXM9uVFMcEA7", + "NodeID-MP8NsoqJxPMUfCbw1izND6n4i5pTJJkhS", + "NodeID-MPLzWkWAbT3937j7U1uWfxdTMg3iw3BqY", + "NodeID-MSbDAksgjGPoKE4azGEgvrDj1EK2GG8T7", + "NodeID-MSrtdRQ9bEWLhi5d8YLQtnB9E2biVkkrJ", + "NodeID-MTEbgxTNBdL6GWh8SdQMfMXvEU1jC9aws", + "NodeID-MTQbFuBcjcQ7TuVQjvoXC7hiWAWPkouzA", + "NodeID-MTdVMzfENtZAEGdji5EGgiLsLkG3hV9D3", + "NodeID-MTmtdGgEg6gSEVhG5ShyfReR8ubtcZhbb", + "NodeID-MV2a235PcwYscp7qq8N9M4pYQBqMnfZ6T", + "NodeID-MVRcFZmxaMJWedF4oVqsxT2YYkX3kSuGG", + "NodeID-MVUMPzSBAtrHwTD5SZSNPRVTLkYnDPDz4", + "NodeID-MVayMsZd2VrMvEwFMDreTrauJHGBZu3Xa", + "NodeID-MWqZRLNFJzDPKnEuUHJSpEg56ejrzY56V", + "NodeID-MWwstjwXDrzkd3Zqnfaoa9vWtCpb32Fhd", + "NodeID-MXNBsr8xSHE9CKfmyzvBYyEzxAbLuHTAn", + "NodeID-MXxYFnzdP24DpJmdtUKqzMEN7nh69aYqD", + "NodeID-MZV1PpqVuFPa56t7TmfA8VVdXxDSHnQW7", + "NodeID-MZxjrRvxrjvAdaz6pDS82v65xLwckmiti", + "NodeID-Ma3Ztm2A48bCVjfiSmoZS5MKNzKVTRN7j", + "NodeID-Mb528V8a2cdddEYWmxh7ThDGQoQWhMt73", + "NodeID-McTi7BY4csWEnBAb5B9Z2ojNSk21Nt4NK", + "NodeID-MdQZNCCZN5gTEMVaJkSekNYCZkvp5xp4L", + "NodeID-Mdpo4kYtg8eqn61AhjTrZ51exnHLJmcpA", + "NodeID-Mf3rZ5w3aEtM1bzRBwFqhZYZMwks6FYF5", + "NodeID-MfVDQRZ6jSUmke5aofBuNXruEQunxZ5F7", + "NodeID-MgLEPkYaWQD6XqbvRRMk6CfMWe3Zw3P7S", + "NodeID-MikkG849wcdpBdeUJ2HTV2gTEDfeXWseg", + "NodeID-Mm8qGQPb5tHF96ENH1MuYsYwhA5Nj2DHj", + "NodeID-MmTfUacXsPdKryR3WkVteMqoFvyxEdYf4", + "NodeID-Mn2isVXFR85bfjMHPM1DT8EY8wig7YhTW", + "NodeID-MnQoJXMCgPk3zxKQ7kkYdmoZwj2dcDcmb", + "NodeID-Moej2WBv9BCw1pnPGsm31GYvfyY5BHZCD", + "NodeID-Moz5Jcjj2JoDgay9ScwcRDQReSqX8sjyz", + 
"NodeID-Mq7hjS5ySKApHMKCGw16YFo1tW7xA5DXm", + "NodeID-Mrh4UKAifNZtd8RJXqokDzajQnAsZuw8Q", + "NodeID-Mss1z2VZYsNSa4LyqD99vXeNd8h3hUMDA", + "NodeID-MtJWp4WidtWe1jzPf5u8hFtxMy6TCmaRX", + "NodeID-MtiHM2m6jKECmfTDBcfCut99JHzFm91T5", + "NodeID-Mu35k31HAXFbEp1SqJr1uUrue9nwqVFe6", + "NodeID-MuEFvXvbZ964sk1rdQKWNtUc5PJAGkWeL", + "NodeID-MvfTRb81PAx9AHYxbz5xACaoG84PM7YQe", + "NodeID-MwZ3XEbAdDciHW65MpvfPjiyS9D68hiKz", + "NodeID-MyaTMxwSKbtrFRRypqr1nWKJ3EK6di3dF", + "NodeID-MyeSQZnEZDzTfFFQvxZ5qqfqVDwg15NPB", + "NodeID-N1aFXom4C5ALmH33bXHuRpPBo7Nwqeu8t", + "NodeID-N21EWyi4Y5D8MNapwZ5tXs9D5A3rrfPi1", + "NodeID-N2nTLuaVwKk8EF3NBsSygCVmpGHwNz2Nz", + "NodeID-N2t1CAS75972obgtRPHwVanMnRF1rRo1B", + "NodeID-N35BjCN6ARikprxERf4WiMeUnh2ZsfwJJ", + "NodeID-N3WnyXmV89jbtQgmjqpoxBxpi5Rah86az", + "NodeID-N3e9W3EngjabGnTZVqyZwunVcbCdrY5Qy", + "NodeID-N4eXezpVzti1ssWvyQKqkuGB15dJNJsp5", + "NodeID-N4r8sLfeDVa2c9eDFpE4wuxJccUku3LKu", + "NodeID-N4vaumNTt2kixxUTtWEBgy4vwsxnuahc2", + "NodeID-N5FWBAn5j1UVxb3nH45N9DyXEAaeZiU4q", + "NodeID-N6c8ZxrQwMWWMGn3GVbZvW3GwC6PyWYMz", + "NodeID-N6z9WSBMoSxLSRtdPDz8jZvasAskYcCrd", + "NodeID-N7FjoUy3LrDi1kSYeaBwZjja6H2Y2NLsN", + "NodeID-N7WPJ9DXvEx8xLJdtDswNh1JdNmCKejSq", + "NodeID-N7iZFQdtUfPRcWGs8DB3JQRZt3Pq33eoZ", + "NodeID-N8MfE7gFMQUdH3wirj6HXkYZeeKd13xAx", + "NodeID-N8tijV4qwWMep4STmLzWJEkqbg6bSdCmn", + "NodeID-N95GoeyiprvxNj6E3G8QvYg3gpQ7x8NKE", + "NodeID-N9S6JJT2VKRZ48vA5vtWgboDnfzXXycQn", + "NodeID-N9c2YTRzGpRNp2J6qyPEEiAvyrViL4cGH", + "NodeID-N9qur4q5HCDb7xK6b89peZ5dKJxEWPdFm", + "NodeID-NAFCCsFtU8WXbgpgbciz6zUzwnRZAE99L", + "NodeID-NBjvikZcadcyCJ2hEJsUn9EcCjys5hav9", + "NodeID-NCVrEpmYjJBHqj9vx9r5usFiMd1PooCfZ", + "NodeID-NDMPc8h9L6keJw8F82NBobc874zZ6bDa6", + "NodeID-NEeK7hrVjQUHCwFQVMTmUaVDr33EH6WAw", + "NodeID-NGspMWkA7FgT6DnDghg4B95pQ468j7DK8", + "NodeID-NGxhFFwufwUSXGHjxdCUZW1hNpodfTrmj", + "NodeID-NHBv1LqcCdrcwatzQ9pGbGXPZQTi462Ur", + "NodeID-NHEESrJJNnyCPn5c5Uyp9EZYHCCkZwzuW", + "NodeID-NJZWC8HxGtLkxjQk5F9KSG44WC3gCm8a9", + 
"NodeID-NJaSwZPtuiN9TxaFLVF2sk3eNNHGm2UC9", + "NodeID-NKStfwoiZwBnZEFka1rqT54AiZ4CN3ksi", + "NodeID-NLBgQfsmCGFzXDehvwvmzjyfmkWQndx9L", + "NodeID-NMfBW4PDVkVZhJzJa6SiCDZ4M7wAkXbAc", + "NodeID-NN5j35c8DsuMydM5icoxhKePqc7AjwH1K", + "NodeID-NN6iut3SAWZqM56kbg8EhtpTU2NkMJgWV", + "NodeID-NNWrYMQgNFRNbvwAUA8grXiAVVE9VEyV6", + "NodeID-NNrbnhLBjgzBvhi3JXK48ckVFcg6FXVnn", + "NodeID-NNyxDSovcVYmXHacSM8aK2pg67oPjkRrE", + "NodeID-NPexpo2AXQ7RZC3wCKugj9q4y599pSfrb", + "NodeID-NSAi8z5XtoagdADeMNAPuWxfZpzUxacYB", + "NodeID-NSHmxWXYXAvHLdHqNyKZjP8MgSSu1dNnW", + "NodeID-NST51yBNtgVkXWuofyacbR1MbqYJEYdor", + "NodeID-NSUM9tzmeZ1DujarhTwjXAcnCeAH273v3", + "NodeID-NSngpSyxjXz3ErfjsFoc6CpfVPLxApJen", + "NodeID-NTTNYBYdkf3n5U4zrZknt1nst9rHzpuCj", + "NodeID-NTvJb2gZdvYU2kaMUrMbtu79KUMgyJNPW", + "NodeID-NUur4HBKXj8GS5TX11jZhtPHLH6zMa6L5", + "NodeID-NVSSRYMjjYyMkUpt7AHXWnKNTTJCd8UbS", + "NodeID-NVkHd9WCbVAr79r3gtA7AmKBkVkNL9EXb", + "NodeID-NWwQRYczsB8gf2tiiLLrk1foSgNUyrFLe", + "NodeID-NYjgvA2mJy4kTCQeEHnSuycnu5Kgo9gEq", + "NodeID-NZEnXQpa55wAhwpcUZFgSAREXW1FLdkzQ", + "NodeID-NZGsXrgoEuExZdJn8WxmyrLt4Cxb7LFyD", + "NodeID-NaGEfmLttvK5r93aqaoxLKoADVcs2HASy", + "NodeID-NcPHhZ4wgGywviLVxEQ6s2q5NZm6Wgrqb", + "NodeID-NcSLm9skkUrfA9mKGjdtmT163iDjqAw6H", + "NodeID-NcZtrWEjPY7XDT5PHgZbwXLCW3LGBjxui", + "NodeID-NdpTU44ZgiACprPmr9evdh4NS6Knz1PfR", + "NodeID-NfgHajZM47C3CaAXFa1B2YrwZmc4EdSgk", + "NodeID-Nfz4ZGQ8ZPvNQ3yHoRNMT7aTCiFYjMnSA", + "NodeID-NgkksDCWH73BnfgN2ALUBykc2kftsquff", + "NodeID-NhL9H8VSfhrktdjmQQ3P47wYXzRfQwBiy", + "NodeID-NiW2qTm2fSKHGuMqeo1twtM8j7LoAGzHR", + "NodeID-NkYLNRp4S6exbWamVvMzUUpXvHeVEzLR6", + "NodeID-Nom39tjDi1Vf51VxuaZvJiDeQHwbyg6MU", + "NodeID-NovHTxGVasNU2y33YPwmQ39DbYDkXiEfe", + "NodeID-Nr584bLpGgbCUbZFSBaBz3Xum5wpca9Ym", + "NodeID-NrVSfY4zCbPjYTe4XPjmQB7qLJhYW3V6h", + "NodeID-Nrs246dXUZmALw7a2HRTANx6U1LeawsKq", + "NodeID-NsVdtyjprgWXLsvmuWvYe749tx3tdbY55", + "NodeID-Nsdn6eemD7KUpKEaGYjYFuJjxC36c3qXC", + "NodeID-NtDMBzQGPnp7suCbyCMcdE2CFU4JXcvwX", + 
"NodeID-NtX4Q64xT4Yad8wdhQ4erSG6mi3DPUwzX", + "NodeID-NtgR3kDx7Mm2szCiDTs3rtWaqUDWRMDyY", + "NodeID-NuJhcXgmQMbFJwm9yTTmdvGN86qygnXwP", + "NodeID-NuXW8S95Hvv2eq6JoY1qCDmK1h8yuAAt3", + "NodeID-NudTZgq7DKGQsY1Gkt1v3ZG7pgaWbbBuj", + "NodeID-NvsbNmirter6SLhta1kkvoEjcgvaHxEt9", + "NodeID-Nwr26Q5CPcRfSHQKgQ5y3ayVPgjziWkkT", + "NodeID-NxgyyDw6YGHprTJjZv7qh9X1yoj3yZN7u", + "NodeID-Nz93c8UB78eEVVtfxpcecxHmiy2gZ4iAi", + "NodeID-NzQnRDwTYZJPwYMcDkyNGCnhH7S1HQZgy", + "NodeID-Nza7rHanhSFgJ6m9D8fFWgrx7fqbxp7q9", + "NodeID-NzaVh4SaZh7zaT5uFmHyAT99zRbQhzvvd", + "NodeID-P2EM7MDMhKVo4LLgSqtxYSyc39wfPe2TL", + "NodeID-P2Jp7sd5mv4ZGBqThYfjHkz1EyJUXoMcE", + "NodeID-P2xEUig1YeBTwrUF3Xj52pxaNH6gSHjsb", + "NodeID-P3EhUmKXJ5F2BiZw1TwNftAC1y4XyGufz", + "NodeID-P3budG9yRu9ph2UedCGdf3KaLQKQLehy2", + "NodeID-P4tSY1ZLb4q2xTdYGXCkgwXWeQN7mUUWq", + "NodeID-P5hTsAQgfSLJumMDVDK2773U4gjyPtYnr", + "NodeID-P5q5numCvCNo6GmD8vhDqq1tU25Yok4Ky", + "NodeID-P6M41urqadxF7NBcxEHNFxKkGPkHjC95v", + "NodeID-P9FGdfCp7sHRcUwHERp2hzo98ifUaCZ5x", + "NodeID-P9kf9VSRrgD8B1MQKMsjravHvMHVxvoaZ", + "NodeID-PDZYy4rfhPZWskL32EMt45zuLD2rWjDLs", + "NodeID-PERaNX2PDtAr8zkXgbZvwXHfjq4VKtbew", + "NodeID-PF9fY3h6ZMMqPmTZZUwc7wyLnZ1rSoz5u", + "NodeID-PGonFPDqHgyAWCFaoLxQvAQEz6uJYzZpW", + "NodeID-PH5DLzGXEhe6RMHgod3CH9T3s3WVfQm8Z", + "NodeID-PJA7yH2ZVtccSj4RrB94a8MtHboS9xozZ", + "NodeID-PKiUH3xw8SmVNVJsAqFV1mXwDXToFVATi", + "NodeID-PL7xq98yQ9kTYyGDvVQg3efowKeHug3HN", + "NodeID-PLs2JZXeTtfZvYx4DJqkA3sKXz8ynvKLM", + "NodeID-PPH8k8RGmrTuc6Dun92atkbDkgs3sj6ws", + "NodeID-PPYetyVA4dk3bYdNtvTsy5Cqg4L31EUze", + "NodeID-PPoo9VFqdSWsrZZHrnrBBfi3uE2cw9JgW", + "NodeID-PRxbx3LZSnwvLqFa5audJBsSqAJe9rmsP", + "NodeID-PT45awwLjTTiEqvFufS4nCkVN7wtEsDmf", + "NodeID-PUw5p37RZKVjpK8kxBa12TBtZZdhykGUP", + "NodeID-PVm9UudEC7N6HJ3jus5KJ1Rufck88g1Dt", + "NodeID-PY3QRSGTdYzmPTQEwrH2AmYUMBnrcge9Y", + "NodeID-PY5kT7pHUEZPmh2QK5k6w3H6pKhFGUfq1", + "NodeID-PYFkkThWT79Sc7QqHijAc1yZjSqfXu6Eb", + "NodeID-PYJbbkTHqyuowa3EKkkxeYxRcXUhxehkF", + 
"NodeID-PYecKsWbPTayLsHDdopKjB1Mw5NP3kDUJ", + "NodeID-PYmftFnrKf6y9k6kXXuB5GWwjLtkP7sLB", + "NodeID-PYwMEam56vM4Nix3AG1vufHZbMvdv8zwy", + "NodeID-PZ66baswBL8xsSHaFVG2Yx7qH8MkBfXSK", + "NodeID-PZvapthv5iRB7txuVNTrKCo2fWovBExLh", + "NodeID-PaSvEqWq5ZESHSuNGTZeQL6sybzsQzoZ5", + "NodeID-PcDGg5a4P3XK1E7PXS5YedYL7vi8vSXvB", + "NodeID-PcN5Zh5GNvUJnoiaqkvRvMYBo4oAMDvTs", + "NodeID-PczCXQXPxNgeKy3P4jtAt1ycvrjvghVw1", + "NodeID-Pd7Hexv94PQNVnEbYXWUttEAAybG34Yiy", + "NodeID-Pe1pUMoFAts11s2kA3F7EPmzLyZwj6Msv", + "NodeID-Pe5xmD9DhTrSCDykMqFdG8CazG3QyJxos", + "NodeID-PfY5X2GWsSaj8y6usEji4HZmokHHdWsqi", + "NodeID-Pfj9qXo6LGR3goGzcLSJsCZwYNFFndV75", + "NodeID-PhB7iV3nDxiotJnAMetF6puzg6fxTqZCP", + "NodeID-PhDmCZH4R8VFjVzQLRBquKMVss5sKukw3", + "NodeID-Phds7RN13xWBmpe9MTPhJde3hajVpUB38", + "NodeID-PmH5X1xavW8PjgvkW1u4K9qTHAbMx36hq", + "NodeID-PmJQ7UjZo5db9XL494EbbEufAbYxqWswU", + "NodeID-PnuVZDXvdSNRbC92ibq5w1FtPvbTFvwmM", + "NodeID-PqPwxtYAt6AmikigAwkRzTQCaeMqPvpff", + "NodeID-PtG5NyNK2mGku7fSkRDAUyaZwtj5hcbFV", + "NodeID-PuF1sG8hh4CLLznER6vf4d1o6bWZc5Vof", + "NodeID-PuYX3u6gfLje9bt6fCDFHkSwqaL8VSpv6", + "NodeID-Puv7ksUTmwsxqEx7XLikRicKtKss6QNNi", + "NodeID-Pv5X1dBk1wfZJwRqm8Aj9QecxvA87r31j", + "NodeID-PvssbRjYWsCmvck5umz9V2qMRXMHPpz9W", + "NodeID-PxXndSoasDvvx9ax2h9bWGb9iF3LcR11h", + "NodeID-PxmdCJV62VecySivang3vVymEBwjPQeTm", + "NodeID-PzEsAB7RCHTFwHszw3yfs99GshYm6Qs5b", + "NodeID-PzQafFPgbCLkvMgrP7nh7aCG1eKypdNi2", + "NodeID-Q2NiN9VSKEYK11GMMFZm1r9vYQ2D7srq1", + "NodeID-Q44XVjVEhgxacoq2ETj7yLBuUra9ru2BA", + "NodeID-Q4mUNVKvwEHF3tkxQcCYbrQA8XWdybLzv", + "NodeID-Q5rNbr5yFwotdbeRPGeTagHP1hbHYuaJg", + "NodeID-Q5xvrPQSjHhJX8eLKbBnZMWbS648NqnsZ", + "NodeID-Q6ZLZ1EifMyNd6evH83FJrVGyauaUSP9S", + "NodeID-QBowbP1jz8zA86CtSzRwsFsxr5q1rY6pB", + "NodeID-QCfTXZ1cdPt5XT1jiJfycHKTFo6Yg164L", + "NodeID-QCkFTaWJg5ixVhZoNbePxzs9HWrTutVy4", + "NodeID-QDLrtBngMA6mFveLNZTXxKQVEYx518fG8", + "NodeID-QDfLjUuR9D5a6G5ZAQdPPbggJxtLnbPj3", + "NodeID-QHX9w6oLs8zjHdBvbzx1ZadHvy9aUNLr5", + 
"NodeID-QHa12bzXnupukwvFdWkRrRzRkC1Cz9nXd", + "NodeID-QJMNxwTLjdg9C1vY2BKNtapWUuoeCgHFj", + "NodeID-QKGoUvqcgormCoMj6yPw9isY7DX9H4mdd", + "NodeID-QKdEYKG21HJ5DCQQaotjG4rJmcTgjno7f", + "NodeID-QKfhLAkwYPwAZNKTanfuNfmm7cYRTzfYo", + "NodeID-QKn4ujiEcP1n8iFbEJqNsKZUYbUG4fHAc", + "NodeID-QL7j3UxnxJz2HUn6jNdqH7gqcGX5XcPug", + "NodeID-QLC3sD3RU2SkeWhSrzM8tfb5QF3XYgDgr", + "NodeID-QmTPCR33deSWVwZ4Ri2jUw6cDAhoUwZP", + "NodeID-SWPRmNkYP7vP1y1EKnuH78QZNyJ2ZpZM", + "NodeID-TgaMGAEpkXKAisumnnmzzRzVkexbSkB7", + "NodeID-UC1qmKMoMcfShU6idcreEAsxSHs3BDBj", + "NodeID-V3fYZv2vHtLB5gQF548JRnSyk4tZfZ2o", + "NodeID-V5R4XjWDtjs8Zi2g3TRNzngnGJiXm2ic", + "NodeID-VEhPJXs67q8rs8hifAfBxYUgE1s8YKfz", + "NodeID-WAHX8MXekynX6xZSFiSewY9Qxnhq5HZh", + "NodeID-WRz6mTt6LCdSSvuHdqsrBYxi4hdfecTW", + "NodeID-XYAkZN1GfMwcYRRyGLiZsGV2thZbqDGT", + "NodeID-YdCKQdXecSPWqABWcRtPZvBaLd3MPPR8", + "NodeID-ZVLPagHUUrQ2vB7mjsWQ44wNGf5SVKKW", + "NodeID-ZwShn4JT1Bg91QphD3VBLryA88GFnAg5", + "NodeID-aFRSyUykQjScZiWASVqmdTGzBfV9KKj1", + "NodeID-bx2UN8z5nfLMTFphQqnYeyngQnccL6tN", + "NodeID-cme6KToKFLCwqj27F7a7Ys9iC4cLrqMz", + "NodeID-duaNgYtghtCRzj2rkiRDDs51BPX8CCjn", + "NodeID-fr4FFtJ5PF5UZpyA3RyoxhNks6Yfhu7u", + "NodeID-i9uQjR8P5Wzd1wTLeALkZ2oXqKjMKQbs", + "NodeID-iTKqsK6Qhjj9RT2ZA9PVAC96FTiDvK7k", + "NodeID-jB3tEp5oBcHYRLLmNaMgP4v59dyr1a9E", + "NodeID-jjTi6fViUUkPwZQbE9N8Bm7AThmVyjGW", + "NodeID-jz1mR5zZukizoZEZ1Wa17h18Kou1Rfzt", + "NodeID-kCLme7NNnAVrbQGZadzgWwtoPJpkQcJR", + "NodeID-kXTWkRtur8X9vchkcXAiG6qD8PDogo2N", + "NodeID-kZNuQMHhydefgnwjYX1fhHMpRNAs9my1", + "NodeID-krdJFoXmC68zd4pSFCwMnCM4dhiR8gnq", + "NodeID-mEbcN58z2esHFnnXHeR3T2UTfsR5H6WJ", + "NodeID-mfUNFSM9ak3NuCvaiFCtcmEcHF9ptfrU", + "NodeID-mwty8tviuGBvDEG8e6vTQByDZN2MKnNo", + "NodeID-mzhghLeMzv8qrRrK76wABwrf3LrL6mP1", + "NodeID-nJm8Ltnf7MDCiuP3PzU4hv3RqXXKmeqd", + "NodeID-oZyu1tEJrBeFkquoqZ5rYpvsEAkfFiwD", + "NodeID-rKCpkJKNAZeE8sKEHduq8Co7GwrAdTJb", + "NodeID-rS4TWt8iuAbQTsg6ez6u5tXvUzicRzuq", + "NodeID-rU9hLEWJCxRksiPuEYm2qqof6uwWEDeo", + 
"NodeID-rroLHLixQkZMTsTnePdWhDrGpWenYX7t", + "NodeID-sAyLfHTC8ZQRn5HtfjNKxXDbnZTaJUqa", + "NodeID-tHkVEqdhWPdX2vJGbqDWebw2VPrho4Aq", + "NodeID-uhGb1JoQ5sv9oJaECWYN7H5PabwxehN2", + "NodeID-vV5S1LUtAxCWq9ASiDSjZpX88y2WcFGM", + "NodeID-vrzTEabKUgCBXYg3oHFwJFkR1Np6Avh5", + "NodeID-w6kJAMpdT4hB9jE4NnEfBm47PzD1UXfW", + "NodeID-xHLKdNkbh94JMSFp2vU7fbwEA712vub7", + "NodeID-xQU1Ntg4uYkkSgXivV2ELwr8s8FovRCS", + "NodeID-xVVbmFL3eww4dmBkgQ2akBxcnEy4VyLh", + "NodeID-z3Z8s8xoYQhN8A1fmNZcQx9CyXN6zxJz", + "NodeID-z3rX3CnWLi5KBXQ8nVB4W8NnLNdUBcPD", + "NodeID-zbFM8qH7MnQ8uo6rm4tZaq6vhbF4cpgt" + ] +} \ No newline at end of file diff --git a/go.mod b/go.mod index 13dc82f4bd4e..cf4ca86ee324 100644 --- a/go.mod +++ b/go.mod @@ -1,30 +1,30 @@ module github.com/ava-labs/avalanchego // Changes to the minimum golang version must also be replicated in -// scripts/build_avalanche.sh -// Dockerfile +// CONTRIBUTING.md // README.md -// go.mod (here, only major.minor can be specified) -go 1.21 +// go.mod (here) +go 1.21.11 require ( github.com/DataDog/zstd v1.5.2 - github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/coreth v0.12.11-rc.3.0.20240220045216-6960ed45c201 - github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 + github.com/antithesishq/antithesis-sdk-go v0.3.8 + github.com/ava-labs/coreth v0.13.5-rc.0 + github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 github.com/btcsuite/btcd/btcutil v1.1.3 - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 + github.com/compose-spec/compose-go v1.20.2 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 - github.com/ethereum/go-ethereum v1.12.0 - github.com/golang-jwt/jwt/v4 v4.3.0 + github.com/ethereum/go-ethereum v1.13.8 github.com/google/btree v1.1.2 github.com/google/renameio/v2 v2.0.0 + github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/rpc v1.2.0 
github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/huin/goupnp v1.0.3 + github.com/huin/goupnp v1.3.0 github.com/jackpal/gateway v1.0.6 github.com/jackpal/go-nat-pmp v1.0.2 github.com/leanovate/gopter v0.2.9 @@ -32,14 +32,14 @@ require ( github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d github.com/onsi/ginkgo/v2 v2.13.1 - github.com/onsi/gomega v1.29.0 github.com/pires/go-proxyproto v0.6.2 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 + github.com/prometheus/common v0.42.0 github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cast v1.5.0 - github.com/spf13/cobra v1.0.0 + github.com/spf13/cobra v1.5.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.8.4 @@ -47,109 +47,121 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/thepudds/fzgen v0.4.2 github.com/tyler-smith/go-bip32 v1.0.0 - go.opentelemetry.io/otel v1.11.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 - go.opentelemetry.io/otel/sdk v1.11.0 - go.opentelemetry.io/otel/trace v1.11.0 - go.uber.org/goleak v1.2.1 + go.opentelemetry.io/otel v1.22.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 + go.opentelemetry.io/otel/sdk v1.22.0 + go.opentelemetry.io/otel/trace v1.22.0 + go.uber.org/goleak v1.3.0 go.uber.org/mock v0.4.0 go.uber.org/zap v1.26.0 - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp v0.0.0-20231127185646-65229373498e - golang.org/x/net v0.19.0 - golang.org/x/sync v0.5.0 - golang.org/x/term v0.15.0 - 
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af + golang.org/x/net v0.23.0 + golang.org/x/sync v0.6.0 + golang.org/x/term v0.18.0 + golang.org/x/time v0.3.0 gonum.org/v1/gonum v0.11.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 - google.golang.org/grpc v1.58.3 - google.golang.org/protobuf v1.31.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 + google.golang.org/grpc v1.62.0 + google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/BurntSushi/toml v1.2.1 // indirect github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e // indirect github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec // indirect - github.com/VictoriaMetrics/fastcache v1.10.0 // indirect + github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect - github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/consensys/bavard v0.1.13 // indirect + github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 // indirect + github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set/v2 v2.1.0 // indirect + github.com/distribution/reference v0.5.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect - github.com/dop251/goja 
v0.0.0-20230605162241-28ee0ee714f3 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect + github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect github.com/frankban/quicktest v1.14.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-stack/stack v1.8.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect - github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c // indirect + github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.0.0 // 
indirect github.com/klauspost/compress v1.15.15 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect + github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rivo/uniseg v0.2.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sanity-io/litter v1.5.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect - github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa // indirect + github.com/urfave/cli/v2 v2.25.7 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 
// indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/zondax/hid v0.9.2 // indirect github.com/zondax/ledger-go v0.14.3 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/sys v0.15.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.16.0 // indirect - google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + golang.org/x/tools v0.17.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 895258dcbaf2..f969c7512b4f 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,8 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= @@ -50,31 +50,27 @@ github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUq github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec h1:1Qb69mGp/UtRPn422BH4/Y4Q3SLUrD9KHuDkm8iodFc= github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= -github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= +github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= +github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= +github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/coreth v0.12.11-rc.3.0.20240220045216-6960ed45c201 h1:mNQmcCQpl1wlC6Fgw6awK1hJaYnmEWk/r8u+UBVv8fw= -github.com/ava-labs/coreth v0.12.11-rc.3.0.20240220045216-6960ed45c201/go.mod h1:lQsgG8/6Lp6tJICedwdFmy9LT2NFNqGpPGQCo21y08Q= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 h1:mg9Uw6oZFJKytJxgxnl3uxZOs/SB8CVHg6Io4Tf99Zc= -github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= +github.com/ava-labs/coreth v0.13.5-rc.0 h1:PJQbR9o2RrW3j9ba4r1glXnmM2PNAP3xR569+gMcBd0= +github.com/ava-labs/coreth v0.13.5-rc.0/go.mod h1:cm5c12xo5NiTgtbmeduv8i2nYdzgkczz9Wm3yiwwTRU= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95 h1:dOVbtdnZL++pENdTCNZ1nu41eYDQkTML4sWebDnnq8c= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20240610153809-9c955cc90a95/go.mod h1:pJxaT9bUgeRNVmNRgtCHb7sFDIRKy7CzTQVi8gGNT6g= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= @@ -99,14 +95,11 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= -github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -121,33 +114,37 @@ github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q1 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags 
v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A= +github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= +github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/compose-spec/compose-go v1.20.2 h1:u/yfZHn4EaHGdidrZycWpxXgFffjYULlTbRfJ51ykjQ= +github.com/compose-spec/compose-go v1.20.2/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM= +github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= 
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 h1:d28BXYi+wUpz1KBmiF9bWrjEMacUEREV6MBi2ODnrfQ= +github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs= +github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA= +github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -163,15 +160,19 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgraph-io/badger v1.6.0/go.mod 
h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod 
h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -182,12 +183,12 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= -github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= +github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= +github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= +github.com/ethereum/go-ethereum v1.13.8 h1:1od+thJel3tM52ZUNQwvpYOeRHlbkVFZ5S8fhi0Lgsg= +github.com/ethereum/go-ethereum v1.13.8/go.mod h1:sc48XYQxCzH3fG9BcrXCOOgQk2JfZzNAmIKnceogzsA= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= @@ -202,10 +203,11 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod 
h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= +github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46/go.mod h1:QNpY22eby74jVhqH4WhDLDwxc/vqsern6pW+u2kbkpc= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -215,12 +217,9 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= @@ -228,9 +227,6 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= -github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -239,20 +235,14 @@ github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gogo/googleapis 
v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= -github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -279,8 +269,8 @@ 
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -299,12 +289,13 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -324,9 +315,10 @@ github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8q github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -335,18 +327,13 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= -github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -356,14 +343,15 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= +github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= -github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= +github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -382,21 +370,18 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags 
v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= @@ -405,9 +390,7 @@ github.com/klauspost/compress v1.9.7/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -436,12 +419,15 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 
h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= @@ -453,6 +439,9 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -460,7 +449,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= @@ -470,7 +458,6 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96d github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -492,6 +479,8 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -502,7 +491,6 @@ github.com/pingcap/errors 
v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTw github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8= github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -510,26 +498,17 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod 
h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= @@ -539,7 +518,6 @@ 
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -550,12 +528,10 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 
github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= @@ -563,8 +539,8 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -572,13 +548,11 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -602,11 +576,10 @@ github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5f github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/thepudds/fzgen v0.4.2 h1:HlEHl5hk2/cqEomf2uK5SA/FeJc12s/vIHmOG+FbACw= github.com/thepudds/fzgen v0.4.2/go.mod h1:kHCWdsv5tdnt32NIHYDdgq083m6bMtaY0M+ipiO9xWE= -github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= -github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= -github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tyler-smith/go-bip32 v1.0.0 h1:sDR9juArbUgX+bO/iblgZnMPeWY1KZMUC2AFUJdv5KE= github.com/tyler-smith/go-bip32 v1.0.0/go.mod h1:onot+eHknzV4BVPwrzqY5OoVpyCvnwD7lMawL5aQupE= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= @@ -615,8 +588,8 @@ github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= -github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -626,7 +599,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= @@ -646,44 +618,38 @@ github.com/zondax/hid v0.9.2 
h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= -go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 h1:0dly5et1i/6Th3WHn0M6kYiJfFNzhhxanrJ0bOfnjEo= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0/go.mod h1:+Lq4/WkdCkjbGcBMVHHg2apTbv8oMBf29QCnyCCJjNQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 h1:eyJ6njZmH16h9dOKCi7lMswAnGsSOwgTqWzfxqcuNr8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0/go.mod h1:FnDp7XemjN3oZ3xGunnfOUTVwd2XcvLbtRAuOSU3oc8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 h1:j2RFV0Qdt38XQ2Jvi4WIsQ56w8T7eSirYbMw19VXRDg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0/go.mod h1:pILgiTEtrqvZpoiuGdblDgS5dbIaTgDrkIuKfEFkt+A= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 h1:v29I/NbVp7LXQYMFZhU6q17D0jSEbYOAVONlrO1oH5s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0/go.mod h1:/RpLsmbQLDO1XCbWAM4S6TSwj8FKwwgyKKyqtvVfAnw= 
-go.opentelemetry.io/otel/sdk v1.11.0 h1:ZnKIL9V9Ztaq+ME43IUi/eo22mNsb6a7tGfzaOWB5fo= -go.opentelemetry.io/otel/sdk v1.11.0/go.mod h1:REusa8RsyKaq0OlyangWXaw97t2VogoO4SSEeKkSTAk= -go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= -go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/sdk v1.22.0 
h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -696,8 +662,8 @@ golang.org/x/crypto 
v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -736,13 +702,10 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -751,7 +714,6 @@ golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -784,8 +746,8 @@ golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod 
h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -795,7 +757,6 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -808,13 +769,10 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -831,7 +789,6 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -858,9 +815,7 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -873,18 +828,21 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 
h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -901,9 +859,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -959,8 +916,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1018,7 +975,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -1033,17 +989,15 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto 
v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -1055,16 +1009,12 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod 
h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1077,10 +1027,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1096,13 +1044,10 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1112,6 +1057,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod 
h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1124,3 +1071,5 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/indexer/codec.go b/indexer/codec.go index afde47502ac3..795a59461914 100644 --- a/indexer/codec.go +++ b/indexer/codec.go @@ -5,7 +5,6 @@ package indexer import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -16,7 +15,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt) if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { diff --git a/indexer/examples/p-chain/main.go b/indexer/examples/p-chain/main.go index b690ebf0efb9..8c2a17c86360 100644 --- a/indexer/examples/p-chain/main.go +++ b/indexer/examples/p-chain/main.go @@ -9,7 +9,7 @@ import ( "time" "github.com/ava-labs/avalanchego/indexer" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/wallet/subnet/primary" platformvmblock "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -29,12 +29,12 @@ func main() { container, err := client.GetContainerByIndex(ctx, nextIndex) if 
err != nil { time.Sleep(time.Second) - log.Printf("polling for next accepted block\n") + log.Println("polling for next accepted block") continue } platformvmBlockBytes := container.Bytes - proposerVMBlock, err := proposervmblock.Parse(container.Bytes, version.DefaultUpgradeTime) + proposerVMBlock, err := proposervmblock.Parse(container.Bytes, constants.PlatformChainID) if err == nil { platformvmBlockBytes = proposerVMBlock.Block() } diff --git a/indexer/examples/x-chain-blocks/main.go b/indexer/examples/x-chain-blocks/main.go index 2687e5a03c68..5b57d4d35e47 100644 --- a/indexer/examples/x-chain-blocks/main.go +++ b/indexer/examples/x-chain-blocks/main.go @@ -8,10 +8,10 @@ import ( "log" "time" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/wallet/chain/x" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary" ) @@ -20,6 +20,7 @@ import ( func main() { var ( uri = primary.LocalAPIURI + "/ext/index/X/block" + xChainID = ids.FromStringOrPanic("2eNy1mUFdmaxXNj1eQHUe7Np4gju9sJsEtWQ4MX3ToiNKuADed") client = indexer.NewClient(uri) ctx = context.Background() nextIndex uint64 @@ -28,17 +29,17 @@ func main() { container, err := client.GetContainerByIndex(ctx, nextIndex) if err != nil { time.Sleep(time.Second) - log.Printf("polling for next accepted block\n") + log.Println("polling for next accepted block") continue } - proposerVMBlock, err := block.Parse(container.Bytes, version.DefaultUpgradeTime) + proposerVMBlock, err := block.Parse(container.Bytes, xChainID) if err != nil { log.Fatalf("failed to parse proposervm block: %s\n", err) } avmBlockBytes := proposerVMBlock.Block() - avmBlock, err := x.Parser.ParseBlock(avmBlockBytes) + avmBlock, err := builder.Parser.ParseBlock(avmBlockBytes) if err != nil { log.Fatalf("failed to parse avm block: %s\n", 
err) } diff --git a/indexer/service.md b/indexer/service.md new file mode 100644 index 000000000000..416cc4e2055b --- /dev/null +++ b/indexer/service.md @@ -0,0 +1,584 @@ +--- +tags: [AvalancheGo APIs] +description: This page is an overview of the Index API associated with AvalancheGo. +sidebar_label: Index API +pagination_label: Index API +--- + +# Index API + +AvalancheGo can be configured to run with an indexer. That is, it saves (indexes) every container (a +block, vertex or transaction) it accepts on the X-Chain, P-Chain and C-Chain. To run AvalancheGo +with indexing enabled, set command line flag +[--index-enabled](/nodes/configure/avalanchego-config-flags.md#apis) to true. **AvalancheGo +will only index containers that are accepted when running with `--index-enabled` set to true.** To +ensure your node has a complete index, run a node with a fresh database and `--index-enabled` set to +true. The node will accept every block, vertex and transaction in the network history during +bootstrapping, ensuring your index is complete. It is OK to turn off your node if it is running with +indexing enabled. If it restarts with indexing still enabled, it will accept all containers that +were accepted while it was offline. The indexer should never fail to index an accepted block, vertex +or transaction. + +Indexed containers (that is, accepted blocks, vertices and transactions) are timestamped with the +time at which the node accepted that container. Note that if the container was indexed during +bootstrapping, other nodes may have accepted the container much earlier. Every container indexed +during bootstrapping will be timestamped with the time at which the node bootstrapped, not when it +was first accepted by the network. + +If `--index-enabled` is changed to `false` from `true`, AvalancheGo won't start as doing so would +cause a previously complete index to become incomplete, unless the user explicitly says to do so +with `--index-allow-incomplete`. 
This protects you from accidentally running with indexing disabled, +after previously running with it enabled, which would result in an incomplete index. + +This document shows how to query data from AvalancheGo's Index API. The Index API is only available +when running with `--index-enabled`. + +## Go Client + +There is a Go implementation of an Index API client. See documentation +[here](https://pkg.go.dev/github.com/ava-labs/avalanchego/indexer#Client). This client can be used +inside a Go program to connect to an AvalancheGo node that is running with the Index API enabled and +make calls to the Index API. + +## Format + +This API uses the `json 2.0` RPC format. For more information on making JSON RPC calls, see +[here](/reference/standards/guides/issuing-api-calls.md). + +## Endpoints + +Each chain has one or more index. To see if a C-Chain block is accepted, for example, send an API +call to the C-Chain block index. To see if an X-Chain vertex is accepted, for example, send an API +call to the X-Chain vertex index. + +### C-Chain Blocks + +```text +/ext/index/C/block +``` + +### P-Chain Blocks + +```text +/ext/index/P/block +``` + +### X-Chain Transactions + +```text +/ext/index/X/tx +``` + +### X-Chain Blocks + +```text +/ext/index/X/block +``` + +:::caution + +To ensure historical data can be accessed, the `/ext/index/X/vtx` is still accessible, +even though it is no longer populated with chain data since the Cortina activation. +If you are using `V1.10.0` or higher, you need to migrate to using the `/ext/index/X/block` endpoint. + +::: + +## Methods + +### `index.getContainerByID` + +Get container by ID. + +**Signature:** + +```sh +index.getContainerByID({ + id: string, + encoding: string +}) -> { + id: string, + bytes: string, + timestamp: string, + encoding: string, + index: string +} +``` + +**Request:** + +- `id` is the container's ID +- `encoding` is `"hex"` only. 
+ +**Response:** + +- `id` is the container's ID +- `bytes` is the byte representation of the container +- `timestamp` is the time at which this node accepted the container +- `encoding` is `"hex"` only. +- `index` is how many containers were accepted in this index before this one + +**Example Call:** + +```sh +curl --location --request POST 'localhost:9650/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.getContainerByID", + "params": { + "id": "6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "encoding":"hex" + }, + "id": 1 +}' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "id": "6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "bytes": "0x00000000000400003039d891ad56056d9c01f18f43f58b5c784ad07a4a49cf3d1f11623804b5cba2c6bf00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000070429ccc5c5eb3b80000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000050429d069189e0000000000010000000000000000c85fc1980a77c5da78fe5486233fc09a769bb812bcb2cc548cf9495d046b3f1b00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000007000003a352a38240000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000100000009000000011cdb75d4e0b0aeaba2ebc1ef208373fedc1ebbb498f8385ad6fb537211d1523a70d903b884da77d963d56f163191295589329b5710113234934d0fd59c01676b00b63d2108", + "timestamp": "2021-04-02T15:34:00.262979-07:00", + "encoding": "hex", + "index": "0" + } +} +``` + +### `index.getContainerByIndex` + +Get container by index. The first container accepted is at index 0, the second is at index 1, etc. 
+ +**Signature:** + +```sh +index.getContainerByIndex({ + index: uint64, + encoding: string +}) -> { + id: string, + bytes: string, + timestamp: string, + encoding: string, + index: string +} +``` + +**Request:** + +- `index` is how many containers were accepted in this index before this one +- `encoding` is `"hex"` only. + +**Response:** + +- `id` is the container's ID +- `bytes` is the byte representation of the container +- `timestamp` is the time at which this node accepted the container +- `index` is how many containers were accepted in this index before this one +- `encoding` is `"hex"` only. + +**Example Call:** + +```sh +curl --location --request POST 'localhost:9650/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.getContainerByIndex", + "params": { + "index":0, + "encoding": "hex" + }, + "id": 1 +}' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "id": "6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "bytes": "0x00000000000400003039d891ad56056d9c01f18f43f58b5c784ad07a4a49cf3d1f11623804b5cba2c6bf00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000070429ccc5c5eb3b80000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000050429d069189e0000000000010000000000000000c85fc1980a77c5da78fe5486233fc09a769bb812bcb2cc548cf9495d046b3f1b00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000007000003a352a38240000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000100000009000000011cdb75d4e0b0aeaba2ebc1ef208373fedc1ebbb498f8385ad6fb537211d1523a70d903b884da77d963d56f163191295589329b5710113234934d0fd59c01676b00b63d2108", + "timestamp": "2021-04-02T15:34:00.262979-07:00", + "encoding": "hex", + "index": "0" + } +} +``` + +### 
`index.getContainerRange` + +Returns the transactions at index [`startIndex`], [`startIndex+1`], ... , [`startIndex+n-1`] + +- If [`n`] == 0, returns an empty response (for example: null). +- If [`startIndex`] > the last accepted index, returns an error (unless the above apply.) +- If [`n`] > [`MaxFetchedByRange`], returns an error. +- If we run out of transactions, returns the ones fetched before running out. +- `numToFetch` must be in `[0,1024]`. + +**Signature:** + +```sh +index.getContainerRange({ + startIndex: uint64, + numToFetch: uint64, + encoding: string +}) -> []{ + id: string, + bytes: string, + timestamp: string, + encoding: string, + index: string +} +``` + +**Request:** + +- `startIndex` is the beginning index +- `numToFetch` is the number of containers to fetch +- `encoding` is `"hex"` only. + +**Response:** + +- `id` is the container's ID +- `bytes` is the byte representation of the container +- `timestamp` is the time at which this node accepted the container +- `encoding` is `"hex"` only. 
+- `index` is how many containers were accepted in this index before this one + +**Example Call:** + +```sh +curl --location --request POST 'localhost:9650/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.getContainerRange", + "params": { + "startIndex":0, + "numToFetch":100, + "encoding": "hex" + }, + "id": 1 +}' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": [ + { + "id": "6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "bytes": "0x00000000000400003039d891ad56056d9c01f18f43f58b5c784ad07a4a49cf3d1f11623804b5cba2c6bf00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000070429ccc5c5eb3b80000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000050429d069189e0000000000010000000000000000c85fc1980a77c5da78fe5486233fc09a769bb812bcb2cc548cf9495d046b3f1b00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000007000003a352a38240000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000100000009000000011cdb75d4e0b0aeaba2ebc1ef208373fedc1ebbb498f8385ad6fb537211d1523a70d903b884da77d963d56f163191295589329b5710113234934d0fd59c01676b00b63d2108", + "timestamp": "2021-04-02T15:34:00.262979-07:00", + "encoding": "hex", + "index": "0" + } + ] +} +``` + +### `index.getIndex` + +Get a container's index. + +**Signature:** + +```sh +index.getIndex({ + id: string, + encoding: string +}) -> { + index: string +} +``` + +**Request:** + +- `id` is the ID of the container to fetch +- `encoding` is `"hex"` only. 
+ +**Response:** + +- `index` is how many containers were accepted in this index before this one + +**Example Call:** + +```sh +curl --location --request POST 'localhost:9650/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.getIndex", + "params": { + "id":"6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "encoding": "hex" + }, + "id": 1 +}' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "index": "0" + }, + "id": 1 +} +``` + +### `index.getLastAccepted` + +Get the most recently accepted container. + +**Signature:** + +```sh +index.getLastAccepted({ + encoding:string +}) -> { + id: string, + bytes: string, + timestamp: string, + encoding: string, + index: string +} +``` + +**Request:** + +- `encoding` is `"hex"` only. + +**Response:** + +- `id` is the container's ID +- `bytes` is the byte representation of the container +- `timestamp` is the time at which this node accepted the container +- `encoding` is `"hex"` only. 
+ +**Example Call:** + +```sh +curl --location --request POST 'localhost:9650/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.getLastAccepted", + "params": { + "encoding": "hex" + }, + "id": 1 +}' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "id": "6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "bytes": "0x00000000000400003039d891ad56056d9c01f18f43f58b5c784ad07a4a49cf3d1f11623804b5cba2c6bf00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000070429ccc5c5eb3b80000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db000000050429d069189e0000000000010000000000000000c85fc1980a77c5da78fe5486233fc09a769bb812bcb2cc548cf9495d046b3f1b00000001dbcf890f77f49b96857648b72b77f9f82937f28a68704af05da0dc12ba53f2db00000007000003a352a38240000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c0000000100000009000000011cdb75d4e0b0aeaba2ebc1ef208373fedc1ebbb498f8385ad6fb537211d1523a70d903b884da77d963d56f163191295589329b5710113234934d0fd59c01676b00b63d2108", + "timestamp": "2021-04-02T15:34:00.262979-07:00", + "encoding": "hex", + "index": "0" + } +} +``` + +### `index.isAccepted` + +Returns true if the container is in this index. + +**Signature:** + +```sh +index.isAccepted({ + id: string, + encoding: string +}) -> { + isAccepted: bool +} +``` + +**Request:** + +- `id` is the ID of the container to fetch +- `encoding` is `"hex"` only. 
+ +**Response:** + +- `isAccepted` displays if the container has been accepted + +**Example Call:** + +```sh +curl --location --request POST 'localhost:9650/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.isAccepted", + "params": { + "id":"6fXf5hncR8LXvwtM8iezFQBpK5cubV6y1dWgpJCcNyzGB1EzY", + "encoding": "hex" + }, + "id": 1 +}' +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "isAccepted": true + }, + "id": 1 +} +``` + +## Example: Iterating Through X-Chain Transaction + +Here is an example of how to iterate through all transactions on the X-Chain. + +:::warning +To help users to try out this example and other index APIs, we have set up a testing +indexer node located at [https://indexer-demo.avax.network](https://indexer-demo.avax.network). This +indexer node is not for production use. We may change or shut it down at any time without notice. +::: + +You can use the Index API to get the ID of every transaction that has been accepted on the X-Chain, +and use the X-Chain API method `avm.getTx` to get a human-readable representation of the +transaction. + +To get an X-Chain transaction by its index (the order it was accepted in), use Index API method +[index.getlastaccepted](#indexgetlastaccepted). + +For example, to get the _second_ transaction (note that `"index":1`) accepted on the X-Chain, do: + +```sh +curl --location --request POST 'https://indexer-demo.avax.network/ext/index/X/tx' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "jsonrpc": "2.0", + "method": "index.getContainerByIndex", + "params": { + "encoding":"hex", + "index":1 + }, + "id": 1 +}' +``` + +This returns the ID of the second transaction accepted in the X-Chain's history. To get the third +transaction on the X-Chain, use `"index":2`, and so on. 
+ +The above API call gives the response below: + +```json +{ + "jsonrpc": "2.0", + "result": { + "id": "ZGYTSU8w3zUP6VFseGC798vA2Vnxnfj6fz1QPfA9N93bhjJvo", + "bytes": "0x00000000000000000001ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b0000000221e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000070000000129f6afc0000000000000000000000001000000017416792e228a765c65e2d76d28ab5a16d18c342f21e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000700000222afa575c00000000000000000000000010000000187d6a6dd3cd7740c8b13a410bea39b01fa83bb3e000000016f375c785edb28d52edb59b54035c96c198e9d80f5f5f5eee070592fe9465b8d0000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff0000000500000223d9ab67c0000000010000000000000000000000010000000900000001beb83d3d29f1247efb4a3a1141ab5c966f46f946f9c943b9bc19f858bd416d10060c23d5d9c7db3a0da23446b97cd9cf9f8e61df98e1b1692d764c84a686f5f801a8da6e40", + "timestamp": "2021-11-04T00:42:55.01643414Z", + "encoding": "hex", + "index": "1" + }, + "id": 1 +} +``` + +The ID of this transaction is `ZGYTSU8w3zUP6VFseGC798vA2Vnxnfj6fz1QPfA9N93bhjJvo`. 
+ +To get the transaction by its ID, use API method `avm.getTx`: + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.getTx", + "params" :{ + "txID":"ZGYTSU8w3zUP6VFseGC798vA2Vnxnfj6fz1QPfA9N93bhjJvo", + "encoding": "json" + } +}' -H 'content-type:application/json;' https://api.avax.network/ext/bc/X +``` + +Response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "tx": { + "unsignedTx": { + "networkID": 1, + "blockchainID": "2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM", + "outputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": ["X-avax1wst8jt3z3fm9ce0z6akj3266zmgccdp03hjlaj"], + "amount": 4999000000, + "locktime": 0, + "threshold": 1 + } + }, + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": ["X-avax1slt2dhfu6a6qezcn5sgtagumq8ag8we75f84sw"], + "amount": 2347999000000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "qysTYUMCWdsR3MctzyfXiSvoSf6evbeFGRLLzA4j2BjNXTknh", + "outputIndex": 0, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 2352999000000, + "signatureIndices": [0] + } + } + ], + "memo": "0x" + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "0xbeb83d3d29f1247efb4a3a1141ab5c966f46f946f9c943b9bc19f858bd416d10060c23d5d9c7db3a0da23446b97cd9cf9f8e61df98e1b1692d764c84a686f5f801" + ] + } + } + ] + }, + "encoding": "json" + }, + "id": 1 +} +``` diff --git a/ipcs/chainipc.go b/ipcs/chainipc.go deleted file mode 100644 index fc8e230e5f9a..000000000000 --- a/ipcs/chainipc.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package ipcs - -import ( - "fmt" - "path/filepath" - - "go.uber.org/zap" - "golang.org/x/exp/maps" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -const ( - // DefaultBaseURL can be used as a reasonable default value for the base URL - DefaultBaseURL = "/tmp" - - ipcIdentifierPrefix = "ipc" - ipcConsensusIdentifier = "consensus" - ipcDecisionsIdentifier = "decisions" -) - -type context struct { - log logging.Logger - networkID uint32 - path string -} - -// ChainIPCs maintains IPCs for a set of chains -type ChainIPCs struct { - context - chains map[ids.ID]*EventSockets - blockAcceptorGroup snow.AcceptorGroup - txAcceptorGroup snow.AcceptorGroup - vertexAcceptorGroup snow.AcceptorGroup -} - -// NewChainIPCs creates a new *ChainIPCs that writes consensus and decision -// events to IPC sockets -func NewChainIPCs( - log logging.Logger, - path string, - networkID uint32, - blockAcceptorGroup snow.AcceptorGroup, - txAcceptorGroup snow.AcceptorGroup, - vertexAcceptorGroup snow.AcceptorGroup, - defaultChainIDs []ids.ID, -) (*ChainIPCs, error) { - cipcs := &ChainIPCs{ - context: context{ - log: log, - networkID: networkID, - path: path, - }, - chains: make(map[ids.ID]*EventSockets), - blockAcceptorGroup: blockAcceptorGroup, - txAcceptorGroup: txAcceptorGroup, - vertexAcceptorGroup: vertexAcceptorGroup, - } - for _, chainID := range defaultChainIDs { - if _, err := cipcs.Publish(chainID); err != nil { - return nil, err - } - } - return cipcs, nil -} - -// Publish creates a set of eventSockets for the given chainID -func (cipcs *ChainIPCs) Publish(chainID ids.ID) (*EventSockets, error) { - if es, ok := cipcs.chains[chainID]; ok { - cipcs.log.Info("returning existing event sockets", - zap.Stringer("blockchainID", chainID), - ) - return es, nil - } - - es, err := newEventSockets( - 
cipcs.context, - chainID, - cipcs.blockAcceptorGroup, - cipcs.txAcceptorGroup, - cipcs.vertexAcceptorGroup, - ) - if err != nil { - cipcs.log.Error("can't create ipcs", - zap.Error(err), - ) - return nil, err - } - - cipcs.chains[chainID] = es - cipcs.log.Info("created IPC sockets", - zap.Stringer("blockchainID", chainID), - zap.String("consensusURL", es.ConsensusURL()), - zap.String("decisionsURL", es.DecisionsURL()), - ) - return es, nil -} - -// Unpublish stops the eventSocket for the given chain if it exists. It returns -// whether or not the socket existed and errors when trying to close it -func (cipcs *ChainIPCs) Unpublish(chainID ids.ID) (bool, error) { - chainIPCs, ok := cipcs.chains[chainID] - if !ok { - return false, nil - } - delete(cipcs.chains, chainID) - return true, chainIPCs.stop() -} - -// GetPublishedBlockchains returns the chains that are currently being published -func (cipcs *ChainIPCs) GetPublishedBlockchains() []ids.ID { - return maps.Keys(cipcs.chains) -} - -func (cipcs *ChainIPCs) Shutdown() error { - cipcs.log.Info("shutting down chain IPCs") - - errs := wrappers.Errs{} - for _, ch := range cipcs.chains { - errs.Add(ch.stop()) - } - return errs.Err -} - -func ipcURL(ctx context, chainID ids.ID, eventType string) string { - return filepath.Join(ctx.path, fmt.Sprintf("%d-%s-%s", ctx.networkID, chainID.String(), eventType)) -} diff --git a/ipcs/eventsocket.go b/ipcs/eventsocket.go deleted file mode 100644 index 0dbbe1c92e5a..000000000000 --- a/ipcs/eventsocket.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ipcs - -import ( - "errors" - "os" - "syscall" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/ipcs/socket" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var _ snow.Acceptor = (*EventSockets)(nil) - -// EventSockets is a set of named eventSockets -type EventSockets struct { - consensusSocket *eventSocket - decisionsSocket *eventSocket -} - -// newEventSockets creates a *ChainIPCs with both consensus and decisions IPCs -func newEventSockets( - ctx context, - chainID ids.ID, - blockAcceptorGroup snow.AcceptorGroup, - txAcceptorGroup snow.AcceptorGroup, - vertexAcceptorGroup snow.AcceptorGroup, -) (*EventSockets, error) { - consensusIPC, err := newEventIPCSocket( - ctx, - chainID, - ipcConsensusIdentifier, - blockAcceptorGroup, - vertexAcceptorGroup, - ) - if err != nil { - return nil, err - } - - decisionsIPC, err := newEventIPCSocket( - ctx, - chainID, - ipcDecisionsIdentifier, - blockAcceptorGroup, - txAcceptorGroup, - ) - if err != nil { - return nil, err - } - - return &EventSockets{ - consensusSocket: consensusIPC, - decisionsSocket: decisionsIPC, - }, nil -} - -// Accept delivers a message to the underlying eventSockets -func (ipcs *EventSockets) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { - if ipcs.consensusSocket != nil { - if err := ipcs.consensusSocket.Accept(ctx, containerID, container); err != nil { - return err - } - } - - if ipcs.decisionsSocket != nil { - if err := ipcs.decisionsSocket.Accept(ctx, containerID, container); err != nil { - return err - } - } - - return nil -} - -// stop closes the underlying eventSockets -func (ipcs *EventSockets) stop() error { - errs := wrappers.Errs{} - - if ipcs.consensusSocket != nil { - errs.Add(ipcs.consensusSocket.stop()) - } - - if ipcs.decisionsSocket != nil { - 
errs.Add(ipcs.decisionsSocket.stop()) - } - - return errs.Err -} - -// ConsensusURL returns the URL of socket receiving consensus events -func (ipcs *EventSockets) ConsensusURL() string { - return ipcs.consensusSocket.URL() -} - -// DecisionsURL returns the URL of socket receiving decisions events -func (ipcs *EventSockets) DecisionsURL() string { - return ipcs.decisionsSocket.URL() -} - -// eventSocket is a single IPC socket for a single chain -type eventSocket struct { - url string - log logging.Logger - socket *socket.Socket - unregisterFn func() error -} - -// newEventIPCSocket creates a *eventSocket for the given chain and -// EventDispatcher that writes to a local IPC socket -func newEventIPCSocket( - ctx context, - chainID ids.ID, - name string, - snowmanAcceptorGroup snow.AcceptorGroup, - avalancheAcceptorGroup snow.AcceptorGroup, -) (*eventSocket, error) { - var ( - url = ipcURL(ctx, chainID, name) - ipcName = ipcIdentifierPrefix + "-" + name - ) - - err := os.Remove(url) - if err != nil && !errors.Is(err, syscall.ENOENT) { - return nil, err - } - - eis := &eventSocket{ - log: ctx.log, - url: url, - socket: socket.NewSocket(url, ctx.log), - unregisterFn: func() error { - return utils.Err( - snowmanAcceptorGroup.DeregisterAcceptor(chainID, ipcName), - avalancheAcceptorGroup.DeregisterAcceptor(chainID, ipcName), - ) - }, - } - - if err := eis.socket.Listen(); err != nil { - if err := eis.socket.Close(); err != nil { - return nil, err - } - return nil, err - } - - if err := snowmanAcceptorGroup.RegisterAcceptor(chainID, ipcName, eis, false); err != nil { - if err := eis.stop(); err != nil { - return nil, err - } - return nil, err - } - - if err := avalancheAcceptorGroup.RegisterAcceptor(chainID, ipcName, eis, false); err != nil { - if err := eis.stop(); err != nil { - return nil, err - } - return nil, err - } - - return eis, nil -} - -// Accept delivers a message to the eventSocket -func (eis *eventSocket) Accept(_ *snow.ConsensusContext, _ ids.ID, container 
[]byte) error { - eis.socket.Send(container) - return nil -} - -// stop unregisters the event handler and closes the eventSocket -func (eis *eventSocket) stop() error { - eis.log.Info("closing Chain IPC") - return utils.Err( - eis.unregisterFn(), - eis.socket.Close(), - ) -} - -// URL returns the URL of the socket -func (eis *eventSocket) URL() string { - return eis.url -} diff --git a/ipcs/socket/socket.go b/ipcs/socket/socket.go deleted file mode 100644 index 77f2d6fdb8e2..000000000000 --- a/ipcs/socket/socket.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package socket - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "os" - "sync" - "sync/atomic" - "syscall" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var ( - // ErrMessageTooLarge is returned when reading a message that is larger than - // our max size - ErrMessageTooLarge = errors.New("message too large") - - _ error = errReadTimeout{} -) - -// Socket manages sending messages over a socket to many subscribed clients -type Socket struct { - log logging.Logger - addr string - accept acceptFn - connLock *sync.RWMutex - conns map[net.Conn]struct{} - quitCh chan struct{} - doneCh chan struct{} - listener net.Listener // the current listener -} - -// NewSocket creates a new socket object for the given address. It does not open -// the socket until Listen is called. 
-func NewSocket(addr string, log logging.Logger) *Socket { - return &Socket{ - log: log, - addr: addr, - accept: accept, - connLock: &sync.RWMutex{}, - conns: map[net.Conn]struct{}{}, - quitCh: make(chan struct{}), - doneCh: make(chan struct{}), - } -} - -// Listen starts listening on the socket for new connection -func (s *Socket) Listen() error { - l, err := listen(s.addr) - if err != nil { - return err - } - s.listener = l - - // Start a loop that accepts new connections until told to quit - go func() { - for { - select { - case <-s.quitCh: - close(s.doneCh) - return - default: - s.accept(s, l) - } - } - }() - - return nil -} - -// Send writes the given message to all connection clients -func (s *Socket) Send(msg []byte) { - var conns []net.Conn - - // Get a copy of connections - s.connLock.RLock() - if len(s.conns) > 0 { - conns = make([]net.Conn, len(s.conns)) - i := 0 - for conn := range s.conns { - conns[i] = conn - i++ - } - } - s.connLock.RUnlock() - - // Write to each connection - if len(conns) == 0 { - return - } - - // Prefix the message with an 8 byte length - lenBytes := [8]byte{} - binary.BigEndian.PutUint64(lenBytes[:], uint64(len(msg))) - for _, conn := range conns { - for _, byteSlice := range [][]byte{lenBytes[:], msg} { - if _, err := conn.Write(byteSlice); err != nil { - s.removeConn(conn) - s.log.Debug("failed to write message", - zap.Stringer("remoteAddress", conn.RemoteAddr()), - zap.Error(err), - ) - } - } - } -} - -// Close closes the socket by cutting off new connections, closing all -// existing ones, and then zero'ing out the connection pool -func (s *Socket) Close() error { - // Signal to the event loop to stop and wait for it to signal back - close(s.quitCh) - - listener := s.listener - s.listener = nil - - // close the listener to break the loop - err := listener.Close() - - <-s.doneCh - - // Zero out the connection pool but save a reference so we can close them all - s.connLock.Lock() - conns := s.conns - s.conns = nil - 
s.connLock.Unlock() - - // Close all connections that were open at the time of shutdown - errs := wrappers.Errs{Err: err} - for conn := range conns { - if conn != nil { - errs.Add(conn.Close()) - } - } - return errs.Err -} - -func (s *Socket) Running() bool { - return s.listener != nil -} - -func (s *Socket) removeConn(c net.Conn) { - s.connLock.Lock() - delete(s.conns, c) - s.connLock.Unlock() -} - -// Client is a read-only connection to a socket -type Client struct { - net.Conn - maxMessageSize int64 -} - -// Recv waits for a message from the socket. It's guaranteed to either return a -// complete message or an error -func (c *Client) Recv() ([]byte, error) { - // Read length - var sz uint64 - if err := binary.Read(c.Conn, binary.BigEndian, &sz); err != nil { - if isTimeoutError(err) { - return nil, errReadTimeout{c.Conn.RemoteAddr()} - } - return nil, err - } - - if sz > uint64(atomic.LoadInt64(&c.maxMessageSize)) { - return nil, ErrMessageTooLarge - } - - // Create buffer for entire message and read it all in - msg := make([]byte, sz) - if _, err := io.ReadFull(c.Conn, msg); err != nil { - if isTimeoutError(err) { - return nil, errReadTimeout{c.Conn.RemoteAddr()} - } - return nil, err - } - - return msg, nil -} - -// SetMaxMessageSize sets the maximum size to allow for messages -func (c *Client) SetMaxMessageSize(s int64) { - atomic.StoreInt64(&c.maxMessageSize, s) -} - -// Close closes the underlying socket connection -func (c *Client) Close() error { - return c.Conn.Close() -} - -// errReadTimeout is returned a socket read times out -type errReadTimeout struct { - addr net.Addr -} - -func (e errReadTimeout) Error() string { - return fmt.Sprintf("read from %s timed out", e.addr) -} - -// acceptFn takes accepts connections from a Listener and gives them to a Socket -type acceptFn func(*Socket, net.Listener) - -// accept is the default acceptFn for sockets. 
It accepts the next connection -// from the given listener and adds it to the Socket's connection list -func accept(s *Socket, l net.Listener) { - conn, err := l.Accept() - if err != nil { - if !s.Running() { - return - } - s.log.Error("socket accept error", - zap.Error(err), - ) - } - if conn, ok := conn.(*net.TCPConn); ok { - if err := conn.SetLinger(0); err != nil { - s.log.Warn("failed to set no linger", - zap.Error(err), - ) - } - if err := conn.SetNoDelay(true); err != nil { - s.log.Warn("failed to set socket nodelay", - zap.Error(err), - ) - } - } - s.connLock.Lock() - s.conns[conn] = struct{}{} - s.connLock.Unlock() -} - -// isTimeoutError checks if an error is a timeout as per the net.Error interface -func isTimeoutError(err error) bool { - iErr, ok := err.(net.Error) - if !ok { - return false - } - return iErr.Timeout() -} - -// isSyscallError checks if an error is one of the given syscall.Errno codes -func isSyscallError(err error, codes ...syscall.Errno) bool { - opErr, ok := err.(*net.OpError) - if !ok { - return false - } - syscallErr, ok := opErr.Err.(*os.SyscallError) - if !ok { - return false - } - errno, ok := syscallErr.Err.(syscall.Errno) - if !ok { - return false - } - for _, code := range codes { - if errno == code { - return true - } - } - return false -} diff --git a/ipcs/socket/socket_test.go b/ipcs/socket/socket_test.go deleted file mode 100644 index a2c1ec638754..000000000000 --- a/ipcs/socket/socket_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package socket - -import ( - "net" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" -) - -func TestSocketSendAndReceive(t *testing.T) { - require := require.New(t) - - var ( - connCh chan net.Conn - socketName = "/tmp/pipe-test.sock" - msg = append([]byte("avalanche"), make([]byte, 1000000)...) 
- msgLen = int64(len(msg)) - ) - - // Create socket and client; wait for client to connect - socket := NewSocket(socketName, logging.NoLog{}) - socket.accept, connCh = newTestAcceptFn(t) - require.NoError(socket.Listen()) - - client, err := Dial(socketName) - require.NoError(err) - <-connCh - - // Start sending in the background - go func() { - for { - socket.Send(msg) - } - }() - - // Receive message and compare it to what was sent - receivedMsg, err := client.Recv() - require.NoError(err) - require.Equal(msg, receivedMsg) - - // Test max message size - client.SetMaxMessageSize(msgLen) - _, err = client.Recv() - require.NoError(err) - - client.SetMaxMessageSize(msgLen - 1) - _, err = client.Recv() - require.ErrorIs(err, ErrMessageTooLarge) -} - -// newTestAcceptFn creates a new acceptFn and a channel that receives all new -// connections -func newTestAcceptFn(t *testing.T) (acceptFn, chan net.Conn) { - connCh := make(chan net.Conn) - - return func(s *Socket, l net.Listener) { - conn, err := l.Accept() - require.NoError(t, err) - - s.connLock.Lock() - s.conns[conn] = struct{}{} - s.connLock.Unlock() - - connCh <- conn - }, connCh -} diff --git a/ipcs/socket/socket_unix.go b/ipcs/socket/socket_unix.go deleted file mode 100644 index 14d5aabde4e3..000000000000 --- a/ipcs/socket/socket_unix.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -//go:build !windows && !plan9 && !js -// +build !windows,!plan9,!js - -package socket - -import ( - "net" - "os" - "syscall" - "time" - - "github.com/ava-labs/avalanchego/utils/constants" -) - -var staleSocketTimeout = 100 * time.Millisecond - -func listen(addr string) (net.Listener, error) { - uAddr, err := net.ResolveUnixAddr("unix", addr) - if err != nil { - return nil, err - } - - // Try to listen on the socket. 
- l, err := net.ListenUnix("unix", uAddr) - if err == nil { - return l, nil - } - - // Check to see if the socket is stale and remove it if it is. - if err := removeIfStaleUnixSocket(addr); err != nil { - return nil, err - } - - // Try listening again now that it shouldn't be stale. - return net.ListenUnix("unix", uAddr) -} - -// Dial creates a new *Client connected to the given address over a Unix socket -func Dial(addr string) (*Client, error) { - unixAddr, err := net.ResolveUnixAddr("unix", addr) - if err != nil { - return nil, err - } - - c, err := net.DialUnix("unix", nil, unixAddr) - if err != nil { - if isTimeoutError(err) { - return nil, errReadTimeout{c.RemoteAddr()} - } - return nil, err - } - - return &Client{Conn: c, maxMessageSize: int64(constants.DefaultMaxMessageSize)}, nil -} - -// removeIfStaleUnixSocket takes in a path and removes it iff it is a socket -// that is refusing connections -func removeIfStaleUnixSocket(socketPath string) error { - // Ensure it's a socket; if not return without an error - st, err := os.Stat(socketPath) - if err != nil { - return nil - } - if st.Mode()&os.ModeType != os.ModeSocket { - return nil - } - - // Try to connect - conn, err := net.DialTimeout("unix", socketPath, staleSocketTimeout) - switch { - // The connection was refused so this socket is stale; remove it - case isSyscallError(err, syscall.ECONNREFUSED): - return os.Remove(socketPath) - - // The socket is alive so close this connection and leave the socket alone - case err == nil: - return conn.Close() - - default: - return nil - } -} diff --git a/ipcs/socket/socket_windows.go b/ipcs/socket/socket_windows.go deleted file mode 100644 index 99590cb674b4..000000000000 --- a/ipcs/socket/socket_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -//go:build windows -// +build windows - -package socket - -import ( - "net" - - "github.com/Microsoft/go-winio" - - "github.com/ava-labs/avalanchego/utils/constants" -) - -// listen creates a net.Listen backed by a Windows named pipe -func listen(addr string) (net.Listener, error) { - return winio.ListenPipe(windowsPipeName(addr), nil) -} - -// Dial creates a new *Client connected to a Windows named pipe -func Dial(addr string) (*Client, error) { - c, err := winio.DialPipe(windowsPipeName(addr), nil) - if err != nil { - return nil, err - } - return &Client{Conn: c, maxMessageSize: int64(constants.DefaultMaxMessageSize)}, nil -} - -// windowsPipeName turns an address into a valid Windows named pipes name -func windowsPipeName(addr string) string { - return `\\.\pipe\` + addr -} diff --git a/main/main.go b/main/main.go index 5651bf61940f..88de72dadbbe 100644 --- a/main/main.go +++ b/main/main.go @@ -4,6 +4,7 @@ package main import ( + "encoding/json" "errors" "fmt" "os" @@ -29,8 +30,24 @@ func main() { os.Exit(1) } + if v.GetBool(config.VersionJSONKey) && v.GetBool(config.VersionKey) { + fmt.Println("can't print both JSON and human readable versions") + os.Exit(1) + } + + if v.GetBool(config.VersionJSONKey) { + versions := version.GetVersions() + jsonBytes, err := json.MarshalIndent(versions, "", " ") + if err != nil { + fmt.Printf("couldn't marshal versions: %s\n", err) + os.Exit(1) + } + fmt.Println(string(jsonBytes)) + os.Exit(0) + } + if v.GetBool(config.VersionKey) { - fmt.Print(version.String) + fmt.Println(version.GetVersions().String()) os.Exit(0) } diff --git a/message/creator.go b/message/creator.go index 8040bccb1861..86c9af1f1076 100644 --- a/message/creator.go +++ b/message/creator.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" ) var _ Creator = (*creator)(nil) @@ -28,14 +27,11 @@ type creator struct { func NewCreator( 
log logging.Logger, metrics prometheus.Registerer, - parentNamespace string, compressionType compression.Type, maxMessageTimeout time.Duration, ) (Creator, error) { - namespace := metric.AppendNamespace(parentNamespace, "codec") builder, err := newMsgBuilder( log, - namespace, metrics, maxMessageTimeout, ) diff --git a/message/fields.go b/message/fields.go index 08e744fab911..453315f4323a 100644 --- a/message/fields.go +++ b/message/fields.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" - "github.com/ava-labs/avalanchego/utils/constants" ) var ( @@ -52,13 +51,7 @@ var ( _ requestIDGetter = (*p2p.AppRequest)(nil) _ requestIDGetter = (*p2p.AppResponse)(nil) - _ engineTypeGetter = (*p2p.GetAcceptedFrontier)(nil) - _ engineTypeGetter = (*p2p.GetAccepted)(nil) _ engineTypeGetter = (*p2p.GetAncestors)(nil) - _ engineTypeGetter = (*p2p.Get)(nil) - _ engineTypeGetter = (*p2p.Put)(nil) - _ engineTypeGetter = (*p2p.PushQuery)(nil) - _ engineTypeGetter = (*p2p.PullQuery)(nil) _ deadlineGetter = (*p2p.GetStateSummaryFrontier)(nil) _ deadlineGetter = (*p2p.GetAcceptedStateSummary)(nil) @@ -106,14 +99,10 @@ func GetRequestID(m any) (uint32, bool) { return requestID, true } - // AppGossip is the only message currently not containing a requestID - // Here we assign the requestID already in use for gossiped containers - // to allow a uniform handling of all messages - if _, ok := m.(*p2p.AppGossip); ok { - return constants.GossipMsgRequestID, true - } - - return 0, false + // AppGossip is the only inbound message not containing a requestID. For + // ease of handling, imagine that it does have a requestID. 
+ _, ok := m.(*p2p.AppGossip) + return 0, ok } type engineTypeGetter interface { diff --git a/message/inbound_msg_builder.go b/message/inbound_msg_builder.go index b32dbc5d480d..b7b6048d89c8 100644 --- a/message/inbound_msg_builder.go +++ b/message/inbound_msg_builder.go @@ -117,16 +117,14 @@ func InboundGetAcceptedFrontier( requestID uint32, deadline time.Duration, nodeID ids.NodeID, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, op: GetAcceptedFrontierOp, message: &p2p.GetAcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), }, expiration: time.Now().Add(deadline), } @@ -156,7 +154,6 @@ func InboundGetAccepted( deadline time.Duration, containerIDs []ids.ID, nodeID ids.NodeID, - engineType p2p.EngineType, ) InboundMessage { containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) @@ -168,7 +165,6 @@ func InboundGetAccepted( RequestId: requestID, Deadline: uint64(deadline), ContainerIds: containerIDBytes, - EngineType: engineType, }, expiration: time.Now().Add(deadline), } @@ -201,7 +197,6 @@ func InboundPushQuery( container []byte, requestedHeight uint64, nodeID ids.NodeID, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, @@ -212,7 +207,6 @@ func InboundPushQuery( Deadline: uint64(deadline), Container: container, RequestedHeight: requestedHeight, - EngineType: engineType, }, expiration: time.Now().Add(deadline), } @@ -225,7 +219,6 @@ func InboundPullQuery( containerID ids.ID, requestedHeight uint64, nodeID ids.NodeID, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, @@ -236,7 +229,6 @@ func InboundPullQuery( Deadline: uint64(deadline), ContainerId: containerID[:], RequestedHeight: requestedHeight, - EngineType: engineType, }, expiration: time.Now().Add(deadline), } 
diff --git a/message/inbound_msg_builder_test.go b/message/inbound_msg_builder_test.go index 37f713a387bf..92d18b6836be 100644 --- a/message/inbound_msg_builder_test.go +++ b/message/inbound_msg_builder_test.go @@ -23,7 +23,6 @@ func Test_newMsgBuilder(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 10*time.Second, ) @@ -45,7 +44,6 @@ func TestInboundMsgBuilder(t *testing.T) { acceptedContainerID = ids.GenerateTestID() summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} heights = []uint64{1000, 2000} - engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) t.Run( @@ -162,7 +160,6 @@ func TestInboundMsgBuilder(t *testing.T) { requestID, deadline, nodeID, - engineType, ) end := time.Now() @@ -174,7 +171,6 @@ func TestInboundMsgBuilder(t *testing.T) { innerMsg := msg.Message().(*p2p.GetAcceptedFrontier) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) - require.Equal(engineType, innerMsg.EngineType) }, ) @@ -213,7 +209,6 @@ func TestInboundMsgBuilder(t *testing.T) { deadline, containerIDs, nodeID, - engineType, ) end := time.Now() @@ -225,7 +220,6 @@ func TestInboundMsgBuilder(t *testing.T) { innerMsg := msg.Message().(*p2p.GetAccepted) require.Equal(chainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) - require.Equal(engineType, innerMsg.EngineType) }, ) @@ -270,7 +264,6 @@ func TestInboundMsgBuilder(t *testing.T) { container, requestedHeight, nodeID, - engineType, ) end := time.Now() @@ -284,7 +277,6 @@ func TestInboundMsgBuilder(t *testing.T) { require.Equal(requestID, innerMsg.RequestId) require.Equal(container, innerMsg.Container) require.Equal(requestedHeight, innerMsg.RequestedHeight) - require.Equal(engineType, innerMsg.EngineType) }, ) @@ -301,7 +293,6 @@ func TestInboundMsgBuilder(t *testing.T) { containerIDs[0], requestedHeight, nodeID, - engineType, ) end := time.Now() @@ -315,7 +306,6 @@ func TestInboundMsgBuilder(t *testing.T) { 
require.Equal(requestID, innerMsg.RequestId) require.Equal(containerIDs[0][:], innerMsg.ContainerId) require.Equal(requestedHeight, innerMsg.RequestedHeight) - require.Equal(engineType, innerMsg.EngineType) }, ) @@ -402,7 +392,6 @@ func TestAppError(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "", prometheus.NewRegistry(), time.Second, ) diff --git a/message/internal_msg_builder.go b/message/internal_msg_builder.go index 38a95cb78a8c..141dabaae414 100644 --- a/message/internal_msg_builder.go +++ b/message/internal_msg_builder.go @@ -27,30 +27,26 @@ var ( _ chainIDGetter = (*GetAcceptedStateSummaryFailed)(nil) _ requestIDGetter = (*GetAcceptedStateSummaryFailed)(nil) - _ fmt.Stringer = (*GetAcceptedFrontierFailed)(nil) - _ chainIDGetter = (*GetAcceptedFrontierFailed)(nil) - _ requestIDGetter = (*GetAcceptedFrontierFailed)(nil) - _ engineTypeGetter = (*GetAcceptedFrontierFailed)(nil) + _ fmt.Stringer = (*GetAcceptedFrontierFailed)(nil) + _ chainIDGetter = (*GetAcceptedFrontierFailed)(nil) + _ requestIDGetter = (*GetAcceptedFrontierFailed)(nil) - _ fmt.Stringer = (*GetAcceptedFailed)(nil) - _ chainIDGetter = (*GetAcceptedFailed)(nil) - _ requestIDGetter = (*GetAcceptedFailed)(nil) - _ engineTypeGetter = (*GetAcceptedFailed)(nil) + _ fmt.Stringer = (*GetAcceptedFailed)(nil) + _ chainIDGetter = (*GetAcceptedFailed)(nil) + _ requestIDGetter = (*GetAcceptedFailed)(nil) _ fmt.Stringer = (*GetAncestorsFailed)(nil) _ chainIDGetter = (*GetAncestorsFailed)(nil) _ requestIDGetter = (*GetAncestorsFailed)(nil) _ engineTypeGetter = (*GetAncestorsFailed)(nil) - _ fmt.Stringer = (*GetFailed)(nil) - _ chainIDGetter = (*GetFailed)(nil) - _ requestIDGetter = (*GetFailed)(nil) - _ engineTypeGetter = (*GetFailed)(nil) + _ fmt.Stringer = (*GetFailed)(nil) + _ chainIDGetter = (*GetFailed)(nil) + _ requestIDGetter = (*GetFailed)(nil) - _ fmt.Stringer = (*QueryFailed)(nil) - _ chainIDGetter = (*QueryFailed)(nil) - _ requestIDGetter = (*QueryFailed)(nil) - _ engineTypeGetter = 
(*QueryFailed)(nil) + _ fmt.Stringer = (*QueryFailed)(nil) + _ chainIDGetter = (*QueryFailed)(nil) + _ requestIDGetter = (*QueryFailed)(nil) _ fmt.Stringer = (*CrossChainAppRequest)(nil) _ sourceChainIDGetter = (*CrossChainAppRequest)(nil) @@ -147,15 +143,14 @@ func InternalGetAcceptedStateSummaryFailed( } type GetAcceptedFrontierFailed struct { - ChainID ids.ID `json:"chain_id,omitempty"` - RequestID uint32 `json:"request_id,omitempty"` - EngineType p2p.EngineType `json:"engine_type,omitempty"` + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` } func (m *GetAcceptedFrontierFailed) String() string { return fmt.Sprintf( - "ChainID: %s RequestID: %d EngineType: %s", - m.ChainID, m.RequestID, m.EngineType, + "ChainID: %s RequestID: %d", + m.ChainID, m.RequestID, ) } @@ -167,38 +162,31 @@ func (m *GetAcceptedFrontierFailed) GetRequestId() uint32 { return m.RequestID } -func (m *GetAcceptedFrontierFailed) GetEngineType() p2p.EngineType { - return m.EngineType -} - func InternalGetAcceptedFrontierFailed( nodeID ids.NodeID, chainID ids.ID, requestID uint32, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, op: GetAcceptedFrontierFailedOp, message: &GetAcceptedFrontierFailed{ - ChainID: chainID, - RequestID: requestID, - EngineType: engineType, + ChainID: chainID, + RequestID: requestID, }, expiration: mockable.MaxTime, } } type GetAcceptedFailed struct { - ChainID ids.ID `json:"chain_id,omitempty"` - RequestID uint32 `json:"request_id,omitempty"` - EngineType p2p.EngineType `json:"engine_type,omitempty"` + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` } func (m *GetAcceptedFailed) String() string { return fmt.Sprintf( - "ChainID: %s RequestID: %d EngineType: %s", - m.ChainID, m.RequestID, m.EngineType, + "ChainID: %s RequestID: %d", + m.ChainID, m.RequestID, ) } @@ -210,23 +198,17 @@ func (m *GetAcceptedFailed) GetRequestId() uint32 { return 
m.RequestID } -func (m *GetAcceptedFailed) GetEngineType() p2p.EngineType { - return m.EngineType -} - func InternalGetAcceptedFailed( nodeID ids.NodeID, chainID ids.ID, requestID uint32, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, op: GetAcceptedFailedOp, message: &GetAcceptedFailed{ - ChainID: chainID, - RequestID: requestID, - EngineType: engineType, + ChainID: chainID, + RequestID: requestID, }, expiration: mockable.MaxTime, } @@ -276,15 +258,14 @@ func InternalGetAncestorsFailed( } type GetFailed struct { - ChainID ids.ID `json:"chain_id,omitempty"` - RequestID uint32 `json:"request_id,omitempty"` - EngineType p2p.EngineType `json:"engine_type,omitempty"` + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` } func (m *GetFailed) String() string { return fmt.Sprintf( - "ChainID: %s RequestID: %d EngineType: %s", - m.ChainID, m.RequestID, m.EngineType, + "ChainID: %s RequestID: %d", + m.ChainID, m.RequestID, ) } @@ -296,38 +277,31 @@ func (m *GetFailed) GetRequestId() uint32 { return m.RequestID } -func (m *GetFailed) GetEngineType() p2p.EngineType { - return m.EngineType -} - func InternalGetFailed( nodeID ids.NodeID, chainID ids.ID, requestID uint32, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, op: GetFailedOp, message: &GetFailed{ - ChainID: chainID, - RequestID: requestID, - EngineType: engineType, + ChainID: chainID, + RequestID: requestID, }, expiration: mockable.MaxTime, } } type QueryFailed struct { - ChainID ids.ID `json:"chain_id,omitempty"` - RequestID uint32 `json:"request_id,omitempty"` - EngineType p2p.EngineType `json:"engine_type,omitempty"` + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` } func (m *QueryFailed) String() string { return fmt.Sprintf( - "ChainID: %s RequestID: %d EngineType: %s", - m.ChainID, m.RequestID, m.EngineType, + "ChainID: %s RequestID: %d", + 
m.ChainID, m.RequestID, ) } @@ -339,23 +313,17 @@ func (m *QueryFailed) GetRequestId() uint32 { return m.RequestID } -func (m *QueryFailed) GetEngineType() p2p.EngineType { - return m.EngineType -} - func InternalQueryFailed( nodeID ids.NodeID, chainID ids.ID, requestID uint32, - engineType p2p.EngineType, ) InboundMessage { return &inboundMessage{ nodeID: nodeID, op: QueryFailedOp, message: &QueryFailed{ - ChainID: chainID, - RequestID: requestID, - EngineType: engineType, + ChainID: chainID, + RequestID: requestID, }, expiration: mockable.MaxTime, } diff --git a/message/messages.go b/message/messages.go index aef0577b60d1..06ef3125d69a 100644 --- a/message/messages.go +++ b/message/messages.go @@ -9,23 +9,32 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" +) + +const ( + typeLabel = "type" + opLabel = "op" + directionLabel = "direction" + + compressionLabel = "compression" + decompressionLabel = "decompression" ) var ( _ InboundMessage = (*inboundMessage)(nil) _ OutboundMessage = (*outboundMessage)(nil) + metricLabels = []string{typeLabel, opLabel, directionLabel} + errUnknownCompressionType = errors.New("message is compressed with an unknown compression type") ) @@ -131,27 +140,18 @@ func (m *outboundMessage) BytesSavedCompression() int { type msgBuilder struct { log logging.Logger - // TODO: Remove gzip once v1.11.x is out. 
- gzipCompressor compression.Compressor - gzipDecompressTimeMetrics map[Op]metric.Averager - - zstdCompressor compression.Compressor - zstdCompressTimeMetrics map[Op]metric.Averager - zstdDecompressTimeMetrics map[Op]metric.Averager + zstdCompressor compression.Compressor + count *prometheus.CounterVec // type + op + direction + duration *prometheus.GaugeVec // type + op + direction maxMessageTimeout time.Duration } func newMsgBuilder( log logging.Logger, - namespace string, metrics prometheus.Registerer, maxMessageTimeout time.Duration, ) (*msgBuilder, error) { - gzipCompressor, err := compression.NewGzipCompressor(constants.DefaultMaxMessageSize) - if err != nil { - return nil, err - } zstdCompressor, err := compression.NewZstdCompressor(constants.DefaultMaxMessageSize) if err != nil { return nil, err @@ -160,41 +160,28 @@ func newMsgBuilder( mb := &msgBuilder{ log: log, - gzipCompressor: gzipCompressor, - gzipDecompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), - - zstdCompressor: zstdCompressor, - zstdCompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), - zstdDecompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), + zstdCompressor: zstdCompressor, + count: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "codec_compressed_count", + Help: "number of compressed messages", + }, + metricLabels, + ), + duration: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "codec_compressed_duration", + Help: "time spent handling compressed messages", + }, + metricLabels, + ), maxMessageTimeout: maxMessageTimeout, } - - errs := wrappers.Errs{} - for _, op := range ExternalOps { - mb.gzipDecompressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("gzip_%s_decompress_time", op), - fmt.Sprintf("time (in ns) to decompress %s messages with gzip", op), - metrics, - &errs, - ) - mb.zstdCompressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("zstd_%s_compress_time", op), - 
fmt.Sprintf("time (in ns) to compress %s messages with zstd", op), - metrics, - &errs, - ) - mb.zstdDecompressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("zstd_%s_decompress_time", op), - fmt.Sprintf("time (in ns) to decompress %s messages with zstd", op), - metrics, - &errs, - ) - } - return mb, errs.Err + return mb, utils.Err( + metrics.Register(mb.count), + metrics.Register(mb.duration), + ) } func (mb *msgBuilder) marshal( @@ -218,9 +205,8 @@ func (mb *msgBuilder) marshal( // This recursive packing allows us to avoid an extra compression on/off // field in the message. var ( - startTime = time.Now() - compressedMsg p2p.Message - opToCompressTimeMetrics map[Op]metric.Averager + startTime = time.Now() + compressedMsg p2p.Message ) switch compressionType { case compression.TypeNone: @@ -235,7 +221,6 @@ func (mb *msgBuilder) marshal( CompressedZstd: compressedBytes, }, } - opToCompressTimeMetrics = mb.zstdCompressTimeMetrics default: return nil, 0, 0, errUnknownCompressionType } @@ -246,15 +231,13 @@ func (mb *msgBuilder) marshal( } compressTook := time.Since(startTime) - if compressTimeMetric, ok := opToCompressTimeMetrics[op]; ok { - compressTimeMetric.Observe(float64(compressTook)) - } else { - // Should never happen - mb.log.Warn("no compression metric found for op", - zap.Stringer("op", op), - zap.Stringer("compressionType", compressionType), - ) + labels := prometheus.Labels{ + typeLabel: compressionType.String(), + opLabel: op.String(), + directionLabel: compressionLabel, } + mb.count.With(labels).Inc() + mb.duration.With(labels).Add(float64(compressTook)) bytesSaved := len(uncompressedMsgBytes) - len(compressedMsgBytes) return compressedMsgBytes, bytesSaved, op, nil @@ -268,19 +251,12 @@ func (mb *msgBuilder) unmarshal(b []byte) (*p2p.Message, int, Op, error) { // Figure out what compression type, if any, was used to compress the message. 
var ( - opToDecompressTimeMetrics map[Op]metric.Averager - compressor compression.Compressor - compressedBytes []byte - gzipCompressed = m.GetCompressedGzip() - zstdCompressed = m.GetCompressedZstd() + compressor compression.Compressor + compressedBytes []byte + zstdCompressed = m.GetCompressedZstd() ) switch { - case len(gzipCompressed) > 0: - opToDecompressTimeMetrics = mb.gzipDecompressTimeMetrics - compressor = mb.gzipCompressor - compressedBytes = gzipCompressed case len(zstdCompressed) > 0: - opToDecompressTimeMetrics = mb.zstdDecompressTimeMetrics compressor = mb.zstdCompressor compressedBytes = zstdCompressed default: @@ -307,14 +283,14 @@ func (mb *msgBuilder) unmarshal(b []byte) (*p2p.Message, int, Op, error) { if err != nil { return nil, 0, 0, err } - if decompressTimeMetric, ok := opToDecompressTimeMetrics[op]; ok { - decompressTimeMetric.Observe(float64(decompressTook)) - } else { - // Should never happen - mb.log.Warn("no decompression metric found for op", - zap.Stringer("op", op), - ) + + labels := prometheus.Labels{ + typeLabel: compression.TypeZstd.String(), + opLabel: op.String(), + directionLabel: decompressionLabel, } + mb.count.With(labels).Inc() + mb.duration.With(labels).Add(float64(decompressTook)) return m, bytesSavedCompression, op, nil } diff --git a/message/messages_benchmark_test.go b/message/messages_benchmark_test.go index 595ba1b18266..9a96f1f41911 100644 --- a/message/messages_benchmark_test.go +++ b/message/messages_benchmark_test.go @@ -51,7 +51,6 @@ func BenchmarkMarshalHandshake(b *testing.B) { MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), IpPort: 0, - MyVersion: "v1.2.3", IpSigningTime: uint64(time.Now().Unix()), IpNodeIdSig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{id[:]}, @@ -63,7 +62,7 @@ func BenchmarkMarshalHandshake(b *testing.B) { useBuilder := os.Getenv("USE_BUILDER") != "" - codec, err := newMsgBuilder(logging.NoLog{}, "", prometheus.NewRegistry(), 10*time.Second) + codec, 
err := newMsgBuilder(logging.NoLog{}, prometheus.NewRegistry(), 10*time.Second) require.NoError(err) b.Logf("proto length %d-byte (use builder %v)", msgLen, useBuilder) @@ -108,7 +107,6 @@ func BenchmarkUnmarshalHandshake(b *testing.B) { MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), IpPort: 0, - MyVersion: "v1.2.3", IpSigningTime: uint64(time.Now().Unix()), IpNodeIdSig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{id[:]}, @@ -121,7 +119,7 @@ func BenchmarkUnmarshalHandshake(b *testing.B) { require.NoError(err) useBuilder := os.Getenv("USE_BUILDER") != "" - codec, err := newMsgBuilder(logging.NoLog{}, "", prometheus.NewRegistry(), 10*time.Second) + codec, err := newMsgBuilder(logging.NoLog{}, prometheus.NewRegistry(), 10*time.Second) require.NoError(err) b.StartTimer() diff --git a/message/messages_test.go b/message/messages_test.go index 6e4978dd71ab..583f26533d13 100644 --- a/message/messages_test.go +++ b/message/messages_test.go @@ -25,7 +25,6 @@ func TestMessage(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) @@ -67,13 +66,11 @@ func TestMessage(t *testing.T) { bytesSaved: false, }, { - desc: "pong message with no compression no subnet uptimes", + desc: "pong message with no compression", op: PongOp, msg: &p2p.Message{ Message: &p2p.Message_Pong{ - Pong: &p2p.Pong{ - Uptime: 100, - }, + Pong: &p2p.Pong{}, }, }, compressionType: compression.TypeNone, @@ -99,26 +96,6 @@ func TestMessage(t *testing.T) { bypassThrottling: true, bytesSaved: false, }, - { - desc: "pong message with no compression and subnet uptimes", - op: PongOp, - msg: &p2p.Message{ - Message: &p2p.Message_Pong{ - Pong: &p2p.Pong{ - Uptime: 100, - SubnetUptimes: []*p2p.SubnetUptime{ - { - SubnetId: testID[:], - Uptime: 100, - }, - }, - }, - }, - }, - compressionType: compression.TypeNone, - bypassThrottling: true, - bytesSaved: false, - }, { desc: "Handshake message with no compression", op: 
HandshakeOp, @@ -129,7 +106,6 @@ func TestMessage(t *testing.T) { MyTime: uint64(nowUnix), IpAddr: []byte(net.IPv6zero), IpPort: 9651, - MyVersion: "v1.2.3", IpSigningTime: uint64(nowUnix), IpNodeIdSig: []byte{'y', 'e', 'e', 't'}, TrackedSubnets: [][]byte{testID[:]}, @@ -339,10 +315,9 @@ func TestMessage(t *testing.T) { msg: &p2p.Message{ Message: &p2p.Message_GetAcceptedFrontier{ GetAcceptedFrontier: &p2p.GetAcceptedFrontier{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + ChainId: testID[:], + RequestId: 1, + Deadline: 1, }, }, }, @@ -376,7 +351,6 @@ func TestMessage(t *testing.T) { RequestId: 1, Deadline: 1, ContainerIds: [][]byte{testID[:], testID[:]}, - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, @@ -460,7 +434,6 @@ func TestMessage(t *testing.T) { RequestId: 1, Deadline: 1, ContainerId: testID[:], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, @@ -474,10 +447,9 @@ func TestMessage(t *testing.T) { msg: &p2p.Message{ Message: &p2p.Message_Put{ Put: &p2p.Put{ - ChainId: testID[:], - RequestId: 1, - Container: []byte{0}, - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + ChainId: testID[:], + RequestId: 1, + Container: []byte{0}, }, }, }, @@ -491,10 +463,9 @@ func TestMessage(t *testing.T) { msg: &p2p.Message{ Message: &p2p.Message_Put{ Put: &p2p.Put{ - ChainId: testID[:], - RequestId: 1, - Container: compressibleContainers[0], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + ChainId: testID[:], + RequestId: 1, + Container: compressibleContainers[0], }, }, }, @@ -508,11 +479,10 @@ func TestMessage(t *testing.T) { msg: &p2p.Message{ Message: &p2p.Message_PushQuery{ PushQuery: &p2p.PushQuery{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - Container: []byte{0}, - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + Container: []byte{0}, }, }, }, @@ -526,11 +496,10 @@ func TestMessage(t *testing.T) { msg: 
&p2p.Message{ Message: &p2p.Message_PushQuery{ PushQuery: &p2p.PushQuery{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - Container: compressibleContainers[0], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + Container: compressibleContainers[0], }, }, }, @@ -548,7 +517,6 @@ func TestMessage(t *testing.T) { RequestId: 1, Deadline: 1, ContainerId: testID[:], - EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, @@ -699,7 +667,6 @@ func TestInboundMessageToString(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) @@ -708,9 +675,7 @@ func TestInboundMessageToString(t *testing.T) { // msg that will become the tested InboundMessage msg := &p2p.Message{ Message: &p2p.Message_Pong{ - Pong: &p2p.Pong{ - Uptime: 100, - }, + Pong: &p2p.Pong{}, }, } msgBytes, err := proto.Marshal(msg) @@ -719,7 +684,7 @@ func TestInboundMessageToString(t *testing.T) { inboundMsg, err := mb.parseInbound(msgBytes, ids.EmptyNodeID, func() {}) require.NoError(err) - require.Equal("NodeID-111111111111111111116DBWJs Op: pong Message: uptime:100", inboundMsg.String()) + require.Equal("NodeID-111111111111111111116DBWJs Op: pong Message: ", inboundMsg.String()) internalMsg := InternalGetStateSummaryFrontierFailed(ids.EmptyNodeID, ids.Empty, 1) require.Equal("NodeID-111111111111111111116DBWJs Op: get_state_summary_frontier_failed Message: ChainID: 11111111111111111111111111111111LpoYY RequestID: 1", internalMsg.String()) @@ -732,7 +697,6 @@ func TestEmptyInboundMessage(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) @@ -753,7 +717,6 @@ func TestNilInboundMessage(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 5*time.Second, ) diff --git a/message/mock_outbound_message_builder.go b/message/mock_outbound_message_builder.go index d3ec69a0b0cc..917d764028fe 100644 
--- a/message/mock_outbound_message_builder.go +++ b/message/mock_outbound_message_builder.go @@ -10,6 +10,7 @@ package message import ( + netip "net/netip" reflect "reflect" time "time" @@ -178,48 +179,48 @@ func (mr *MockOutboundMsgBuilderMockRecorder) Chits(arg0, arg1, arg2, arg3, arg4 } // Get mocks base method. -func (m *MockOutboundMsgBuilder) Get(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Get(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get. -func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Get), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Get), arg0, arg1, arg2, arg3) } // GetAccepted mocks base method. -func (m *MockOutboundMsgBuilder) GetAccepted(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) GetAccepted(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []ids.ID) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAccepted", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "GetAccepted", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // GetAccepted indicates an expected call of GetAccepted. 
-func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAccepted), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAccepted), arg0, arg1, arg2, arg3) } // GetAcceptedFrontier mocks base method. -func (m *MockOutboundMsgBuilder) GetAcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) GetAcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 time.Duration) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAcceptedFrontier", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "GetAcceptedFrontier", arg0, arg1, arg2) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // GetAcceptedFrontier indicates an expected call of GetAcceptedFrontier. -func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedFrontier), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedFrontier), arg0, arg1, arg2) } // GetAcceptedStateSummary mocks base method. @@ -283,18 +284,18 @@ func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1 } // Handshake mocks base method. 
-func (m *MockOutboundMsgBuilder) Handshake(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3, arg4 string, arg5, arg6, arg7 uint32, arg8 uint64, arg9, arg10 []byte, arg11 []ids.ID, arg12, arg13 []uint32, arg14, arg15 []byte) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Handshake(arg0 uint32, arg1 uint64, arg2 netip.AddrPort, arg3 string, arg4, arg5, arg6 uint32, arg7 uint64, arg8, arg9 []byte, arg10 []ids.ID, arg11, arg12 []uint32, arg13, arg14 []byte) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Handshake", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15) + ret := m.ctrl.Call(m, "Handshake", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Handshake indicates an expected call of Handshake. -func (mr *MockOutboundMsgBuilderMockRecorder) Handshake(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Handshake(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Handshake", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Handshake), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Handshake", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Handshake), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14) } // PeerList mocks base method. @@ -328,63 +329,63 @@ func (mr *MockOutboundMsgBuilderMockRecorder) Ping(arg0, arg1 any) *gomock.Call } // Pong mocks base method. 
-func (m *MockOutboundMsgBuilder) Pong(arg0 uint32, arg1 []*p2p.SubnetUptime) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Pong() (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Pong", arg0, arg1) + ret := m.ctrl.Call(m, "Pong") ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Pong indicates an expected call of Pong. -func (mr *MockOutboundMsgBuilderMockRecorder) Pong(arg0, arg1 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Pong() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pong", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Pong), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pong", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Pong)) } // PullQuery mocks base method. -func (m *MockOutboundMsgBuilder) PullQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 uint64, arg5 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) PullQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 uint64) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PullQuery", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "PullQuery", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // PullQuery indicates an expected call of PullQuery. 
-func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PullQuery), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PullQuery), arg0, arg1, arg2, arg3, arg4) } // PushQuery mocks base method. -func (m *MockOutboundMsgBuilder) PushQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []byte, arg4 uint64, arg5 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) PushQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []byte, arg4 uint64) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PushQuery", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "PushQuery", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // PushQuery indicates an expected call of PushQuery. -func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4, arg5 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PushQuery), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PushQuery), arg0, arg1, arg2, arg3, arg4) } // Put mocks base method. 
-func (m *MockOutboundMsgBuilder) Put(arg0 ids.ID, arg1 uint32, arg2 []byte, arg3 p2p.EngineType) (OutboundMessage, error) { +func (m *MockOutboundMsgBuilder) Put(arg0 ids.ID, arg1 uint32, arg2 []byte) (OutboundMessage, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2) ret0, _ := ret[0].(OutboundMessage) ret1, _ := ret[1].(error) return ret0, ret1 } // Put indicates an expected call of Put. -func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2, arg3 any) *gomock.Call { +func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Put), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Put), arg0, arg1, arg2) } // StateSummaryFrontier mocks base method. diff --git a/message/outbound_msg_builder.go b/message/outbound_msg_builder.go index f90a5f501b87..78aacdce3e08 100644 --- a/message/outbound_msg_builder.go +++ b/message/outbound_msg_builder.go @@ -4,6 +4,7 @@ package message import ( + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -21,8 +22,7 @@ type OutboundMsgBuilder interface { Handshake( networkID uint32, myTime uint64, - ip ips.IPPort, - myVersion string, + ip netip.AddrPort, client string, major uint32, minor uint32, @@ -52,10 +52,7 @@ type OutboundMsgBuilder interface { subnetUptimes []*p2p.SubnetUptime, ) (OutboundMessage, error) - Pong( - primaryUptime uint32, - subnetUptimes []*p2p.SubnetUptime, - ) (OutboundMessage, error) + Pong() (OutboundMessage, error) GetStateSummaryFrontier( chainID ids.ID, @@ -86,7 +83,6 @@ type OutboundMsgBuilder interface { chainID ids.ID, requestID uint32, deadline time.Duration, - engineType p2p.EngineType, ) (OutboundMessage, error) AcceptedFrontier( @@ -100,7 +96,6 @@ type 
OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerIDs []ids.ID, - engineType p2p.EngineType, ) (OutboundMessage, error) Accepted( @@ -128,14 +123,12 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerID ids.ID, - engineType p2p.EngineType, ) (OutboundMessage, error) Put( chainID ids.ID, requestID uint32, container []byte, - engineType p2p.EngineType, ) (OutboundMessage, error) PushQuery( @@ -144,7 +137,6 @@ type OutboundMsgBuilder interface { deadline time.Duration, container []byte, requestedHeight uint64, - engineType p2p.EngineType, ) (OutboundMessage, error) PullQuery( @@ -153,7 +145,6 @@ type OutboundMsgBuilder interface { deadline time.Duration, containerID ids.ID, requestedHeight uint64, - engineType p2p.EngineType, ) (OutboundMessage, error) Chits( @@ -223,17 +214,11 @@ func (b *outMsgBuilder) Ping( ) } -func (b *outMsgBuilder) Pong( - primaryUptime uint32, - subnetUptimes []*p2p.SubnetUptime, -) (OutboundMessage, error) { +func (b *outMsgBuilder) Pong() (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Pong{ - Pong: &p2p.Pong{ - Uptime: primaryUptime, - SubnetUptimes: subnetUptimes, - }, + Pong: &p2p.Pong{}, }, }, compression.TypeNone, @@ -244,8 +229,7 @@ func (b *outMsgBuilder) Pong( func (b *outMsgBuilder) Handshake( networkID uint32, myTime uint64, - ip ips.IPPort, - myVersion string, + ip netip.AddrPort, client string, major uint32, minor uint32, @@ -261,15 +245,16 @@ func (b *outMsgBuilder) Handshake( ) (OutboundMessage, error) { subnetIDBytes := make([][]byte, len(trackedSubnets)) encodeIDs(trackedSubnets, subnetIDBytes) + // TODO: Use .AsSlice() after v1.12.x activates. 
+ addr := ip.Addr().As16() return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Handshake{ Handshake: &p2p.Handshake{ NetworkId: networkID, MyTime: myTime, - IpAddr: ip.IP.To16(), - IpPort: uint32(ip.Port), - MyVersion: myVersion, + IpAddr: addr[:], + IpPort: uint32(ip.Port()), IpSigningTime: ipSigningTime, IpNodeIdSig: ipNodeIDSig, TrackedSubnets: subnetIDBytes, @@ -317,10 +302,12 @@ func (b *outMsgBuilder) GetPeerList( func (b *outMsgBuilder) PeerList(peers []*ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { claimIPPorts := make([]*p2p.ClaimedIpPort, len(peers)) for i, p := range peers { + // TODO: Use .AsSlice() after v1.12.x activates. + ip := p.AddrPort.Addr().As16() claimIPPorts[i] = &p2p.ClaimedIpPort{ X509Certificate: p.Cert.Raw, - IpAddr: p.IPPort.IP.To16(), - IpPort: uint32(p.IPPort.Port), + IpAddr: ip[:], + IpPort: uint32(p.AddrPort.Port()), Timestamp: p.Timestamp, Signature: p.Signature, TxId: ids.Empty[:], @@ -427,16 +414,14 @@ func (b *outMsgBuilder) GetAcceptedFrontier( chainID ids.ID, requestID uint32, deadline time.Duration, - engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_GetAcceptedFrontier{ GetAcceptedFrontier: &p2p.GetAcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), }, }, }, @@ -470,7 +455,6 @@ func (b *outMsgBuilder) GetAccepted( requestID uint32, deadline time.Duration, containerIDs []ids.ID, - engineType p2p.EngineType, ) (OutboundMessage, error) { containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) @@ -482,7 +466,6 @@ func (b *outMsgBuilder) GetAccepted( RequestId: requestID, Deadline: uint64(deadline), ContainerIds: containerIDBytes, - EngineType: engineType, }, }, }, @@ -562,7 +545,6 @@ func (b *outMsgBuilder) Get( requestID uint32, 
deadline time.Duration, containerID ids.ID, - engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ @@ -572,7 +554,6 @@ func (b *outMsgBuilder) Get( RequestId: requestID, Deadline: uint64(deadline), ContainerId: containerID[:], - EngineType: engineType, }, }, }, @@ -585,16 +566,14 @@ func (b *outMsgBuilder) Put( chainID ids.ID, requestID uint32, container []byte, - engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ Message: &p2p.Message_Put{ Put: &p2p.Put{ - ChainId: chainID[:], - RequestId: requestID, - Container: container, - EngineType: engineType, + ChainId: chainID[:], + RequestId: requestID, + Container: container, }, }, }, @@ -609,7 +588,6 @@ func (b *outMsgBuilder) PushQuery( deadline time.Duration, container []byte, requestedHeight uint64, - engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ @@ -620,7 +598,6 @@ func (b *outMsgBuilder) PushQuery( Deadline: uint64(deadline), Container: container, RequestedHeight: requestedHeight, - EngineType: engineType, }, }, }, @@ -635,7 +612,6 @@ func (b *outMsgBuilder) PullQuery( deadline time.Duration, containerID ids.ID, requestedHeight uint64, - engineType p2p.EngineType, ) (OutboundMessage, error) { return b.builder.createOutbound( &p2p.Message{ @@ -646,7 +622,6 @@ func (b *outMsgBuilder) PullQuery( Deadline: uint64(deadline), ContainerId: containerID[:], RequestedHeight: requestedHeight, - EngineType: engineType, }, }, }, diff --git a/message/outbound_msg_builder_test.go b/message/outbound_msg_builder_test.go index 02e46ef166a5..1f7187cdd437 100644 --- a/message/outbound_msg_builder_test.go +++ b/message/outbound_msg_builder_test.go @@ -20,7 +20,6 @@ func Test_newOutboundBuilder(t *testing.T) { mb, err := newMsgBuilder( logging.NoLog{}, - "test", prometheus.NewRegistry(), 10*time.Second, ) diff --git a/nat/nat.go b/nat/nat.go index a6e37078e7a6..28cdb1083eac 100644 
--- a/nat/nat.go +++ b/nat/nat.go @@ -4,13 +4,13 @@ package nat import ( - "net" + "net/netip" "sync" "time" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -29,7 +29,7 @@ type Router interface { // Undo a port mapping UnmapPort(intPort, extPort uint16) error // Return our external IP - ExternalIP() (net.IP, error) + ExternalIP() (netip.Addr, error) } // GetRouter returns a router on the current network. @@ -63,7 +63,13 @@ func NewPortMapper(log logging.Logger, r Router) *Mapper { // Map external port [extPort] (exposed to the internet) to internal port [intPort] (where our process is listening) // and set [ip]. Does this every [updateTime]. [ip] may be nil. -func (m *Mapper) Map(intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { +func (m *Mapper) Map( + intPort uint16, + extPort uint16, + desc string, + ip *utils.Atomic[netip.AddrPort], + updateTime time.Duration, +) { if !m.r.SupportsNAT() { return } @@ -110,7 +116,13 @@ func (m *Mapper) retryMapPort(intPort, extPort uint16, desc string, timeout time // keepPortMapping runs in the background to keep a port mapped. It renews the mapping from [extPort] // to [intPort]] every [updateTime]. Updates [ip] every [updateTime]. 
-func (m *Mapper) keepPortMapping(intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { +func (m *Mapper) keepPortMapping( + intPort uint16, + extPort uint16, + desc string, + ip *utils.Atomic[netip.AddrPort], + updateTime time.Duration, +) { updateTimer := time.NewTimer(updateTime) defer func(extPort uint16) { @@ -150,22 +162,25 @@ func (m *Mapper) keepPortMapping(intPort, extPort uint16, desc string, ip ips.Dy } } -func (m *Mapper) updateIP(ip ips.DynamicIPPort) { +func (m *Mapper) updateIP(ip *utils.Atomic[netip.AddrPort]) { if ip == nil { return } - newIP, err := m.r.ExternalIP() + newAddr, err := m.r.ExternalIP() if err != nil { m.log.Error("failed to get external IP", zap.Error(err), ) return } - oldIP := ip.IPPort().IP - ip.SetIP(newIP) - if !oldIP.Equal(newIP) { + oldAddrPort := ip.Get() + oldAddr := oldAddrPort.Addr() + if newAddr != oldAddr { + port := oldAddrPort.Port() + ip.Set(netip.AddrPortFrom(newAddr, port)) m.log.Info("external IP updated", - zap.Stringer("newIP", newIP), + zap.Stringer("oldIP", oldAddr), + zap.Stringer("newIP", newAddr), ) } } diff --git a/nat/no_router.go b/nat/no_router.go index 19c68dac5538..ebdf6015020c 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -6,6 +6,7 @@ package nat import ( "errors" "net" + "net/netip" "time" ) @@ -19,7 +20,7 @@ var ( const googleDNSServer = "8.8.8.8:80" type noRouter struct { - ip net.IP + ip netip.Addr ipErr error } @@ -35,26 +36,30 @@ func (noRouter) UnmapPort(uint16, uint16) error { return nil } -func (r noRouter) ExternalIP() (net.IP, error) { +func (r noRouter) ExternalIP() (netip.Addr, error) { return r.ip, r.ipErr } -func getOutboundIP() (net.IP, error) { +func getOutboundIP() (netip.Addr, error) { conn, err := net.Dial("udp", googleDNSServer) if err != nil { - return nil, err + return netip.Addr{}, err } - addr := conn.LocalAddr() + localAddr := conn.LocalAddr() if err := conn.Close(); err != nil { - return nil, err + return netip.Addr{}, err } - 
udpAddr, ok := addr.(*net.UDPAddr) + udpAddr, ok := localAddr.(*net.UDPAddr) if !ok { - return nil, errFetchingIP + return netip.Addr{}, errFetchingIP } - return udpAddr.IP, nil + addr := udpAddr.AddrPort().Addr() + if addr.Is4In6() { + addr = addr.Unmap() + } + return addr, nil } // NewNoRouter returns a router that assumes the network is public diff --git a/nat/pmp.go b/nat/pmp.go index ecee9793f934..c10bdbdbc4fa 100644 --- a/nat/pmp.go +++ b/nat/pmp.go @@ -6,7 +6,7 @@ package nat import ( "errors" "math" - "net" + "net/netip" "time" "github.com/jackpal/gateway" @@ -66,12 +66,12 @@ func (r *pmpRouter) UnmapPort(internalPort uint16, _ uint16) error { return err } -func (r *pmpRouter) ExternalIP() (net.IP, error) { +func (r *pmpRouter) ExternalIP() (netip.Addr, error) { response, err := r.client.GetExternalAddress() if err != nil { - return nil, err + return netip.Addr{}, err } - return response.ExternalIPAddress[:], nil + return netip.AddrFrom4(response.ExternalIPAddress), nil } func getPMPRouter() *pmpRouter { diff --git a/nat/upnp.go b/nat/upnp.go index aa26d6d82fc6..943017dc7560 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -7,11 +7,14 @@ import ( "fmt" "math" "net" + "net/netip" "time" "github.com/huin/goupnp" "github.com/huin/goupnp/dcps/internetgateway1" "github.com/huin/goupnp/dcps/internetgateway2" + + "github.com/ava-labs/avalanchego/utils/ips" ) const ( @@ -111,17 +114,12 @@ func (r *upnpRouter) localIP() (net.IP, error) { return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP) } -func (r *upnpRouter) ExternalIP() (net.IP, error) { +func (r *upnpRouter) ExternalIP() (netip.Addr, error) { str, err := r.client.GetExternalIPAddress() if err != nil { - return nil, err - } - - ip := net.ParseIP(str) - if ip == nil { - return nil, fmt.Errorf("invalid IP %s", str) + return netip.Addr{}, err } - return ip, nil + return ips.ParseAddr(str) } func (r *upnpRouter) MapPort( @@ -161,7 +159,7 @@ func getUPnPClient(client 
goupnp.ServiceClient) upnpClient { } } -// discover() tries to find gateway device +// discover() tries to find gateway device func discover(target string) *upnpRouter { devs, err := goupnp.DiscoverDevices(target) if err != nil { diff --git a/network/README.md b/network/README.md index 303d1f56ab82..b4a6a01d648b 100644 --- a/network/README.md +++ b/network/README.md @@ -4,14 +4,18 @@ - [Overview](#overview) - [Peers](#peers) - - [Lifecycle](#lifecycle) + - [Message Handling](#message-handling) + - [Peer Handshake](#peer-handshake) + - [Ping-Pong Messages](#ping-pong-messages) +- [Peer Discovery](#peer-discovery) + - [Inbound Connections](#inbound-connections) + - [Outbound Connections](#outbound-connections) + - [IP Authentication](#ip-authentication) - [Bootstrapping](#bootstrapping) - - [Connecting](#connecting) - - [Peer Handshake](#peer-handshake) - - [Connected](#connected) - - [PeerList Gossip](#peerlist-gossip) - - [Messages](#messages) - - [Gossip](#gossip) + - [PeerList Gossip](#peerlist-gossip) + - [Bloom Filter](#bloom-filter) + - [GetPeerList](#getpeerlist) + - [PeerList](#peerlist) ## Overview @@ -19,6 +23,8 @@ Avalanche is a decentralized [p2p](https://en.wikipedia.org/wiki/Peer-to-peer) ( The `network` package implements the networking layer of the protocol which allows a node to discover, connect to, and communicate with other peers. +All connections are authenticated using [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security). However, there is no reliance on any certificate authorities. The `network` package identifies peers by the public key in the leaf certificate. + ## Peers Peers are defined as members of the network that communicate with one another to participate in the Avalanche protocol. @@ -27,117 +33,170 @@ Peers communicate by enqueuing messages between one another. 
Each peer on either ```mermaid sequenceDiagram + actor Alice + actor Bob loop - Peer-1->>Peer-2: Write outbound messages - Peer-2->>Peer-1: Read incoming messages + Alice->>Bob: Write outbound messages + Bob->>Alice: Read incoming messages end loop - Peer-2->>Peer-1: Write outbound messages - Peer-1->>Peer-2: Read incoming messages + Bob->>Alice: Write outbound messages + Alice->>Bob: Read incoming messages end ``` -### Lifecycle - -#### Bootstrapping - -When starting an Avalanche node, a node needs to be able to initiate some process that eventually allows itself to become a participating member of the network. In traditional web2 systems, it's common to use a web service by hitting the service's DNS and being routed to an available server behind a load balancer. In decentralized p2p systems however, connecting to a node is more complex as no single entity owns the network. [Avalanche consensus](https://docs.avax.network/overview/getting-started/avalanche-consensus) requires a node to repeatedly sample peers in the network, so each node needs some way of discovering and connecting to every other peer to participate in the protocol. +### Message Handling -In Avalanche, nodes connect to an initial set of bootstrapper nodes known as **beacons** (this is user-configurable). Once connected to a set of beacons, a node is able to discover other nodes in the network. Over time, a node eventually discovers other peers in the network through `PeerList` messages it receives through: +All messages are prefixed with their length. Reading a message first reads the 4-byte message length from the connection. The rate-limiting logic then waits until there is sufficient capacity to read these bytes from the connection. -- The handshake initiated between two peers when attempting to connect to a peer (see [Connecting](#connecting)). -- Responses to periodically sent `GetPeerList` messages requesting a `PeerList` of unknown peers (see [Connected](#connected)). 
+A peer will then read the full message and attempt to parse it into either a networking message or an application message. If the message is malformed the connection is not dropped. The peer will simply continue to the next sent message. -#### Connecting +### Peer Handshake -##### Peer Handshake +Upon connection to a new peer, a handshake is performed between the node attempting to establish the outbound connection to the peer and the peer receiving the inbound connection. -Upon connection to any peer, a handshake is performed between the node attempting to establish the outbound connection to the peer and the peer receiving the inbound connection. +When attempting to establish the connection, the first message that the node sends is a `Handshake` message describing the compatibility of the nodes. If the `Handshake` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. See [Peerlist Gossip](#peerlist-gossip). -When attempting to establish the connection, the first message that the node attempting to connect to the peer in the network is a `Handshake` message describing compatibility of the candidate node with the peer. As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected by the peer. +As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected. ```mermaid sequenceDiagram -Note over Node,Peer: Initiate Handshake -Note left of Node: I want to connect to you! -Note over Node,Peer: Handshake message -Node->>Peer: AvalancheGo v1.0.0 -Note right of Peer: My version v1.9.4 is incompatible with your version v1.0.0. 
-Peer-xNode: Connection dropped -Note over Node,Peer: Handshake Failed + actor Alice + actor Bob + Note over Alice,Bob: Connection Created + par + Alice->>Bob: AvalancheGo v1.0.0 + and + Bob->>Alice: AvalancheGo v1.11.4 + end + Note right of Bob: v1.0.0 is incompatible with v1.11.4. + Note left of Alice: v1.11.4 could be compatible with v1.0.0! + par + Bob-->>Alice: Disconnect + and + Alice-XBob: Peerlist + end + Note over Alice,Bob: Handshake Failed ``` -If the `Handshake` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. Upon reception of a `PeerList` message, a node will attempt to connect to any peers that the node is not already connected to to allow the node to discover more peers in the network. +Nodes that mutually desire the connection will both respond with `PeerList` messages and complete the handshake. ```mermaid sequenceDiagram -Note over Node,Peer: Initiate Handshake -Note left of Node: I want to connect to you! -Note over Node,Peer: Handshake message -Node->>Peer: AvalancheGo v1.9.4 -Note right of Peer: LGTM! -Note over Node,Peer: PeerList message -Peer->>Node: Peer-X, Peer-Y, Peer-Z -Note over Node,Peer: Handshake Complete + actor Alice + actor Bob + Note over Alice,Bob: Connection Created + par + Alice->>Bob: AvalancheGo v1.11.0 + and + Bob->>Alice: AvalancheGo v1.11.4 + end + Note right of Bob: v1.11.0 is compatible with v1.11.4! + Note left of Alice: v1.11.4 could be compatible with v1.11.0! + par + Bob->>Alice: Peerlist + and + Alice->>Bob: Peerlist + end + Note over Alice,Bob: Handshake Complete ``` -Once the node attempting to join the network receives this `PeerList` message, the handshake is complete and the node is now connected to the peer. The node attempts to connect to the new peers discovered in the `PeerList` message. 
Each connection results in another peer handshake, which results in the node incrementally discovering more and more peers in the network as more and more `PeerList` messages are exchanged. - -#### Connected +### Ping-Pong Messages -Some peers aren't discovered through the `PeerList` messages exchanged through peer handshakes. This can happen if a peer is either not randomly sampled, or if a new peer joins the network after the node has already connected to the network. +Peers periodically send `Ping` messages containing perceived uptime information. This information can be used to monitor how the node is considered to be performing by the network. It is expected for a node to reply to a `Ping` message with a `Pong` message. ```mermaid sequenceDiagram -Node ->> Peer-1: Handshake - v1.9.5 -Peer-1 ->> Node: PeerList - Peer-2 -Note left of Node: Node is connected to Peer-1 and now tries to connect to Peer-2. -Node ->> Peer-2: Handshake - v1.9.5 -Peer-2 ->> Node: PeerList - Peer-1 -Note left of Node: Peer-3 was never sampled, so we haven't connected yet! -Node --> Peer-3: No connection + actor Alice + actor Bob + Note left of Alice: Send Ping + Alice->>Bob: I think your uptime is 95% + Note right of Bob: Send Pong + Bob->>Alice: ACK ``` -To guarantee that a node can discover all peers, each node periodically sends a `GetPeerList` message to a random peer. +## Peer Discovery + +When starting an Avalanche node, a node needs to be able to initiate some process that eventually allows itself to become a participating member of the network. In traditional web2 systems, it's common to use a web service by hitting the service's DNS and being routed to an available server behind a load balancer. In decentralized p2p systems, however, connecting to a node is more complex as no single entity owns the network. 
[Avalanche consensus](https://docs.avax.network/overview/getting-started/avalanche-consensus) requires a node to repeatedly sample peers in the network, so each node needs some way of discovering and connecting to every other peer to participate in the protocol. + +### Inbound Connections + +It is expected for Avalanche nodes to allow inbound connections. If a validator does not allow inbound connections, its observed uptime may be reduced. + +### Outbound Connections + +Avalanche nodes that have identified the `IP:Port` pair of a node they want to connect to will initiate outbound connections to this `IP:Port` pair. If the connection is not able to complete the [Peer Handshake](#peer-handshake), the connection will be re-attempted with an [Exponential Backoff](https://en.wikipedia.org/wiki/Exponential_backoff). + +A node should initiate outbound connections to an `IP:Port` pair that is believed to belong to a node that is not connected and meets at least one of the following conditions: +- The node is in the initial bootstrapper set. +- The node is in the default bootstrapper set. +- The node is in the current Primary Network validator set. + +#### IP Authentication + +To ensure that outbound connections are being made to the correct `IP:Port` pair of a node, all `IP:Port` pairs sent by the network are signed by the node that is claiming ownership of the pair. To prevent replays of these messages, the signature is over the `Timestamp` in addition to the `IP:Port` pair. + +The `Timestamp` guarantees that nodes provided an `IP:Port` pair can track the most up-to-date `IP:Port` pair of a peer. + +### Bootstrapping + +In Avalanche, nodes connect to an initial set (this is user-configurable) of bootstrap nodes. + +### PeerList Gossip + +Once connected to an initial set of peers, a node can use these connections to discover additional peers. + +Peers are discovered by receiving [`PeerList`](#peerlist) messages during the [Peer Handshake](#peer-handshake). 
These messages quickly provide a node with knowledge of peers in the network. However, they offer no guarantee that the node will connect to and maintain connections with every peer in the network. + +To provide an eventual guarantee that all peers learn of one another, nodes periodically send a [`GetPeerList`](#getpeerlist) message to a randomly selected validator with the node's current [Bloom Filter](#bloom-filter) and `Salt`. + +#### Bloom Filter -##### PeerList Gossip +A [Bloom Filter](https://en.wikipedia.org/wiki/Bloom_filter) is used to track which nodes are known. -###### Messages +The parameterization of the Bloom Filter is based on the number of desired peers. -A `GetPeerList` message requests that the peer sends a `PeerList` message. `GetPeerList` messages contain a bloom filter of already known peers to reduce useless bandwidth on `PeerList` messages. The bloom filter reduces bandwidth by enabling the `PeerList` message to only include peers that aren't already known. +Entries in the Bloom Filter are determined by a locally calculated [`Salt`](https://en.wikipedia.org/wiki/Salt_(cryptography)) along with the `NodeID` and `Timestamp` of the most recently known `IP:Port`. The `Salt` is added to prevent griefing attacks where malicious nodes intentionally generate hash collisions with other virtuous nodes to reduce their connectivity. -A `PeerList` is the message that is used to communicate the presence of peers in the network. Each `PeerList` message contains signed networking-level metadata about a peer that provides the necessary information to connect to it. +The Bloom Filter is reconstructed if there are more entries than expected to avoid increasing the false positive probability. It is also reconstructed periodically. When reconstructing the Bloom Filter, a new `Salt` is generated. -Once peer metadata is received, the node will add that data to its bloom filter to prevent learning about it again. 
+To prevent a malicious node from arbitrarily filling this Bloom Filter, only `2` entries are added to the Bloom Filter per node. If a node's `IP:Port` pair changes once, it will immediately be added to the Bloom Filter. If a node's `IP:Port` pair changes more than once, it will only be added to the Bloom Filter after the Bloom Filter is reconstructed. -###### Gossip +#### GetPeerList -Handshake messages provide a node with some knowledge of peers in the network, but offers no guarantee that learning about a subset of peers from each peer the node connects with will result in the node learning about every peer in the network. +A `GetPeerList` message contains the Bloom Filter of the currently known peers along with the `Salt` that was used to add entries to the Bloom Filter. Upon receipt of a `GetPeerList` message, a node is expected to respond with a `PeerList` message. -To provide an eventual guarantee that all peers learn of one another, each node periodically requests peers from a random peer. +#### PeerList -To optimize bandwidth, each node tracks the most recent IPs of validators. The validator's nodeID and timestamp are inserted into a bloom filter which is used to select only necessary IPs to gossip. +`PeerList` messages are expected to contain `IP:Port` pairs that satisfy all of the following constraints: +- The Bloom Filter sent when requesting the `PeerList` message does not contain the node claiming the `IP:Port` pair. +- The node claiming the `IP:Port` pair is currently connected. +- The `IP:Port` pair the node shared during the `Handshake` message is the node's most recently known `IP:Port` pair. +- The node claiming the `IP:Port` pair is either in the default bootstrapper set or is a current Primary Network validator. -As the number of entries increases in the bloom filter, the probability of a false positive increases. False positives can cause recent IPs not to be gossiped when they otherwise should be, slowing down the rate of `PeerList` gossip. 
To prevent the bloom filter from having too many false positives, a new bloom filter is periodically generated and the number of entries a validator is allowed to have in the bloom filter is capped. Generating the new bloom filter both removes stale entries and modifies the hash functions to avoid persistent hash collisions. +#### Example PeerList Gossip -A node follows the following steps for of `PeerList` gossip: +The following diagram shows an example of `Alice` repeatedly learning about new peers from `Bob`. ```mermaid sequenceDiagram -Note left of Node: Initialize bloom filter -Note left of Node: Bloom: [0, 0, 0] -Node->>Peer-123: GetPeerList [0, 0, 0] -Note right of Peer-123: Any peers can be sent. -Peer-123->>Node: PeerList - Peer-1 -Note left of Node: Bloom: [1, 0, 0] -Node->>Peer-123: GetPeerList [1, 0, 0] -Note right of Peer-123: Either Peer-2 or Peer-3 can be sent. -Peer-123->>Node: PeerList - Peer-3 -Note left of Node: Bloom: [1, 0, 1] -Node->>Peer-123: GetPeerList [1, 0, 1] -Note right of Peer-123: Only Peer-2 can be sent. -Peer-123->>Node: PeerList - Peer-2 -Note left of Node: Bloom: [1, 1, 1] -Node->>Peer-123: GetPeerList [1, 1, 1] -Note right of Peer-123: There are no more peers left to send! + actor Alice + actor Bob + Note left of Alice: Initialize Bloom Filter + Note left of Alice: Bloom: [0, 0, 0] + Alice->>Bob: GetPeerList [0, 0, 0] + Note right of Bob: Any peers can be sent. + Bob->>Alice: PeerList - Peer-1 + Note left of Alice: Bloom: [1, 0, 0] + Alice->>Bob: GetPeerList [1, 0, 0] + Note right of Bob: Either Peer-2 or Peer-3 can be sent. + Bob->>Alice: PeerList - Peer-3 + Note left of Alice: Bloom: [1, 0, 1] + Alice->>Bob: GetPeerList [1, 0, 1] + Note right of Bob: Only Peer-2 can be sent. + Bob->>Alice: PeerList - Peer-2 + Note left of Alice: Bloom: [1, 1, 1] + Alice->>Bob: GetPeerList [1, 1, 1] + Note right of Bob: There are no more peers left to send! 
+ Bob->>Alice: PeerList - Empty ``` diff --git a/network/certs_test.go b/network/certs_test.go deleted file mode 100644 index a4b1642b3dca..000000000000 --- a/network/certs_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package network - -import ( - "crypto/tls" - "net" - "sync" - "testing" - - "github.com/stretchr/testify/require" - - _ "embed" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/peer" - "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/ips" -) - -var ( - //go:embed test_cert_1.crt - testCertBytes1 []byte - //go:embed test_key_1.key - testKeyBytes1 []byte - //go:embed test_cert_2.crt - testCertBytes2 []byte - //go:embed test_key_2.key - testKeyBytes2 []byte - //go:embed test_cert_3.crt - testCertBytes3 []byte - //go:embed test_key_3.key - testKeyBytes3 []byte - - ip *ips.ClaimedIPPort - otherIP *ips.ClaimedIPPort - - certLock sync.Mutex - tlsCerts []*tls.Certificate - tlsConfigs []*tls.Config -) - -func init() { - cert1, err := staking.LoadTLSCertFromBytes(testKeyBytes1, testCertBytes1) - if err != nil { - panic(err) - } - cert2, err := staking.LoadTLSCertFromBytes(testKeyBytes2, testCertBytes2) - if err != nil { - panic(err) - } - cert3, err := staking.LoadTLSCertFromBytes(testKeyBytes3, testCertBytes3) - if err != nil { - panic(err) - } - tlsCerts = []*tls.Certificate{ - cert1, cert2, cert3, - } - - ip = ips.NewClaimedIPPort( - staking.CertificateFromX509(cert1.Leaf), - ips.IPPort{ - IP: net.IPv4(127, 0, 0, 1), - Port: 9651, - }, - 1, // timestamp - nil, // signature - ) - otherIP = ips.NewClaimedIPPort( - staking.CertificateFromX509(cert2.Leaf), - ips.IPPort{ - IP: net.IPv4(127, 0, 0, 1), - Port: 9651, - }, - 1, // timestamp - nil, // signature - ) -} - -func getTLS(t *testing.T, index int) (ids.NodeID, *tls.Certificate, *tls.Config) { - certLock.Lock() - defer 
certLock.Unlock() - - for len(tlsCerts) <= index { - cert, err := staking.NewTLSCert() - require.NoError(t, err) - tlsCerts = append(tlsCerts, cert) - } - for len(tlsConfigs) <= index { - cert := tlsCerts[len(tlsConfigs)] - tlsConfig := peer.TLSConfig(*cert, nil) - tlsConfigs = append(tlsConfigs, tlsConfig) - } - - tlsCert := tlsCerts[index] - cert := staking.CertificateFromX509(tlsCert.Leaf) - nodeID := ids.NodeIDFromCert(cert) - return nodeID, tlsCert, tlsConfigs[index] -} diff --git a/network/config.go b/network/config.go index 64c57d120cd6..de8eb44e14a0 100644 --- a/network/config.go +++ b/network/config.go @@ -6,6 +6,7 @@ package network import ( "crypto" "crypto/tls" + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -14,9 +15,9 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/set" ) @@ -58,22 +59,6 @@ type PeerListGossipConfig struct { // gossip event. PeerListNumValidatorIPs uint32 `json:"peerListNumValidatorIPs"` - // PeerListValidatorGossipSize is the number of validators to gossip the IPs - // to in every IP gossip event. - PeerListValidatorGossipSize uint32 `json:"peerListValidatorGossipSize"` - - // PeerListNonValidatorGossipSize is the number of non-validators to gossip - // the IPs to in every IP gossip event. - PeerListNonValidatorGossipSize uint32 `json:"peerListNonValidatorGossipSize"` - - // PeerListPeersGossipSize is the number of peers to gossip - // the IPs to in every IP gossip event. - PeerListPeersGossipSize uint32 `json:"peerListPeersGossipSize"` - - // PeerListGossipFreq is the frequency that this node will attempt to gossip - // signed IPs to its peers. 
- PeerListGossipFreq time.Duration `json:"peerListGossipFreq"` - // PeerListPullGossipFreq is the frequency that this node will attempt to // request signed IPs from its peers. PeerListPullGossipFreq time.Duration `json:"peerListPullGossipFreq"` @@ -126,13 +111,12 @@ type Config struct { TLSKeyLogFile string `json:"tlsKeyLogFile"` - Namespace string `json:"namespace"` - MyNodeID ids.NodeID `json:"myNodeID"` - MyIPPort ips.DynamicIPPort `json:"myIP"` - NetworkID uint32 `json:"networkID"` - MaxClockDifference time.Duration `json:"maxClockDifference"` - PingFrequency time.Duration `json:"pingFrequency"` - AllowPrivateIPs bool `json:"allowPrivateIPs"` + MyNodeID ids.NodeID `json:"myNodeID"` + MyIPPort *utils.Atomic[netip.AddrPort] `json:"myIP"` + NetworkID uint32 `json:"networkID"` + MaxClockDifference time.Duration `json:"maxClockDifference"` + PingFrequency time.Duration `json:"pingFrequency"` + AllowPrivateIPs bool `json:"allowPrivateIPs"` SupportedACPs set.Set[uint32] `json:"supportedACPs"` ObjectedACPs set.Set[uint32] `json:"objectedACPs"` diff --git a/network/dialer/dialer.go b/network/dialer/dialer.go index 109b63cc2002..2517184fedcc 100644 --- a/network/dialer/dialer.go +++ b/network/dialer/dialer.go @@ -7,12 +7,12 @@ import ( "context" "fmt" "net" + "net/netip" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -22,7 +22,7 @@ var _ Dialer = (*dialer)(nil) type Dialer interface { // If [ctx] is canceled, gives up trying to connect to [ip] // and returns an error. 
- Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) + Dial(ctx context.Context, ip netip.AddrPort) (net.Conn, error) } type dialer struct { @@ -62,7 +62,7 @@ func NewDialer(network string, dialerConfig Config, log logging.Logger) Dialer { } } -func (d *dialer) Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) { +func (d *dialer) Dial(ctx context.Context, ip netip.AddrPort) (net.Conn, error) { if err := d.throttler.Acquire(ctx); err != nil { return nil, err } diff --git a/network/dialer/dialer_test.go b/network/dialer/dialer_test.go index a824b8b03e08..01b3f640667b 100644 --- a/network/dialer/dialer_test.go +++ b/network/dialer/dialer_test.go @@ -6,14 +6,12 @@ package dialer import ( "context" "net" - "strconv" - "strings" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -22,7 +20,11 @@ import ( func TestDialerCancelDial(t *testing.T) { require := require.New(t) - l, err := net.Listen("tcp", "127.0.0.1:") + listenAddrPort := netip.AddrPortFrom( + netip.AddrFrom4([4]byte{127, 0, 0, 1}), + 0, + ) + l, err := net.Listen("tcp", listenAddrPort.String()) require.NoError(err) done := make(chan struct{}) @@ -43,12 +45,8 @@ func TestDialerCancelDial(t *testing.T) { } }() - port, err := strconv.Atoi(strings.Split(l.Addr().String(), ":")[1]) + listenedAddrPort, err := netip.ParseAddrPort(l.Addr().String()) require.NoError(err) - myIP := ips.IPPort{ - IP: net.ParseIP("127.0.0.1"), - Port: uint16(port), - } // Create a dialer dialer := NewDialer( @@ -63,11 +61,11 @@ func TestDialerCancelDial(t *testing.T) { // Make an outgoing connection with a cancelled context ctx, cancel := context.WithCancel(context.Background()) cancel() - _, err = dialer.Dial(ctx, myIP) + _, err = dialer.Dial(ctx, listenedAddrPort) require.ErrorIs(err, context.Canceled) // Make an outgoing connection with a non-cancelled context - conn, err := 
dialer.Dial(context.Background(), myIP) + conn, err := dialer.Dial(context.Background(), listenedAddrPort) require.NoError(err) _ = conn.Close() diff --git a/network/dialer_test.go b/network/dialer_test.go index 7a60d056d66d..d1567d20ad1b 100644 --- a/network/dialer_test.go +++ b/network/dialer_test.go @@ -7,9 +7,9 @@ import ( "context" "errors" "net" + "net/netip" "github.com/ava-labs/avalanchego/network/dialer" - "github.com/ava-labs/avalanchego/utils/ips" ) var ( @@ -20,33 +20,32 @@ var ( type testDialer struct { // maps [ip.String] to a listener - listeners map[string]*testListener + listeners map[netip.AddrPort]*testListener } func newTestDialer() *testDialer { return &testDialer{ - listeners: make(map[string]*testListener), + listeners: make(map[netip.AddrPort]*testListener), } } -func (d *testDialer) NewListener() (ips.DynamicIPPort, *testListener) { +func (d *testDialer) NewListener() (netip.AddrPort, *testListener) { // Uses a private IP to easily enable testing AllowPrivateIPs - ip := ips.NewDynamicIPPort( - net.IPv4(10, 0, 0, 0), + addrPort := netip.AddrPortFrom( + netip.AddrFrom4([4]byte{10, 0, 0, 0}), uint16(len(d.listeners)+1), ) - staticIP := ip.IPPort() - listener := newTestListener(staticIP) - d.AddListener(staticIP, listener) - return ip, listener + listener := newTestListener(addrPort) + d.AddListener(addrPort, listener) + return addrPort, listener } -func (d *testDialer) AddListener(ip ips.IPPort, listener *testListener) { - d.listeners[ip.String()] = listener +func (d *testDialer) AddListener(ip netip.AddrPort, listener *testListener) { + d.listeners[ip] = listener } -func (d *testDialer) Dial(ctx context.Context, ip ips.IPPort) (net.Conn, error) { - listener, ok := d.listeners[ip.String()] +func (d *testDialer) Dial(ctx context.Context, ip netip.AddrPort) (net.Conn, error) { + listener, ok := d.listeners[ip] if !ok { return nil, errRefused } diff --git a/network/example_test.go b/network/example_test.go index bfac03fba44f..0fef075f8101 100644 
--- a/network/example_test.go +++ b/network/example_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -110,7 +109,7 @@ func ExampleNewTestNetwork() { // gossip will enable connecting to all the remaining nodes in the network. bootstrappers := genesis.SampleBootstrappers(constants.FujiID, 5) for _, bootstrapper := range bootstrappers { - network.ManuallyTrack(bootstrapper.ID, ips.IPPort(bootstrapper.IP)) + network.ManuallyTrack(bootstrapper.ID, bootstrapper.IP) } // Typically network.StartClose() should be called based on receiving a diff --git a/network/ip_tracker.go b/network/ip_tracker.go index ed3935d4716c..370c7d47da92 100644 --- a/network/ip_tracker.go +++ b/network/ip_tracker.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" ) @@ -27,44 +26,46 @@ const ( minCountEstimate = 128 targetFalsePositiveProbability = .001 maxFalsePositiveProbability = .01 - // By setting maxIPEntriesPerValidator > 1, we allow validators to update - // their IP at least once per bloom filter reset. - maxIPEntriesPerValidator = 2 + // By setting maxIPEntriesPerNode > 1, we allow nodes to update their IP at + // least once per bloom filter reset. 
+ maxIPEntriesPerNode = 2 + + untrackedTimestamp = -2 + olderTimestamp = -1 + sameTimestamp = 0 + newerTimestamp = 1 + newTimestamp = 2 ) var _ validators.SetCallbackListener = (*ipTracker)(nil) func newIPTracker( log logging.Logger, - namespace string, registerer prometheus.Registerer, ) (*ipTracker, error) { - bloomNamespace := metric.AppendNamespace(namespace, "ip_bloom") - bloomMetrics, err := bloom.NewMetrics(bloomNamespace, registerer) + bloomMetrics, err := bloom.NewMetrics("ip_bloom", registerer) if err != nil { return nil, err } tracker := &ipTracker{ log: log, - numValidatorIPs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "validator_ips", - Help: "Number of known validator IPs", + numTrackedIPs: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "tracked_ips", + Help: "Number of IPs this node is willing to dial", }), - numGossipable: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "gossipable_ips", - Help: "Number of IPs this node is willing to gossip", + numGossipableIPs: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "gossipable_ips", + Help: "Number of IPs this node is willing to gossip", }), - bloomMetrics: bloomMetrics, - connected: make(map[ids.NodeID]*ips.ClaimedIPPort), - mostRecentValidatorIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), - gossipableIndicies: make(map[ids.NodeID]int), - bloomAdditions: make(map[ids.NodeID]int), + bloomMetrics: bloomMetrics, + mostRecentTrackedIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), + bloomAdditions: make(map[ids.NodeID]int), + connected: make(map[ids.NodeID]*ips.ClaimedIPPort), + gossipableIndices: make(map[ids.NodeID]int), } err = utils.Err( - registerer.Register(tracker.numValidatorIPs), - registerer.Register(tracker.numGossipable), + registerer.Register(tracker.numTrackedIPs), + registerer.Register(tracker.numGossipableIPs), ) if err != nil { return nil, err @@ -73,29 +74,31 @@ func newIPTracker( } type ipTracker struct { - log logging.Logger - 
numValidatorIPs prometheus.Gauge - numGossipable prometheus.Gauge - bloomMetrics *bloom.Metrics + log logging.Logger + numTrackedIPs prometheus.Gauge + numGossipableIPs prometheus.Gauge + bloomMetrics *bloom.Metrics lock sync.RWMutex - // Manually tracked nodes are always treated like validators + // manuallyTracked contains the nodeIDs of all nodes whose connection was + // manually requested. manuallyTracked set.Set[ids.NodeID] - // Connected tracks the currently connected peers, including validators and - // non-validators. The IP is not necessarily the same IP as in - // mostRecentIPs. - connected map[ids.NodeID]*ips.ClaimedIPPort - mostRecentValidatorIPs map[ids.NodeID]*ips.ClaimedIPPort - validators set.Set[ids.NodeID] - - // An IP is marked as gossipable if: + // manuallyGossipable contains the nodeIDs of all nodes whose IP was + // manually configured to be gossiped. + manuallyGossipable set.Set[ids.NodeID] + + // mostRecentTrackedIPs tracks the most recent IP of each node whose + // connection is desired. + // + // An IP is tracked if one of the following conditions are met: + // - The node was manually tracked + // - The node was manually requested to be gossiped // - The node is a validator - // - The node is connected - // - The IP the node connected with is its latest IP - gossipableIndicies map[ids.NodeID]int - gossipableIPs []*ips.ClaimedIPPort + mostRecentTrackedIPs map[ids.NodeID]*ips.ClaimedIPPort + // trackedIDs contains the nodeIDs of all nodes whose connection is desired. + trackedIDs set.Set[ids.NodeID] - // The bloom filter contains the most recent validator IPs to avoid + // The bloom filter contains the most recent tracked IPs to avoid // unnecessary IP gossip. 
bloom *bloom.Filter // To prevent validators from causing the bloom filter to have too many @@ -104,109 +107,146 @@ type ipTracker struct { bloomAdditions map[ids.NodeID]int // Number of IPs added to the bloom bloomSalt []byte maxBloomCount int + + // Connected tracks the IP of currently connected peers, including tracked + // and untracked nodes. The IP is not necessarily the same IP as in + // mostRecentTrackedIPs. + connected map[ids.NodeID]*ips.ClaimedIPPort + + // An IP is marked as gossipable if all of the following conditions are met: + // - The node is a validator or was manually requested to be gossiped + // - The node is connected + // - The IP the node connected with is its latest IP + gossipableIndices map[ids.NodeID]int + // gossipableIPs is guaranteed to be a subset of [mostRecentTrackedIPs]. + gossipableIPs []*ips.ClaimedIPPort + gossipableIDs set.Set[ids.NodeID] } +// ManuallyTrack marks the provided nodeID as being desirable to connect to. +// +// In order for a node to learn about these nodeIDs, other nodes in the network +// must have marked them as gossipable. +// +// Even if nodes disagree on the set of manually tracked nodeIDs, they will not +// introduce persistent network gossip. func (i *ipTracker) ManuallyTrack(nodeID ids.NodeID) { i.lock.Lock() defer i.lock.Unlock() - // We treat manually tracked nodes as if they were validators. - if !i.validators.Contains(nodeID) { - i.onValidatorAdded(nodeID) - } - // Now that the node is marked as a validator, freeze it's validation - // status. Future calls to OnValidatorAdded or OnValidatorRemoved will be - // treated as noops. + i.addTrackableID(nodeID) + i.manuallyTracked.Add(nodeID) +} + +// ManuallyGossip marks the provided nodeID as being desirable to connect to and +// marks the IPs that this node provides as being valid to gossip. +// +// In order to avoid persistent network gossip, it's important for nodes in the +// network to agree upon manually gossiped nodeIDs. 
+func (i *ipTracker) ManuallyGossip(nodeID ids.NodeID) { + i.lock.Lock() + defer i.lock.Unlock() + + i.addTrackableID(nodeID) i.manuallyTracked.Add(nodeID) + + i.addGossipableID(nodeID) + i.manuallyGossipable.Add(nodeID) } +// WantsConnection returns true if any of the following conditions are met: +// 1. The node has been manually tracked. +// 2. The node has been manually gossiped. +// 3. The node is currently a validator. func (i *ipTracker) WantsConnection(nodeID ids.NodeID) bool { i.lock.RLock() defer i.lock.RUnlock() - return i.validators.Contains(nodeID) + return i.trackedIDs.Contains(nodeID) } +// ShouldVerifyIP is used as an optimization to avoid unnecessary IP +// verification. It returns true if all of the following conditions are met: +// 1. The provided IP is from a node whose connection is desired. +// 2. This IP is newer than the most recent IP we know of for the node. func (i *ipTracker) ShouldVerifyIP(ip *ips.ClaimedIPPort) bool { i.lock.RLock() defer i.lock.RUnlock() - if !i.validators.Contains(ip.NodeID) { + if !i.trackedIDs.Contains(ip.NodeID) { return false } - prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID] + prevIP, ok := i.mostRecentTrackedIPs[ip.NodeID] return !ok || // This would be the first IP prevIP.Timestamp < ip.Timestamp // This would be a newer IP } -// AddIP returns true if the addition of the provided IP updated the most -// recently known IP of a validator. +// AddIP attempts to update the node's IP to the provided IP. This function +// assumes the provided IP has been verified. Returns true if all of the +// following conditions are met: +// 1. The provided IP is from a node whose connection is desired. +// 2. This IP is newer than the most recent IP we know of for the node. +// +// If the previous IP was marked as gossipable, calling this function will +// remove the IP from the gossipable set. 
func (i *ipTracker) AddIP(ip *ips.ClaimedIPPort) bool { i.lock.Lock() defer i.lock.Unlock() - if !i.validators.Contains(ip.NodeID) { - return false - } - - prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID] - if !ok { - // This is the first IP we've heard from the validator, so it is the - // most recent. - i.updateMostRecentValidatorIP(ip) - // Because we didn't previously have an IP, we know we aren't currently - // connected to them. - return true - } - - if prevIP.Timestamp >= ip.Timestamp { - // This IP is not newer than the previously known IP. - return false - } - - i.updateMostRecentValidatorIP(ip) - i.removeGossipableIP(ip.NodeID) - return true + return i.addIP(ip) > sameTimestamp } +// GetIP returns the most recent IP of the provided nodeID. If a connection to +// this nodeID is not desired, this function will return false. func (i *ipTracker) GetIP(nodeID ids.NodeID) (*ips.ClaimedIPPort, bool) { i.lock.RLock() defer i.lock.RUnlock() - ip, ok := i.mostRecentValidatorIPs[nodeID] + ip, ok := i.mostRecentTrackedIPs[nodeID] return ip, ok } +// Connected is called when a connection is established. The peer should have +// provided [ip] during the handshake. func (i *ipTracker) Connected(ip *ips.ClaimedIPPort) { i.lock.Lock() defer i.lock.Unlock() i.connected[ip.NodeID] = ip - if !i.validators.Contains(ip.NodeID) { - return + if i.addIP(ip) >= sameTimestamp && i.gossipableIDs.Contains(ip.NodeID) { + i.addGossipableIP(ip) } +} - prevIP, ok := i.mostRecentValidatorIPs[ip.NodeID] +func (i *ipTracker) addIP(ip *ips.ClaimedIPPort) int { + if !i.trackedIDs.Contains(ip.NodeID) { + return untrackedTimestamp + } + + prevIP, ok := i.mostRecentTrackedIPs[ip.NodeID] if !ok { // This is the first IP we've heard from the validator, so it is the // most recent. - i.updateMostRecentValidatorIP(ip) - i.addGossipableIP(ip) - return + i.updateMostRecentTrackedIP(ip) + // Because we didn't previously have an IP, we know we aren't currently + // connected to them. 
+ return newTimestamp } if prevIP.Timestamp > ip.Timestamp { - // There is a more up-to-date IP than the one that was used to connect. - return + return olderTimestamp // This IP is older than the previously known IP. } - - if prevIP.Timestamp < ip.Timestamp { - i.updateMostRecentValidatorIP(ip) + if prevIP.Timestamp == ip.Timestamp { + return sameTimestamp // This IP is equal to the previously known IP. } - i.addGossipableIP(ip) + + i.updateMostRecentTrackedIP(ip) + i.removeGossipableIP(ip.NodeID) + return newerTimestamp } +// Disconnected is called when a connection to the peer is closed. func (i *ipTracker) Disconnected(nodeID ids.NodeID) { i.lock.Lock() defer i.lock.Unlock() @@ -219,24 +259,42 @@ func (i *ipTracker) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids. i.lock.Lock() defer i.lock.Unlock() - i.onValidatorAdded(nodeID) + i.addTrackableID(nodeID) + i.addGossipableID(nodeID) } -func (i *ipTracker) onValidatorAdded(nodeID ids.NodeID) { - if i.manuallyTracked.Contains(nodeID) { +func (i *ipTracker) addTrackableID(nodeID ids.NodeID) { + if i.trackedIDs.Contains(nodeID) { return } - i.validators.Add(nodeID) + i.trackedIDs.Add(nodeID) ip, connected := i.connected[nodeID] if !connected { return } - // Because we only track validator IPs, the from the connection is - guaranteed to be the most up-to-date IP that we know. - i.updateMostRecentValidatorIP(ip) - i.addGossipableIP(ip) + // Because we previously weren't tracking this nodeID, the IP from the + // connection is guaranteed to be the most up-to-date IP that we know. 
+ i.updateMostRecentTrackedIP(ip) +} + +func (i *ipTracker) addGossipableID(nodeID ids.NodeID) { + if i.gossipableIDs.Contains(nodeID) { + return + } + + i.gossipableIDs.Add(nodeID) + connectedIP, connected := i.connected[nodeID] + if !connected { + return + } + + if updatedIP, ok := i.mostRecentTrackedIPs[nodeID]; !ok || connectedIP.Timestamp != updatedIP.Timestamp { + return + } + + i.addGossipableIP(connectedIP) } func (*ipTracker) OnValidatorWeightChanged(ids.NodeID, uint64, uint64) {} @@ -245,23 +303,28 @@ func (i *ipTracker) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { i.lock.Lock() defer i.lock.Unlock() - if i.manuallyTracked.Contains(nodeID) { + if i.manuallyGossipable.Contains(nodeID) { return } - delete(i.mostRecentValidatorIPs, nodeID) - i.numValidatorIPs.Set(float64(len(i.mostRecentValidatorIPs))) - - i.validators.Remove(nodeID) + i.gossipableIDs.Remove(nodeID) i.removeGossipableIP(nodeID) + + if i.manuallyTracked.Contains(nodeID) { + return + } + + i.trackedIDs.Remove(nodeID) + delete(i.mostRecentTrackedIPs, nodeID) + i.numTrackedIPs.Set(float64(len(i.mostRecentTrackedIPs))) } -func (i *ipTracker) updateMostRecentValidatorIP(ip *ips.ClaimedIPPort) { - i.mostRecentValidatorIPs[ip.NodeID] = ip - i.numValidatorIPs.Set(float64(len(i.mostRecentValidatorIPs))) +func (i *ipTracker) updateMostRecentTrackedIP(ip *ips.ClaimedIPPort) { + i.mostRecentTrackedIPs[ip.NodeID] = ip + i.numTrackedIPs.Set(float64(len(i.mostRecentTrackedIPs))) oldCount := i.bloomAdditions[ip.NodeID] - if oldCount >= maxIPEntriesPerValidator { + if oldCount >= maxIPEntriesPerNode { return } @@ -288,13 +351,13 @@ func (i *ipTracker) updateMostRecentValidatorIP(ip *ips.ClaimedIPPort) { } func (i *ipTracker) addGossipableIP(ip *ips.ClaimedIPPort) { - i.gossipableIndicies[ip.NodeID] = len(i.gossipableIPs) + i.gossipableIndices[ip.NodeID] = len(i.gossipableIPs) i.gossipableIPs = append(i.gossipableIPs, ip) - i.numGossipable.Inc() + i.numGossipableIPs.Inc() } func (i *ipTracker) 
removeGossipableIP(nodeID ids.NodeID) { - indexToRemove, wasGossipable := i.gossipableIndicies[nodeID] + indexToRemove, wasGossipable := i.gossipableIndices[nodeID] if !wasGossipable { return } @@ -302,14 +365,14 @@ func (i *ipTracker) removeGossipableIP(nodeID ids.NodeID) { newNumGossipable := len(i.gossipableIPs) - 1 if newNumGossipable != indexToRemove { replacementIP := i.gossipableIPs[newNumGossipable] - i.gossipableIndicies[replacementIP.NodeID] = indexToRemove + i.gossipableIndices[replacementIP.NodeID] = indexToRemove i.gossipableIPs[indexToRemove] = replacementIP } - delete(i.gossipableIndicies, nodeID) + delete(i.gossipableIndices, nodeID) i.gossipableIPs[newNumGossipable] = nil i.gossipableIPs = i.gossipableIPs[:newNumGossipable] - i.numGossipable.Dec() + i.numGossipableIPs.Dec() } // GetGossipableIPs returns the latest IPs of connected validators. The returned @@ -332,8 +395,8 @@ func (i *ipTracker) GetGossipableIPs( uniform.Initialize(uint64(len(i.gossipableIPs))) for len(ips) < maxNumIPs { - index, err := uniform.Next() - if err != nil { + index, hasNext := uniform.Next() + if !hasNext { return ips } @@ -378,7 +441,7 @@ func (i *ipTracker) resetBloom() error { return err } - count := max(maxIPEntriesPerValidator*i.validators.Len(), minCountEstimate) + count := max(maxIPEntriesPerNode*i.trackedIDs.Len(), minCountEstimate) numHashes, numEntries := bloom.OptimalParameters( count, targetFalsePositiveProbability, @@ -393,7 +456,7 @@ func (i *ipTracker) resetBloom() error { i.bloomSalt = newSalt i.maxBloomCount = bloom.EstimateCount(numHashes, numEntries, maxFalsePositiveProbability) - for nodeID, ip := range i.mostRecentValidatorIPs { + for nodeID, ip := range i.mostRecentTrackedIPs { bloom.Add(newFilter, ip.GossipID[:], newSalt) i.bloomAdditions[nodeID] = 1 } diff --git a/network/ip_tracker_test.go b/network/ip_tracker_test.go index da088f4aae99..bbfbdb958773 100644 --- a/network/ip_tracker_test.go +++ b/network/ip_tracker_test.go @@ -17,7 +17,7 @@ import 
( ) func newTestIPTracker(t *testing.T) *ipTracker { - tracker, err := newIPTracker(logging.NoLog{}, "", prometheus.NewRegistry()) + tracker, err := newIPTracker(logging.NoLog{}, prometheus.NewRegistry()) require.NoError(t, err) return tracker } @@ -25,7 +25,7 @@ func newTestIPTracker(t *testing.T) *ipTracker { func newerTestIP(ip *ips.ClaimedIPPort) *ips.ClaimedIPPort { return ips.NewClaimedIPPort( ip.Cert, - ip.IPPort, + ip.AddrPort, ip.Timestamp+1, ip.Signature, ) @@ -34,19 +34,21 @@ func newerTestIP(ip *ips.ClaimedIPPort) *ips.ClaimedIPPort { func requireEqual(t *testing.T, expected, actual *ipTracker) { require := require.New(t) require.Equal(expected.manuallyTracked, actual.manuallyTracked) - require.Equal(expected.connected, actual.connected) - require.Equal(expected.mostRecentValidatorIPs, actual.mostRecentValidatorIPs) - require.Equal(expected.validators, actual.validators) - require.Equal(expected.gossipableIndicies, actual.gossipableIndicies) - require.Equal(expected.gossipableIPs, actual.gossipableIPs) + require.Equal(expected.manuallyGossipable, actual.manuallyGossipable) + require.Equal(expected.mostRecentTrackedIPs, actual.mostRecentTrackedIPs) + require.Equal(expected.trackedIDs, actual.trackedIDs) require.Equal(expected.bloomAdditions, actual.bloomAdditions) require.Equal(expected.maxBloomCount, actual.maxBloomCount) + require.Equal(expected.connected, actual.connected) + require.Equal(expected.gossipableIndices, actual.gossipableIndices) + require.Equal(expected.gossipableIPs, actual.gossipableIPs) + require.Equal(expected.gossipableIDs, actual.gossipableIDs) } func requireMetricsConsistent(t *testing.T, tracker *ipTracker) { require := require.New(t) - require.Equal(float64(len(tracker.mostRecentValidatorIPs)), testutil.ToFloat64(tracker.numValidatorIPs)) - require.Equal(float64(len(tracker.gossipableIPs)), testutil.ToFloat64(tracker.numGossipable)) + require.Equal(float64(len(tracker.mostRecentTrackedIPs)), 
testutil.ToFloat64(tracker.numTrackedIPs)) + require.Equal(float64(len(tracker.gossipableIPs)), testutil.ToFloat64(tracker.numGossipableIPs)) require.Equal(float64(tracker.bloom.Count()), testutil.ToFloat64(tracker.bloomMetrics.Count)) require.Equal(float64(tracker.maxBloomCount), testutil.ToFloat64(tracker.bloomMetrics.MaxCount)) } @@ -64,8 +66,8 @@ func TestIPTracker_ManuallyTrack(t *testing.T) { nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.validators.Add(ip.NodeID) tracker.manuallyTracked.Add(ip.NodeID) + tracker.trackedIDs.Add(ip.NodeID) return tracker }(), }, @@ -80,14 +82,96 @@ func TestIPTracker_ManuallyTrack(t *testing.T) { expectedState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.Connected(ip) - tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.manuallyTracked.Add(ip.NodeID) + tracker.mostRecentTrackedIPs[ip.NodeID] = ip + tracker.trackedIDs.Add(ip.NodeID) tracker.bloomAdditions[ip.NodeID] = 1 - tracker.gossipableIndicies[ip.NodeID] = 0 + return tracker + }(), + }, + { + name: "non-connected validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.manuallyTracked.Add(ip.NodeID) + return tracker + }(), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + 
test.initialState.ManuallyTrack(test.nodeID) + requireEqual(t, test.expectedState, test.initialState) + requireMetricsConsistent(t, test.initialState) + }) + } +} + +func TestIPTracker_ManuallyGossip(t *testing.T) { + tests := []struct { + name string + initialState *ipTracker + nodeID ids.NodeID + expectedState *ipTracker + }{ + { + name: "non-connected non-validator", + initialState: newTestIPTracker(t), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.manuallyTracked.Add(ip.NodeID) + tracker.manuallyGossipable.Add(ip.NodeID) + tracker.trackedIDs.Add(ip.NodeID) + tracker.gossipableIDs.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "connected non-validator", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.manuallyTracked.Add(ip.NodeID) + tracker.manuallyGossipable.Add(ip.NodeID) + tracker.mostRecentTrackedIPs[ip.NodeID] = ip + tracker.trackedIDs.Add(ip.NodeID) + tracker.bloomAdditions[ip.NodeID] = 1 + tracker.gossipableIndices[ip.NodeID] = 0 tracker.gossipableIPs = []*ips.ClaimedIPPort{ ip, } - tracker.validators.Add(ip.NodeID) - tracker.manuallyTracked.Add(ip.NodeID) + tracker.gossipableIDs.Add(ip.NodeID) return tracker }(), }, @@ -95,14 +179,15 @@ func TestIPTracker_ManuallyTrack(t *testing.T) { name: "non-connected validator", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.manuallyTracked.Add(ip.NodeID) + tracker.manuallyGossipable.Add(ip.NodeID) return tracker }(), }, @@ -111,22 +196,23 @@ func 
TestIPTracker_ManuallyTrack(t *testing.T) { initialState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.Connected(ip) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.Connected(ip) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.manuallyTracked.Add(ip.NodeID) + tracker.manuallyGossipable.Add(ip.NodeID) return tracker }(), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - test.initialState.ManuallyTrack(test.nodeID) + test.initialState.ManuallyGossip(test.nodeID) requireEqual(t, test.expectedState, test.initialState) requireMetricsConsistent(t, test.initialState) }) @@ -153,15 +239,15 @@ func TestIPTracker_AddIP(t *testing.T) { name: "first known IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) return tracker }(), ip: ip, expectedUpdated: true, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) - tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.mostRecentTrackedIPs[ip.NodeID] = ip tracker.bloomAdditions[ip.NodeID] = 1 return tracker }(), @@ -170,7 +256,7 @@ func TestIPTracker_AddIP(t *testing.T) { name: "older IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(newerIP.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(newerIP)) return tracker }(), @@ -178,7 +264,7 @@ func TestIPTracker_AddIP(t *testing.T) { expectedUpdated: false, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(newerIP.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) 
require.True(t, tracker.AddIP(newerIP)) return tracker }(), @@ -187,7 +273,7 @@ func TestIPTracker_AddIP(t *testing.T) { name: "same IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) return tracker }(), @@ -195,7 +281,7 @@ func TestIPTracker_AddIP(t *testing.T) { expectedUpdated: false, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) return tracker }(), @@ -204,7 +290,7 @@ func TestIPTracker_AddIP(t *testing.T) { name: "disconnected newer IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) return tracker }(), @@ -212,9 +298,9 @@ func TestIPTracker_AddIP(t *testing.T) { expectedUpdated: true, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) - tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.mostRecentTrackedIPs[newerIP.NodeID] = newerIP tracker.bloomAdditions[newerIP.NodeID] = 2 return tracker }(), @@ -223,7 +309,7 @@ func TestIPTracker_AddIP(t *testing.T) { name: "connected newer IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) return tracker }(), @@ -231,11 +317,11 @@ func TestIPTracker_AddIP(t *testing.T) { expectedUpdated: true, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) - 
tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.mostRecentTrackedIPs[newerIP.NodeID] = newerIP tracker.bloomAdditions[newerIP.NodeID] = 2 - delete(tracker.gossipableIndicies, newerIP.NodeID) + delete(tracker.gossipableIndices, newerIP.NodeID) tracker.gossipableIPs = tracker.gossipableIPs[:0] return tracker }(), @@ -273,17 +359,17 @@ func TestIPTracker_Connected(t *testing.T) { name: "first known IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) return tracker }(), ip: ip, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) - tracker.connected[ip.NodeID] = ip - tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.mostRecentTrackedIPs[ip.NodeID] = ip tracker.bloomAdditions[ip.NodeID] = 1 - tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.connected[ip.NodeID] = ip + tracker.gossipableIndices[ip.NodeID] = 0 tracker.gossipableIPs = []*ips.ClaimedIPPort{ ip, } @@ -294,14 +380,14 @@ func TestIPTracker_Connected(t *testing.T) { name: "connected with older IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(newerIP.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(newerIP)) return tracker }(), ip: ip, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(newerIP.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(newerIP)) tracker.connected[ip.NodeID] = ip return tracker @@ -311,19 +397,19 @@ func TestIPTracker_Connected(t *testing.T) { name: "connected with newer IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) 
return tracker }(), ip: newerIP, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) - tracker.connected[newerIP.NodeID] = newerIP - tracker.mostRecentValidatorIPs[newerIP.NodeID] = newerIP + tracker.mostRecentTrackedIPs[newerIP.NodeID] = newerIP tracker.bloomAdditions[newerIP.NodeID] = 2 - tracker.gossipableIndicies[newerIP.NodeID] = 0 + tracker.connected[newerIP.NodeID] = newerIP + tracker.gossipableIndices[newerIP.NodeID] = 0 tracker.gossipableIPs = []*ips.ClaimedIPPort{ newerIP, } @@ -334,17 +420,17 @@ func TestIPTracker_Connected(t *testing.T) { name: "connected with same IP", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) return tracker }(), ip: ip, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) tracker.connected[ip.NodeID] = ip - tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIndices[ip.NodeID] = 0 tracker.gossipableIPs = []*ips.ClaimedIPPort{ ip, } @@ -369,7 +455,7 @@ func TestIPTracker_Disconnected(t *testing.T) { expectedState *ipTracker }{ { - name: "not gossipable", + name: "not tracked", initialState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.Connected(ip) @@ -378,21 +464,38 @@ func TestIPTracker_Disconnected(t *testing.T) { nodeID: ip.NodeID, expectedState: newTestIPTracker(t), }, + { + name: "not gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + tracker.ManuallyTrack(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.Connected(ip) + 
tracker.ManuallyTrack(ip.NodeID) + delete(tracker.connected, ip.NodeID) + return tracker + }(), + }, { name: "latest gossipable", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) delete(tracker.connected, ip.NodeID) - delete(tracker.gossipableIndicies, ip.NodeID) + delete(tracker.gossipableIndices, ip.NodeID) tracker.gossipableIPs = tracker.gossipableIPs[:0] return tracker }(), @@ -401,21 +504,21 @@ func TestIPTracker_Disconnected(t *testing.T) { name: "non-latest gossipable", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) tracker.Connected(otherIP) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) tracker.Connected(otherIP) delete(tracker.connected, ip.NodeID) - tracker.gossipableIndicies = map[ids.NodeID]int{ + tracker.gossipableIndices = map[ids.NodeID]int{ otherIP.NodeID: 0, } tracker.gossipableIPs = []*ips.ClaimedIPPort{ @@ -435,6 +538,8 @@ func TestIPTracker_Disconnected(t *testing.T) { } func TestIPTracker_OnValidatorAdded(t *testing.T) { + newerIP := newerTestIP(ip) + tests := []struct { name string initialState *ipTracker @@ -452,6 +557,40 @@ func TestIPTracker_OnValidatorAdded(t *testing.T) { 
expectedState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.ManuallyTrack(ip.NodeID) + tracker.gossipableIDs.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "manually tracked and connected with older IP", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.Connected(ip) + require.True(t, tracker.AddIP(newerIP)) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.Connected(ip) + require.True(t, tracker.AddIP(newerIP)) + tracker.gossipableIDs.Add(ip.NodeID) + return tracker + }(), + }, + { + name: "manually gossiped", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyGossip(ip.NodeID) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyGossip(ip.NodeID) return tracker }(), }, @@ -461,7 +600,8 @@ func TestIPTracker_OnValidatorAdded(t *testing.T) { nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.validators.Add(ip.NodeID) + tracker.trackedIDs.Add(ip.NodeID) + tracker.gossipableIDs.Add(ip.NodeID) return tracker }(), }, @@ -476,13 +616,14 @@ func TestIPTracker_OnValidatorAdded(t *testing.T) { expectedState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.Connected(ip) - tracker.validators.Add(ip.NodeID) - tracker.mostRecentValidatorIPs[ip.NodeID] = ip + tracker.mostRecentTrackedIPs[ip.NodeID] = ip + tracker.trackedIDs.Add(ip.NodeID) tracker.bloomAdditions[ip.NodeID] = 1 - tracker.gossipableIndicies[ip.NodeID] = 0 + tracker.gossipableIndices[ip.NodeID] = 0 tracker.gossipableIPs = []*ips.ClaimedIPPort{ ip, } + tracker.gossipableIDs.Add(ip.NodeID) return tracker }(), }, @@ -504,11 +645,30 @@ func TestIPTracker_OnValidatorRemoved(t *testing.T) { expectedState *ipTracker }{ { - name: "manually tracked", + name: 
"manually tracked not gossipable", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + require.True(t, tracker.AddIP(ip)) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyTrack(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + require.True(t, tracker.AddIP(ip)) + tracker.gossipableIDs.Remove(ip.NodeID) + return tracker + }(), + }, + { + name: "manually tracked latest gossipable", initialState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.ManuallyTrack(ip.NodeID) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) return tracker }(), @@ -516,7 +676,28 @@ func TestIPTracker_OnValidatorRemoved(t *testing.T) { expectedState: func() *ipTracker { tracker := newTestIPTracker(t) tracker.ManuallyTrack(ip.NodeID) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.Connected(ip) + delete(tracker.gossipableIndices, ip.NodeID) + tracker.gossipableIPs = tracker.gossipableIPs[:0] + tracker.gossipableIDs.Remove(ip.NodeID) + return tracker + }(), + }, + { + name: "manually gossiped", + initialState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyGossip(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.Connected(ip) + return tracker + }(), + nodeID: ip.NodeID, + expectedState: func() *ipTracker { + tracker := newTestIPTracker(t) + tracker.ManuallyGossip(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) return tracker }(), @@ -525,17 +706,18 @@ func TestIPTracker_OnValidatorRemoved(t *testing.T) { name: "not gossipable", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + 
tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) require.True(t, tracker.AddIP(ip)) - delete(tracker.mostRecentValidatorIPs, ip.NodeID) - tracker.validators.Remove(ip.NodeID) + delete(tracker.mostRecentTrackedIPs, ip.NodeID) + tracker.trackedIDs.Remove(ip.NodeID) + tracker.gossipableIDs.Remove(ip.NodeID) return tracker }(), }, @@ -543,19 +725,20 @@ func TestIPTracker_OnValidatorRemoved(t *testing.T) { name: "latest gossipable", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) - delete(tracker.mostRecentValidatorIPs, ip.NodeID) - tracker.validators.Remove(ip.NodeID) - delete(tracker.gossipableIndicies, ip.NodeID) + delete(tracker.mostRecentTrackedIPs, ip.NodeID) + tracker.trackedIDs.Remove(ip.NodeID) + delete(tracker.gossipableIndices, ip.NodeID) tracker.gossipableIPs = tracker.gossipableIPs[:0] + tracker.gossipableIDs.Remove(ip.NodeID) return tracker }(), }, @@ -563,27 +746,28 @@ func TestIPTracker_OnValidatorRemoved(t *testing.T) { name: "non-latest gossipable", initialState: func() *ipTracker { tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) tracker.Connected(otherIP) return tracker }(), nodeID: ip.NodeID, expectedState: func() *ipTracker { tracker := newTestIPTracker(t) - 
tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.Connected(ip) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) tracker.Connected(otherIP) - delete(tracker.mostRecentValidatorIPs, ip.NodeID) - tracker.validators.Remove(ip.NodeID) - tracker.gossipableIndicies = map[ids.NodeID]int{ + delete(tracker.mostRecentTrackedIPs, ip.NodeID) + tracker.trackedIDs.Remove(ip.NodeID) + tracker.gossipableIndices = map[ids.NodeID]int{ otherIP.NodeID: 0, } tracker.gossipableIPs = []*ips.ClaimedIPPort{ otherIP, } + tracker.gossipableIDs.Remove(ip.NodeID) return tracker }(), }, @@ -603,8 +787,8 @@ func TestIPTracker_GetGossipableIPs(t *testing.T) { tracker := newTestIPTracker(t) tracker.Connected(ip) tracker.Connected(otherIP) - tracker.onValidatorAdded(ip.NodeID) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) gossipableIPs := tracker.GetGossipableIPs(ids.EmptyNodeID, bloom.EmptyFilter, nil, 2) require.ElementsMatch([]*ips.ClaimedIPPort{ip, otherIP}, gossipableIPs) @@ -632,8 +816,8 @@ func TestIPTracker_BloomFiltersEverything(t *testing.T) { tracker := newTestIPTracker(t) tracker.Connected(ip) tracker.Connected(otherIP) - tracker.onValidatorAdded(ip.NodeID) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) bloomBytes, salt := tracker.Bloom() readFilter, err := bloom.Parse(bloomBytes) @@ -645,19 +829,40 @@ func TestIPTracker_BloomFiltersEverything(t *testing.T) { require.NoError(tracker.ResetBloom()) } -func TestIPTracker_BloomGrowsWithValidatorSet(t *testing.T) { - require := require.New(t) - - tracker := newTestIPTracker(t) - initialMaxBloomCount := tracker.maxBloomCount - for i := 0; i < 2048; i++ { - tracker.onValidatorAdded(ids.GenerateTestNodeID()) 
+func TestIPTracker_BloomGrows(t *testing.T) { + tests := []struct { + name string + add func(tracker *ipTracker) + }{ + { + name: "Add Validator", + add: func(tracker *ipTracker) { + tracker.OnValidatorAdded(ids.GenerateTestNodeID(), nil, ids.Empty, 0) + }, + }, + { + name: "Manually Track", + add: func(tracker *ipTracker) { + tracker.ManuallyTrack(ids.GenerateTestNodeID()) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tracker := newTestIPTracker(t) + initialMaxBloomCount := tracker.maxBloomCount + for i := 0; i < 2048; i++ { + test.add(tracker) + } + requireMetricsConsistent(t, tracker) + + require.NoError(tracker.ResetBloom()) + require.Greater(tracker.maxBloomCount, initialMaxBloomCount) + requireMetricsConsistent(t, tracker) + }) } - requireMetricsConsistent(t, tracker) - - require.NoError(tracker.ResetBloom()) - require.Greater(tracker.maxBloomCount, initialMaxBloomCount) - requireMetricsConsistent(t, tracker) } func TestIPTracker_BloomResetsDynamically(t *testing.T) { @@ -665,11 +870,11 @@ func TestIPTracker_BloomResetsDynamically(t *testing.T) { tracker := newTestIPTracker(t) tracker.Connected(ip) - tracker.onValidatorAdded(ip.NodeID) + tracker.OnValidatorAdded(ip.NodeID, nil, ids.Empty, 0) tracker.OnValidatorRemoved(ip.NodeID, 0) tracker.maxBloomCount = 1 tracker.Connected(otherIP) - tracker.onValidatorAdded(otherIP.NodeID) + tracker.OnValidatorAdded(otherIP.NodeID, nil, ids.Empty, 0) requireMetricsConsistent(t, tracker) bloomBytes, salt := tracker.Bloom() @@ -687,11 +892,11 @@ func TestIPTracker_PreventBloomFilterAddition(t *testing.T) { newestIP := newerTestIP(newerIP) tracker := newTestIPTracker(t) - tracker.onValidatorAdded(ip.NodeID) + tracker.ManuallyGossip(ip.NodeID) require.True(tracker.AddIP(ip)) require.True(tracker.AddIP(newerIP)) require.True(tracker.AddIP(newestIP)) - require.Equal(maxIPEntriesPerValidator, tracker.bloomAdditions[ip.NodeID]) + 
require.Equal(maxIPEntriesPerNode, tracker.bloomAdditions[ip.NodeID]) requireMetricsConsistent(t, tracker) } @@ -702,7 +907,9 @@ func TestIPTracker_ShouldVerifyIP(t *testing.T) { tracker := newTestIPTracker(t) require.False(tracker.ShouldVerifyIP(ip)) - tracker.onValidatorAdded(ip.NodeID) + tracker.ManuallyTrack(ip.NodeID) + require.True(tracker.ShouldVerifyIP(ip)) + tracker.ManuallyGossip(ip.NodeID) require.True(tracker.ShouldVerifyIP(ip)) require.True(tracker.AddIP(ip)) require.False(tracker.ShouldVerifyIP(ip)) diff --git a/network/listener_test.go b/network/listener_test.go index 5d6073c6b383..a0167e817baa 100644 --- a/network/listener_test.go +++ b/network/listener_test.go @@ -5,19 +5,18 @@ package network import ( "net" - - "github.com/ava-labs/avalanchego/utils/ips" + "net/netip" ) var _ net.Listener = (*testListener)(nil) type testListener struct { - ip ips.IPPort + ip netip.AddrPort inbound chan net.Conn closed chan struct{} } -func newTestListener(ip ips.IPPort) *testListener { +func newTestListener(ip netip.AddrPort) *testListener { return &testListener{ ip: ip, inbound: make(chan net.Conn), @@ -41,7 +40,7 @@ func (l *testListener) Close() error { func (l *testListener) Addr() net.Addr { return &net.TCPAddr{ - IP: l.ip.IP, - Port: int(l.ip.Port), + IP: l.ip.Addr().AsSlice(), + Port: int(l.ip.Port()), } } diff --git a/network/metrics.go b/network/metrics.go index e2a3a363b403..8cc5155ec102 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -12,11 +12,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" ) type metrics struct { + // trackedSubnets does not include the primary network ID + trackedSubnets set.Set[ids.ID] + numTracked prometheus.Gauge numPeers prometheus.Gauge numSubnetPeers *prometheus.GaugeVec @@ -41,107 +43,93 @@ type metrics struct { 
peerConnectedStartTimesSum float64 } -func newMetrics(namespace string, registerer prometheus.Registerer, initialSubnetIDs set.Set[ids.ID]) (*metrics, error) { +func newMetrics( + registerer prometheus.Registerer, + trackedSubnets set.Set[ids.ID], +) (*metrics, error) { m := &metrics{ + trackedSubnets: trackedSubnets, numPeers: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "peers", - Help: "Number of network peers", + Name: "peers", + Help: "Number of network peers", }), numTracked: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tracked", - Help: "Number of currently tracked IPs attempting to be connected to", + Name: "tracked", + Help: "Number of currently tracked IPs attempting to be connected to", }), numSubnetPeers: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "peers_subnet", - Help: "Number of peers that are validating a particular subnet", + Name: "peers_subnet", + Help: "Number of peers that are validating a particular subnet", }, []string{"subnetID"}, ), timeSinceLastMsgReceived: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_since_last_msg_received", - Help: "Time (in ns) since the last msg was received", + Name: "time_since_last_msg_received", + Help: "Time (in ns) since the last msg was received", }), timeSinceLastMsgSent: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_since_last_msg_sent", - Help: "Time (in ns) since the last msg was sent", + Name: "time_since_last_msg_sent", + Help: "Time (in ns) since the last msg was sent", }), sendFailRate: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "send_fail_rate", - Help: "Portion of messages that recently failed to be sent over the network", + Name: "send_fail_rate", + Help: "Portion of messages that recently failed to be sent over the network", }), connected: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, 
- Name: "times_connected", - Help: "Times this node successfully completed a handshake with a peer", + Name: "times_connected", + Help: "Times this node successfully completed a handshake with a peer", }), disconnected: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "times_disconnected", - Help: "Times this node disconnected from a peer it had completed a handshake with", + Name: "times_disconnected", + Help: "Times this node disconnected from a peer it had completed a handshake with", }), acceptFailed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accept_failed", - Help: "Times this node's listener failed to accept an inbound connection", + Name: "accept_failed", + Help: "Times this node's listener failed to accept an inbound connection", }), inboundConnAllowed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "inbound_conn_throttler_allowed", - Help: "Times this node allowed (attempted to upgrade) an inbound connection", + Name: "inbound_conn_throttler_allowed", + Help: "Times this node allowed (attempted to upgrade) an inbound connection", }), tlsConnRejected: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tls_conn_rejected", - Help: "Times this node rejected a connection due to an unsupported TLS certificate", + Name: "tls_conn_rejected", + Help: "Times this node rejected a connection due to an unsupported TLS certificate", }), numUselessPeerListBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_peerlist_bytes", - Help: "Amount of useless bytes (i.e. information about nodes we already knew/don't want to connect to) received in PeerList messages", + Name: "num_useless_peerlist_bytes", + Help: "Amount of useless bytes (i.e. 
information about nodes we already knew/don't want to connect to) received in PeerList messages", }), inboundConnRateLimited: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "inbound_conn_throttler_rate_limited", - Help: "Times this node rejected an inbound connection due to rate-limiting", + Name: "inbound_conn_throttler_rate_limited", + Help: "Times this node rejected an inbound connection due to rate-limiting", }), nodeUptimeWeightedAverage: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_uptime_weighted_average", - Help: "This node's uptime average weighted by observing peer stakes", + Name: "node_uptime_weighted_average", + Help: "This node's uptime average weighted by observing peer stakes", }), nodeUptimeRewardingStake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_uptime_rewarding_stake", - Help: "The percentage of total stake which thinks this node is eligible for rewards", + Name: "node_uptime_rewarding_stake", + Help: "The percentage of total stake which thinks this node is eligible for rewards", }), nodeSubnetUptimeWeightedAverage: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_subnet_uptime_weighted_average", - Help: "This node's subnet uptime averages weighted by observing subnet peer stakes", + Name: "node_subnet_uptime_weighted_average", + Help: "This node's subnet uptime averages weighted by observing subnet peer stakes", }, []string{"subnetID"}, ), nodeSubnetUptimeRewardingStake: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "node_subnet_uptime_rewarding_stake", - Help: "The percentage of subnet's total stake which thinks this node is eligible for subnet's rewards", + Name: "node_subnet_uptime_rewarding_stake", + Help: "The percentage of subnet's total stake which thinks this node is eligible for subnet's rewards", }, []string{"subnetID"}, ), peerConnectedLifetimeAverage: 
prometheus.NewGauge( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "peer_connected_duration_average", - Help: "The average duration of all peer connections in nanoseconds", + Name: "peer_connected_duration_average", + Help: "The average duration of all peer connections in nanoseconds", }, ), peerConnectedStartTimes: make(map[ids.NodeID]float64), @@ -169,11 +157,7 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne ) // init subnet tracker metrics with tracked subnets - for subnetID := range initialSubnetIDs { - // no need to track primary network ID - if subnetID == constants.PrimaryNetworkID { - continue - } + for subnetID := range trackedSubnets { // initialize to 0 subnetIDStr := subnetID.String() m.numSubnetPeers.WithLabelValues(subnetIDStr).Set(0) @@ -189,8 +173,10 @@ func (m *metrics) markConnected(peer peer.Peer) { m.connected.Inc() trackedSubnets := peer.TrackedSubnets() - for subnetID := range trackedSubnets { - m.numSubnetPeers.WithLabelValues(subnetID.String()).Inc() + for subnetID := range m.trackedSubnets { + if trackedSubnets.Contains(subnetID) { + m.numSubnetPeers.WithLabelValues(subnetID.String()).Inc() + } } m.lock.Lock() @@ -206,8 +192,10 @@ func (m *metrics) markDisconnected(peer peer.Peer) { m.disconnected.Inc() trackedSubnets := peer.TrackedSubnets() - for subnetID := range trackedSubnets { - m.numSubnetPeers.WithLabelValues(subnetID.String()).Dec() + for subnetID := range m.trackedSubnets { + if trackedSubnets.Contains(subnetID) { + m.numSubnetPeers.WithLabelValues(subnetID.String()).Dec() + } } m.lock.Lock() diff --git a/network/network.go b/network/network.go index 5e4b3cdc4e82..2aee13a910d9 100644 --- a/network/network.go +++ b/network/network.go @@ -9,6 +9,7 @@ import ( "fmt" "math" "net" + "net/netip" "strings" "sync" "sync/atomic" @@ -25,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" 
"github.com/ava-labs/avalanchego/network/throttling" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/subnets" @@ -47,8 +49,7 @@ const ( ) var ( - _ sender.ExternalSender = (*network)(nil) - _ Network = (*network)(nil) + _ Network = (*network)(nil) errNotValidator = errors.New("node is not a validator") errNotTracked = errors.New("subnet is not tracked") @@ -77,7 +78,7 @@ type Network interface { // Attempt to connect to this IP. The network will never stop attempting to // connect to this ID. - ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) + ManuallyTrack(nodeID ids.NodeID, ip netip.AddrPort) // PeerInfo returns information about peers. If [nodeIDs] is empty, returns // info about all peers that have finished the handshake. Otherwise, returns @@ -202,7 +203,6 @@ func NewNetwork( inboundMsgThrottler, err := throttling.NewInboundMsgThrottler( log, - config.Namespace, metricsRegisterer, config.Validators, config.ThrottlerConfig.InboundMsgThrottlerConfig, @@ -216,7 +216,6 @@ func NewNetwork( outboundMsgThrottler, err := throttling.NewSybilOutboundMsgThrottler( log, - config.Namespace, metricsRegisterer, config.Validators, config.ThrottlerConfig.OutboundMsgThrottlerConfig, @@ -225,26 +224,31 @@ func NewNetwork( return nil, fmt.Errorf("initializing outbound message throttler failed with: %w", err) } - peerMetrics, err := peer.NewMetrics(log, config.Namespace, metricsRegisterer) + peerMetrics, err := peer.NewMetrics(metricsRegisterer) if err != nil { return nil, fmt.Errorf("initializing peer metrics failed with: %w", err) } - metrics, err := newMetrics(config.Namespace, metricsRegisterer, config.TrackedSubnets) + metrics, err := newMetrics(metricsRegisterer, config.TrackedSubnets) if err != nil { return nil, fmt.Errorf("initializing network metrics failed with: %w", err) } - ipTracker, err := newIPTracker(log, 
config.Namespace, metricsRegisterer) + ipTracker, err := newIPTracker(log, metricsRegisterer) if err != nil { return nil, fmt.Errorf("initializing ip tracker failed with: %w", err) } - config.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, ipTracker) + config.Validators.RegisterSetCallbackListener(constants.PrimaryNetworkID, ipTracker) // Track all default bootstrappers to ensure their current IPs are gossiped // like validator IPs. for _, bootstrapper := range genesis.GetBootstrappers(config.NetworkID) { - ipTracker.ManuallyTrack(bootstrapper.ID) + ipTracker.ManuallyGossip(bootstrapper.ID) + } + // Track all recent validators to optimistically connect to them before the + // P-chain has finished syncing. + for nodeID := range genesis.GetValidators(config.NetworkID) { + ipTracker.ManuallyTrack(nodeID) } peerConfig := &peer.Config{ @@ -272,12 +276,6 @@ func NewNetwork( IPSigner: peer.NewIPSigner(config.MyIPPort, config.TLSKey, config.BLSKey), } - // Invariant: We delay the activation of durango during the TLS handshake to - // avoid gossiping any TLS certs that anyone else in the network may - // consider invalid. Recall that if a peer gossips an invalid cert, the - // connection is terminated. 
- durangoTime := version.GetDurangoTime(config.NetworkID) - durangoTimeWithClockSkew := durangoTime.Add(config.MaxClockDifference) onCloseCtx, cancel := context.WithCancel(context.Background()) n := &network{ config: config, @@ -288,8 +286,8 @@ func NewNetwork( inboundConnUpgradeThrottler: throttling.NewInboundConnUpgradeThrottler(log, config.ThrottlerConfig.InboundConnUpgradeThrottlerConfig), listener: listener, dialer: dialer, - serverUpgrader: peer.NewTLSServerUpgrader(config.TLSConfig, metrics.tlsConnRejected, durangoTimeWithClockSkew), - clientUpgrader: peer.NewTLSClientUpgrader(config.TLSConfig, metrics.tlsConnRejected, durangoTimeWithClockSkew), + serverUpgrader: peer.NewTLSServerUpgrader(config.TLSConfig, metrics.tlsConnRejected), + clientUpgrader: peer.NewTLSClientUpgrader(config.TLSConfig, metrics.tlsConnRejected), onCloseCtx: onCloseCtx, onCloseCtxCancel: cancel, @@ -310,25 +308,42 @@ func NewNetwork( return n, nil } -func (n *network) Send(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] { - peers := n.getPeers(nodeIDs, subnetID, allower) - n.peerConfig.Metrics.MultipleSendsFailed( - msg.Op(), - nodeIDs.Len()-len(peers), - ) - return n.send(msg, peers) -} - -func (n *network) Gossip( +func (n *network) Send( msg message.OutboundMessage, + config common.SendConfig, subnetID ids.ID, - numValidatorsToSend int, - numNonValidatorsToSend int, - numPeersToSend int, allower subnets.Allower, ) set.Set[ids.NodeID] { - peers := n.samplePeers(subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) - return n.send(msg, peers) + namedPeers := n.getPeers(config.NodeIDs, subnetID, allower) + n.peerConfig.Metrics.MultipleSendsFailed( + msg.Op(), + config.NodeIDs.Len()-len(namedPeers), + ) + + var ( + sampledPeers = n.samplePeers(config, subnetID, allower) + sentTo = set.NewSet[ids.NodeID](len(namedPeers) + len(sampledPeers)) + now = n.peerConfig.Clock.Time() + ) + + // send to 
peers and update metrics + // + // Note: It is guaranteed that namedPeers and sampledPeers are disjoint. + for _, peers := range [][]peer.Peer{namedPeers, sampledPeers} { + for _, peer := range peers { + if peer.Send(n.onCloseCtx, msg) { + sentTo.Add(peer.ID()) + + // TODO: move send fail rate calculations into the peer metrics + // record metrics for success + n.sendFailRateCalculator.Observe(0, now) + } else { + // record metrics for failure + n.sendFailRateCalculator.Observe(1, now) + } + } + } + return sentTo } // HealthCheck returns information about several network layer health checks. @@ -434,7 +449,7 @@ func (n *network) Connected(nodeID ids.NodeID) { peerIP := peer.IP() newIP := ips.NewClaimedIPPort( peer.Cert(), - peerIP.IPPort, + peerIP.AddrPort, peerIP.Timestamp, peerIP.TLSSignature, ) @@ -444,8 +459,12 @@ func (n *network) Connected(nodeID ids.NodeID) { peerVersion := peer.Version() n.router.Connected(nodeID, peerVersion, constants.PrimaryNetworkID) - for subnetID := range peer.TrackedSubnets() { - n.router.Connected(nodeID, peerVersion, subnetID) + + trackedSubnets := peer.TrackedSubnets() + for subnetID := range n.peerConfig.MySubnets { + if trackedSubnets.Contains(subnetID) { + n.router.Connected(nodeID, peerVersion, subnetID) + } } } @@ -530,7 +549,7 @@ func (n *network) Dispatch() error { // call this function inside the go-routine, rather than the main // accept loop. remoteAddr := conn.RemoteAddr().String() - ip, err := ips.ToIPPort(remoteAddr) + ip, err := ips.ParseAddrPort(remoteAddr) if err != nil { n.peerConfig.Log.Error("failed to parse remote address", zap.String("peerIP", remoteAddr), @@ -579,7 +598,7 @@ func (n *network) Dispatch() error { return errs.Err } -func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { +func (n *network) ManuallyTrack(nodeID ids.NodeID, ip netip.AddrPort) { n.ipTracker.ManuallyTrack(nodeID) n.peersLock.Lock() @@ -619,7 +638,7 @@ func (n *network) track(ip *ips.ClaimedIPPort) error { // lock. 
signedIP := peer.SignedIP{ UnsignedIP: peer.UnsignedIP{ - IPPort: ip.IPPort, + AddrPort: ip.AddrPort, Timestamp: ip.Timestamp, }, TLSSignature: ip.Signature, @@ -645,9 +664,9 @@ func (n *network) track(ip *ips.ClaimedIPPort) error { tracked, isTracked := n.trackedIPs[ip.NodeID] if isTracked { // Stop tracking the old IP and start tracking the new one. - tracked = tracked.trackNewIP(ip.IPPort) + tracked = tracked.trackNewIP(ip.AddrPort) } else { - tracked = newTrackedIP(ip.IPPort) + tracked = newTrackedIP(ip.AddrPort) } n.trackedIPs[ip.NodeID] = tracked n.dial(ip.NodeID, tracked) @@ -678,8 +697,7 @@ func (n *network) getPeers( continue } - trackedSubnets := peer.TrackedSubnets() - if subnetID != constants.PrimaryNetworkID && !trackedSubnets.Contains(subnetID) { + if trackedSubnets := peer.TrackedSubnets(); !trackedSubnets.Contains(subnetID) { continue } @@ -695,41 +713,45 @@ func (n *network) getPeers( return peers } +// samplePeers samples connected peers attempting to align with the number of +// requested validators, non-validators, and peers. This function will +// explicitly ignore nodeIDs already included in the send config. func (n *network) samplePeers( + config common.SendConfig, subnetID ids.ID, - numValidatorsToSample, - numNonValidatorsToSample int, - numPeersToSample int, allower subnets.Allower, ) []peer.Peer { - // If there are fewer validators than [numValidatorsToSample], then only - // sample [numValidatorsToSample] validators. - subnetValidatorsLen := n.config.Validators.Count(subnetID) - if subnetValidatorsLen < numValidatorsToSample { - numValidatorsToSample = subnetValidatorsLen - } + // As an optimization, if there are fewer validators than + // [numValidatorsToSample], only attempt to sample [numValidatorsToSample] + // validators to potentially avoid iterating over the entire peer set. 
+ numValidatorsToSample := min(config.Validators, n.config.Validators.Count(subnetID)) n.peersLock.RLock() defer n.peersLock.RUnlock() return n.connectedPeers.Sample( - numValidatorsToSample+numNonValidatorsToSample+numPeersToSample, + numValidatorsToSample+config.NonValidators+config.Peers, func(p peer.Peer) bool { // Only return peers that are tracking [subnetID] - trackedSubnets := p.TrackedSubnets() - if subnetID != constants.PrimaryNetworkID && !trackedSubnets.Contains(subnetID) { + if trackedSubnets := p.TrackedSubnets(); !trackedSubnets.Contains(subnetID) { return false } peerID := p.ID() + // if the peer was already explicitly included, don't include in the + // sample + if config.NodeIDs.Contains(peerID) { + return false + } + _, isValidator := n.config.Validators.GetValidator(subnetID, peerID) // check if the peer is allowed to connect to the subnet if !allower.IsAllowed(peerID, isValidator) { return false } - if numPeersToSample > 0 { - numPeersToSample-- + if config.Peers > 0 { + config.Peers-- return true } @@ -738,37 +760,12 @@ func (n *network) samplePeers( return numValidatorsToSample >= 0 } - numNonValidatorsToSample-- - return numNonValidatorsToSample >= 0 + config.NonValidators-- + return config.NonValidators >= 0 }, ) } -// send the message to the provided peers. -// -// send takes ownership of the provided message reference. So, the provided -// message should only be inspected if the reference has been externally -// increased. 
-func (n *network) send(msg message.OutboundMessage, peers []peer.Peer) set.Set[ids.NodeID] { - sentTo := set.NewSet[ids.NodeID](len(peers)) - now := n.peerConfig.Clock.Time() - - // send to peer and update metrics - for _, peer := range peers { - if peer.Send(n.onCloseCtx, msg) { - sentTo.Add(peer.ID()) - - // TODO: move send fail rate calculations into the peer metrics - // record metrics for success - n.sendFailRateCalculator.Observe(0, now) - } else { - // record metrics for failure - n.sendFailRateCalculator.Observe(1, now) - } - } - return sentTo -} - func (n *network) disconnectedFromConnecting(nodeID ids.NodeID) { n.peersLock.Lock() defer n.peersLock.Unlock() @@ -802,7 +799,7 @@ func (n *network) disconnectedFromConnected(peer peer.Peer, nodeID ids.NodeID) { // The peer that is disconnecting from us finished the handshake if ip, wantsConnection := n.ipTracker.GetIP(nodeID); wantsConnection { - tracked := newTrackedIP(ip.IPPort) + tracked := newTrackedIP(ip.AddrPort) n.trackedIPs[nodeID] = tracked n.dial(nodeID, tracked) } @@ -902,7 +899,7 @@ func (n *network) dial(nodeID ids.NodeID, ip *trackedIP) { // nodeID leaves the validator set. This is why we continue the loop // rather than returning even though we will never initiate an // outbound connection with this IP. 
- if !n.config.AllowPrivateIPs && ip.ip.IP.IsPrivate() { + if !n.config.AllowPrivateIPs && !ips.IsPublic(ip.ip.Addr()) { n.peerConfig.Log.Verbo("skipping connection dial", zap.String("reason", "outbound connections to private IPs are prohibited"), zap.Stringer("nodeID", nodeID), @@ -1157,12 +1154,10 @@ func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { } func (n *network) runTimers() { - pushGossipPeerlists := time.NewTicker(n.config.PeerListGossipFreq) pullGossipPeerlists := time.NewTicker(n.config.PeerListPullGossipFreq) resetPeerListBloom := time.NewTicker(n.config.PeerListBloomResetFreq) updateUptimes := time.NewTicker(n.config.UptimeMetricFreq) defer func() { - pushGossipPeerlists.Stop() resetPeerListBloom.Stop() updateUptimes.Stop() }() @@ -1171,8 +1166,6 @@ func (n *network) runTimers() { select { case <-n.onCloseCtx.Done(): return - case <-pushGossipPeerlists.C: - n.pushGossipPeerLists() case <-pullGossipPeerlists.C: n.pullGossipPeerLists() case <-resetPeerListBloom.C: @@ -1209,28 +1202,13 @@ func (n *network) runTimers() { } } -// pushGossipPeerLists gossips validators to peers in the network -func (n *network) pushGossipPeerLists() { - peers := n.samplePeers( - constants.PrimaryNetworkID, - int(n.config.PeerListValidatorGossipSize), - int(n.config.PeerListNonValidatorGossipSize), - int(n.config.PeerListPeersGossipSize), - subnets.NoOpAllower, - ) - - for _, p := range peers { - p.StartSendPeerList() - } -} - // pullGossipPeerLists requests validators from peers in the network func (n *network) pullGossipPeerLists() { peers := n.samplePeers( + common.SendConfig{ + Validators: 1, + }, constants.PrimaryNetworkID, - 1, // numValidatorsToSample - 0, // numNonValidatorsToSample - 0, // numPeersToSample subnets.NoOpAllower, ) diff --git a/network/network_test.go b/network/network_test.go index 0f95d722b654..85390da90dff 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -6,8 +6,7 @@ package network import ( "context" 
"crypto" - "crypto/rsa" - "net" + "net/netip" "sync" "testing" "time" @@ -20,13 +19,14 @@ import ( "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/ips" @@ -49,13 +49,9 @@ var ( SendFailRateHalflife: time.Second, } defaultPeerListGossipConfig = PeerListGossipConfig{ - PeerListNumValidatorIPs: 100, - PeerListValidatorGossipSize: 100, - PeerListNonValidatorGossipSize: 100, - PeerListPeersGossipSize: 100, - PeerListGossipFreq: time.Second, - PeerListPullGossipFreq: time.Second, - PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, + PeerListNumValidatorIPs: 100, + PeerListPullGossipFreq: time.Second, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, } defaultTimeoutConfig = TimeoutConfig{ PingPongTimeout: 30 * time.Second, @@ -109,7 +105,6 @@ var ( DialerConfig: defaultDialerConfig, - Namespace: "", NetworkID: 49463, MaxClockDifference: time.Minute, PingFrequency: constants.DefaultPingFrequency, @@ -170,15 +165,21 @@ func newTestNetwork(t *testing.T, count int) (*testDialer, []*testListener, []id ) for i := 0; i < count; i++ { ip, listener := dialer.NewListener() - nodeID, tlsCert, tlsConfig := getTLS(t, i) + + tlsCert, err := staking.NewTLSCert() + require.NoError(t, err) + + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(t, err) 
+ nodeID := ids.NodeIDFromCert(cert) blsKey, err := bls.NewSecretKey() require.NoError(t, err) config := defaultConfig - config.TLSConfig = tlsConfig + config.TLSConfig = peer.TLSConfig(*tlsCert, nil) config.MyNodeID = nodeID - config.MyIPPort = ip + config.MyIPPort = utils.NewAtomic(ip) config.TLSKey = tlsCert.PrivateKey.(crypto.Signer) config.BLSKey = blsKey @@ -195,7 +196,6 @@ func newMessageCreator(t *testing.T) message.Creator { mc, err := message.NewCreator( logging.NoLog{}, prometheus.NewRegistry(), - "", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -280,7 +280,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler for i, net := range networks { if i != 0 { config := configs[0] - net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.Get()) } go func(net Network) { @@ -327,11 +327,18 @@ func TestSend(t *testing.T) { net0 := networks[0] mc := newMessageCreator(t) - outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(err) toSend := set.Of(nodeIDs[1]) - sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, subnets.NoOpAllower) + sentTo := net0.Send( + outboundGetMsg, + common.SendConfig{ + NodeIDs: toSend, + }, + constants.PrimaryNetworkID, + subnets.NoOpAllower, + ) require.Equal(toSend, sentTo) inboundGetMsg := <-received @@ -343,7 +350,7 @@ func TestSend(t *testing.T) { wg.Wait() } -func TestSendAndGossipWithFilter(t *testing.T) { +func TestSendWithFilter(t *testing.T) { require := require.New(t) received := make(chan message.InboundMessage) @@ -365,26 +372,25 @@ func TestSendAndGossipWithFilter(t *testing.T) { net0 := networks[0] mc := newMessageCreator(t) - outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + outboundGetMsg, err := 
mc.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(err) toSend := set.Of(nodeIDs...) validNodeID := nodeIDs[1] - sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, newNodeIDConnector(validNodeID)) + sentTo := net0.Send( + outboundGetMsg, + common.SendConfig{ + NodeIDs: toSend, + }, + constants.PrimaryNetworkID, + newNodeIDConnector(validNodeID), + ) require.Len(sentTo, 1) require.Contains(sentTo, validNodeID) inboundGetMsg := <-received require.Equal(message.GetOp, inboundGetMsg.Op()) - // Test Gossip now - sentTo = net0.Gossip(outboundGetMsg, constants.PrimaryNetworkID, 0, 0, len(nodeIDs), newNodeIDConnector(validNodeID)) - require.Len(sentTo, 1) - require.Contains(sentTo, validNodeID) - - inboundGetMsg = <-received - require.Equal(message.GetOp, inboundGetMsg.Op()) - for _, net := range networks { net.StartClose() } @@ -397,22 +403,32 @@ func TestTrackVerifiesSignatures(t *testing.T) { _, networks, wg := newFullyConnectedTestNetwork(t, []router.InboundHandler{nil}) network := networks[0] - nodeID, tlsCert, _ := getTLS(t, 1) + + tlsCert, err := staking.NewTLSCert() + require.NoError(err) + + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) + nodeID := ids.NodeIDFromCert(cert) + require.NoError(network.config.Validators.AddStaker(constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1)) - err := network.Track([]*ips.ClaimedIPPort{ + stakingCert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) + + err = network.Track([]*ips.ClaimedIPPort{ ips.NewClaimedIPPort( - staking.CertificateFromX509(tlsCert.Leaf), - ips.IPPort{ - IP: net.IPv4(123, 132, 123, 123), - Port: 10000, - }, + stakingCert, + netip.AddrPortFrom( + netip.AddrFrom4([4]byte{123, 132, 123, 123}), + 10000, + ), 1000, // timestamp nil, // signature ), }) // The signature is wrong so this peer tracking info isn't useful. 
- require.ErrorIs(err, rsa.ErrVerification) + require.ErrorIs(err, staking.ErrECDSAVerificationFailure) network.peersLock.RLock() require.Empty(network.trackedIPs) @@ -472,7 +488,7 @@ func TestTrackDoesNotDialPrivateIPs(t *testing.T) { for i, net := range networks { if i != 0 { config := configs[0] - net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.Get()) } go func(net Network) { @@ -555,15 +571,17 @@ func TestDialDeletesNonValidators(t *testing.T) { wg.Add(len(networks)) for i, net := range networks { if i != 0 { - err := net.Track([]*ips.ClaimedIPPort{ + stakingCert, err := staking.ParseCertificate(config.TLSConfig.Certificates[0].Leaf.Raw) + require.NoError(err) + + require.NoError(net.Track([]*ips.ClaimedIPPort{ ips.NewClaimedIPPort( - staking.CertificateFromX509(config.TLSConfig.Certificates[0].Leaf), - ip.IPPort, + stakingCert, + ip.AddrPort, ip.Timestamp, ip.TLSSignature, ), - }) - require.NoError(err) + })) } go func(net Network) { @@ -611,23 +629,23 @@ func TestDialContext(t *testing.T) { neverDialedNodeID = ids.GenerateTestNodeID() dialedNodeID = ids.GenerateTestNodeID() - dynamicNeverDialedIP, neverDialedListener = dialer.NewListener() - dynamicDialedIP, dialedListener = dialer.NewListener() + neverDialedIP, neverDialedListener = dialer.NewListener() + dialedIP, dialedListener = dialer.NewListener() - neverDialedIP = &trackedIP{ - ip: dynamicNeverDialedIP.IPPort(), + neverDialedTrackedIP = &trackedIP{ + ip: neverDialedIP, } - dialedIP = &trackedIP{ - ip: dynamicDialedIP.IPPort(), + dialedTrackedIP = &trackedIP{ + ip: dialedIP, } ) - network.ManuallyTrack(neverDialedNodeID, neverDialedIP.ip) - network.ManuallyTrack(dialedNodeID, dialedIP.ip) + network.ManuallyTrack(neverDialedNodeID, neverDialedIP) + network.ManuallyTrack(dialedNodeID, dialedIP) // Sanity check that when a non-cancelled context is given, // we actually dial the peer. 
- network.dial(dialedNodeID, dialedIP) + network.dial(dialedNodeID, dialedTrackedIP) gotDialedIPConn := make(chan struct{}) go func() { @@ -639,7 +657,7 @@ func TestDialContext(t *testing.T) { // Asset that when [n.onCloseCtx] is cancelled, dial returns immediately. // That is, [neverDialedListener] doesn't accept a connection. network.onCloseCtxCancel() - network.dial(neverDialedNodeID, neverDialedIP) + network.dial(neverDialedNodeID, neverDialedTrackedIP) gotNeverDialedIPConn := make(chan struct{}) go func() { @@ -701,7 +719,7 @@ func TestAllowConnectionAsAValidator(t *testing.T) { for i, net := range networks { if i != 0 { config := configs[0] - net.ManuallyTrack(config.MyNodeID, config.MyIPPort.IPPort()) + net.ManuallyTrack(config.MyNodeID, config.MyIPPort.Get()) } go func(net Network) { diff --git a/network/p2p/client.go b/network/p2p/client.go index b506baf9c630..18556bfad1fa 100644 --- a/network/p2p/client.go +++ b/network/p2p/client.go @@ -8,7 +8,10 @@ import ( "errors" "fmt" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/set" ) @@ -72,6 +75,14 @@ func (c *Client) AppRequest( appRequestBytes []byte, onResponse AppResponseCallback, ) error { + // Cancellation is removed from this context to avoid erroring unexpectedly. + // SendAppRequest should be non-blocking and any error other than context + // cancellation is unexpected. + // + // This guarantees that the router should never receive an unexpected + // AppResponse. 
+ ctxWithoutCancel := context.WithoutCancel(ctx) + c.router.lock.Lock() defer c.router.lock.Unlock() @@ -87,11 +98,17 @@ func (c *Client) AppRequest( } if err := c.sender.SendAppRequest( - ctx, + ctxWithoutCancel, set.Of(nodeID), requestID, appRequestBytes, ); err != nil { + c.router.log.Error("unexpected error when sending message", + zap.Stringer("op", message.AppRequestOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) return err } @@ -108,23 +125,17 @@ func (c *Client) AppRequest( // AppGossip sends a gossip message to a random set of peers. func (c *Client) AppGossip( ctx context.Context, + config common.SendConfig, appGossipBytes []byte, ) error { - return c.sender.SendAppGossip( - ctx, - PrefixMessage(c.handlerPrefix, appGossipBytes), - ) -} + // Cancellation is removed from this context to avoid erroring unexpectedly. + // SendAppGossip should be non-blocking and any error other than context + // cancellation is unexpected. + ctxWithoutCancel := context.WithoutCancel(ctx) -// AppGossipSpecific sends a gossip message to a predetermined set of peers. -func (c *Client) AppGossipSpecific( - ctx context.Context, - nodeIDs set.Set[ids.NodeID], - appGossipBytes []byte, -) error { - return c.sender.SendAppGossipSpecific( - ctx, - nodeIDs, + return c.sender.SendAppGossip( + ctxWithoutCancel, + config, PrefixMessage(c.handlerPrefix, appGossipBytes), ) } @@ -137,6 +148,14 @@ func (c *Client) CrossChainAppRequest( appRequestBytes []byte, onResponse CrossChainAppResponseCallback, ) error { + // Cancellation is removed from this context to avoid erroring unexpectedly. + // SendCrossChainAppRequest should be non-blocking and any error other than + // context cancellation is unexpected. + // + // This guarantees that the router should never receive an unexpected + // CrossChainAppResponse. 
+ ctxWithoutCancel := context.WithoutCancel(ctx) + c.router.lock.Lock() defer c.router.lock.Unlock() @@ -150,11 +169,17 @@ func (c *Client) CrossChainAppRequest( } if err := c.sender.SendCrossChainAppRequest( - ctx, + ctxWithoutCancel, chainID, requestID, PrefixMessage(c.handlerPrefix, appRequestBytes), ); err != nil { + c.router.log.Error("unexpected error when sending message", + zap.Stringer("op", message.CrossChainAppRequestOp), + zap.Stringer("chainID", chainID), + zap.Uint32("requestID", requestID), + zap.Error(err), + ) return err } diff --git a/network/p2p/gossip/gossip.go b/network/p2p/gossip/gossip.go index 3b910216d87c..918f19ca5ba0 100644 --- a/network/p2p/gossip/gossip.go +++ b/network/p2p/gossip/gossip.go @@ -13,36 +13,73 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) const ( - typeLabel = "type" - pushType = "push" - pullType = "pull" + ioLabel = "io" + sentIO = "sent" + receivedIO = "received" + + typeLabel = "type" + pushType = "push" + pullType = "pull" + unsentType = "unsent" + sentType = "sent" + + defaultGossipableCount = 64 ) var ( _ Gossiper = (*ValidatorGossiper)(nil) _ Gossiper = (*PullGossiper[*testTx])(nil) _ Gossiper = (*NoOpGossiper)(nil) - _ Gossiper = (*TestGossiper)(nil) - _ Accumulator[*testTx] = (*PushGossiper[*testTx])(nil) - _ Accumulator[*testTx] = (*NoOpAccumulator[*testTx])(nil) - _ Accumulator[*testTx] = (*TestAccumulator[*testTx])(nil) + _ Set[*testTx] = (*EmptySet[*testTx])(nil) + _ Set[*testTx] = (*FullSet[*testTx])(nil) - metricLabels = []string{typeLabel} - pushLabels = 
prometheus.Labels{ + ioTypeLabels = []string{ioLabel, typeLabel} + sentPushLabels = prometheus.Labels{ + ioLabel: sentIO, + typeLabel: pushType, + } + receivedPushLabels = prometheus.Labels{ + ioLabel: receivedIO, typeLabel: pushType, } - pullLabels = prometheus.Labels{ + sentPullLabels = prometheus.Labels{ + ioLabel: sentIO, typeLabel: pullType, } + receivedPullLabels = prometheus.Labels{ + ioLabel: receivedIO, + typeLabel: pullType, + } + typeLabels = []string{typeLabel} + unsentLabels = prometheus.Labels{ + typeLabel: unsentType, + } + sentLabels = prometheus.Labels{ + typeLabel: sentType, + } + + ErrInvalidNumValidators = errors.New("num validators cannot be negative") + ErrInvalidNumNonValidators = errors.New("num non-validators cannot be negative") + ErrInvalidNumPeers = errors.New("num peers cannot be negative") + ErrInvalidNumToGossip = errors.New("must gossip to at least one peer") + ErrInvalidDiscardedSize = errors.New("discarded size cannot be negative") + ErrInvalidTargetGossipSize = errors.New("target gossip size cannot be negative") + ErrInvalidRegossipFrequency = errors.New("re-gossip frequency cannot be negative") + + errEmptySetCantAdd = errors.New("empty set can not add") ) // Gossiper gossips Gossipables to other nodes @@ -51,13 +88,6 @@ type Gossiper interface { Gossip(ctx context.Context) error } -// Accumulator allows a caller to accumulate gossipables to be gossiped -type Accumulator[T Gossipable] interface { - Gossiper - // Add queues gossipables to be gossiped - Add(gossipables ...T) -} - // ValidatorGossiper only calls [Gossip] if the given node is a validator type ValidatorGossiper struct { Gossiper @@ -69,10 +99,11 @@ type ValidatorGossiper struct { // Metrics that are tracked across a gossip protocol. A given protocol should // only use a single instance of Metrics. 
type Metrics struct { - sentCount *prometheus.CounterVec - sentBytes *prometheus.CounterVec - receivedCount *prometheus.CounterVec - receivedBytes *prometheus.CounterVec + count *prometheus.CounterVec + bytes *prometheus.CounterVec + tracking *prometheus.GaugeVec + trackingLifetimeAverage prometheus.Gauge + topValidators *prometheus.GaugeVec } // NewMetrics returns a common set of metrics @@ -81,36 +112,70 @@ func NewMetrics( namespace string, ) (Metrics, error) { m := Metrics{ - sentCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "gossip_sent_count", - Help: "amount of gossip sent (n)", - }, metricLabels), - sentBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "gossip_sent_bytes", - Help: "amount of gossip sent (bytes)", - }, metricLabels), - receivedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ + count: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_count", + Help: "amount of gossip (n)", + }, + ioTypeLabels, + ), + bytes: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "gossip_bytes", + Help: "amount of gossip (bytes)", + }, + ioTypeLabels, + ), + tracking: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "gossip_tracking", + Help: "number of gossipables being tracked", + }, + typeLabels, + ), + trackingLifetimeAverage: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, - Name: "gossip_received_count", - Help: "amount of gossip received (n)", - }, metricLabels), - receivedBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "gossip_received_bytes", - Help: "amount of gossip received (bytes)", - }, metricLabels), + Name: "gossip_tracking_lifetime_average", + Help: "average duration a gossipable has been tracked (ns)", + }), + topValidators: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: 
"top_validators", + Help: "number of validators gossipables are sent to due to stake", + }, + typeLabels, + ), } err := utils.Err( - metrics.Register(m.sentCount), - metrics.Register(m.sentBytes), - metrics.Register(m.receivedCount), - metrics.Register(m.receivedBytes), + metrics.Register(m.count), + metrics.Register(m.bytes), + metrics.Register(m.tracking), + metrics.Register(m.trackingLifetimeAverage), + metrics.Register(m.topValidators), ) return m, err } +func (m *Metrics) observeMessage(labels prometheus.Labels, count int, bytes int) error { + countMetric, err := m.count.GetMetricWith(labels) + if err != nil { + return fmt.Errorf("failed to get count metric: %w", err) + } + + bytesMetric, err := m.bytes.GetMetricWith(labels) + if err != nil { + return fmt.Errorf("failed to get bytes metric: %w", err) + } + + countMetric.Add(float64(count)) + bytesMetric.Add(float64(bytes)) + return nil +} + func (v ValidatorGossiper) Gossip(ctx context.Context) error { if !v.Validators.Has(ctx, v.NodeID) { return nil @@ -197,118 +262,314 @@ func (p *PullGossiper[_]) handleResponse( continue } - hash := gossipable.GossipID() + gossipID := gossipable.GossipID() p.log.Debug( "received gossip", zap.Stringer("nodeID", nodeID), - zap.Stringer("id", hash), + zap.Stringer("id", gossipID), ) if err := p.set.Add(gossipable); err != nil { p.log.Debug( "failed to add gossip to the known set", zap.Stringer("nodeID", nodeID), - zap.Stringer("id", hash), + zap.Stringer("id", gossipID), zap.Error(err), ) continue } } - receivedCountMetric, err := p.metrics.receivedCount.GetMetricWith(pullLabels) - if err != nil { - p.log.Error("failed to get received count metric", zap.Error(err)) - return - } - - receivedBytesMetric, err := p.metrics.receivedBytes.GetMetricWith(pullLabels) - if err != nil { - p.log.Error("failed to get received bytes metric", zap.Error(err)) - return + if err := p.metrics.observeMessage(receivedPullLabels, len(gossip), receivedBytes); err != nil { + p.log.Error("failed to 
update metrics", + zap.Error(err), + ) } - - receivedCountMetric.Add(float64(len(gossip))) - receivedBytesMetric.Add(float64(receivedBytes)) } // NewPushGossiper returns an instance of PushGossiper -func NewPushGossiper[T Gossipable](marshaller Marshaller[T], client *p2p.Client, metrics Metrics, targetGossipSize int) *PushGossiper[T] { - return &PushGossiper[T]{ - marshaller: marshaller, - client: client, - metrics: metrics, - targetGossipSize: targetGossipSize, - pending: buffer.NewUnboundedDeque[T](0), +func NewPushGossiper[T Gossipable]( + marshaller Marshaller[T], + mempool Set[T], + validators p2p.ValidatorSubset, + client *p2p.Client, + metrics Metrics, + gossipParams BranchingFactor, + regossipParams BranchingFactor, + discardedSize int, + targetGossipSize int, + maxRegossipFrequency time.Duration, +) (*PushGossiper[T], error) { + if err := gossipParams.Verify(); err != nil { + return nil, fmt.Errorf("invalid gossip params: %w", err) + } + if err := regossipParams.Verify(); err != nil { + return nil, fmt.Errorf("invalid regossip params: %w", err) + } + switch { + case discardedSize < 0: + return nil, ErrInvalidDiscardedSize + case targetGossipSize < 0: + return nil, ErrInvalidTargetGossipSize + case maxRegossipFrequency < 0: + return nil, ErrInvalidRegossipFrequency } + + return &PushGossiper[T]{ + marshaller: marshaller, + set: mempool, + validators: validators, + client: client, + metrics: metrics, + gossipParams: gossipParams, + regossipParams: regossipParams, + targetGossipSize: targetGossipSize, + maxRegossipFrequency: maxRegossipFrequency, + + tracking: make(map[ids.ID]*tracking), + toGossip: buffer.NewUnboundedDeque[T](0), + toRegossip: buffer.NewUnboundedDeque[T](0), + discarded: &cache.LRU[ids.ID, struct{}]{Size: discardedSize}, + }, nil } // PushGossiper broadcasts gossip to peers randomly in the network type PushGossiper[T Gossipable] struct { - marshaller Marshaller[T] - client *p2p.Client - metrics Metrics - targetGossipSize int + marshaller 
Marshaller[T] + set Set[T] + validators p2p.ValidatorSubset + client *p2p.Client + metrics Metrics + + gossipParams BranchingFactor + regossipParams BranchingFactor + targetGossipSize int + maxRegossipFrequency time.Duration + + lock sync.Mutex + tracking map[ids.ID]*tracking + addedTimeSum float64 // unix nanoseconds + toGossip buffer.Deque[T] + toRegossip buffer.Deque[T] + discarded *cache.LRU[ids.ID, struct{}] // discarded attempts to avoid overgossiping transactions that are frequently dropped +} + +type BranchingFactor struct { + // StakePercentage determines the percentage of stake that should have + // gossip sent to based on the inverse CDF of stake weights. This value does + // not account for the connectivity of the nodes. + StakePercentage float64 + // Validators specifies the number of connected validators, in addition to + // any validators sent from the StakePercentage parameter, to send gossip + // to. These validators are sampled uniformly rather than by stake. + Validators int + // NonValidators specifies the number of connected non-validators to send + // gossip to. + NonValidators int + // Peers specifies the number of connected validators or non-validators, in + // addition to the number sent due to other configs, to send gossip to. + Peers int +} - lock sync.Mutex - pending buffer.Deque[T] +func (b *BranchingFactor) Verify() error { + switch { + case b.Validators < 0: + return ErrInvalidNumValidators + case b.NonValidators < 0: + return ErrInvalidNumNonValidators + case b.Peers < 0: + return ErrInvalidNumPeers + case max(b.Validators, b.NonValidators, b.Peers) == 0: + return ErrInvalidNumToGossip + default: + return nil + } +} + +type tracking struct { + addedTime float64 // unix nanoseconds + lastGossiped time.Time } -// Gossip flushes any queued gossipables +// Gossip flushes any queued gossipables. 
func (p *PushGossiper[T]) Gossip(ctx context.Context) error { + var ( + now = time.Now() + nowUnixNano = float64(now.UnixNano()) + ) + p.lock.Lock() - defer p.lock.Unlock() + defer func() { + p.updateMetrics(nowUnixNano) + p.lock.Unlock() + }() - if p.pending.Len() == 0 { + if len(p.tracking) == 0 { return nil } - sentBytes := 0 - gossip := make([][]byte, 0, p.pending.Len()) + if err := p.gossip( + ctx, + now, + p.gossipParams, + p.toGossip, + p.toRegossip, + &cache.Empty[ids.ID, struct{}]{}, // Don't mark dropped unsent transactions as discarded + unsentLabels, + ); err != nil { + return fmt.Errorf("unexpected error during gossip: %w", err) + } + + if err := p.gossip( + ctx, + now, + p.regossipParams, + p.toRegossip, + p.toRegossip, + p.discarded, // Mark dropped sent transactions as discarded + sentLabels, + ); err != nil { + return fmt.Errorf("unexpected error during regossip: %w", err) + } + return nil +} + +func (p *PushGossiper[T]) gossip( + ctx context.Context, + now time.Time, + gossipParams BranchingFactor, + toGossip buffer.Deque[T], + toRegossip buffer.Deque[T], + discarded cache.Cacher[ids.ID, struct{}], + metricsLabels prometheus.Labels, +) error { + var ( + sentBytes = 0 + gossip = make([][]byte, 0, defaultGossipableCount) + maxLastGossipTimeToRegossip = now.Add(-p.maxRegossipFrequency) + ) + for sentBytes < p.targetGossipSize { - gossipable, ok := p.pending.PeekLeft() + gossipable, ok := toGossip.PopLeft() if !ok { break } + // Ensure item is still in the set before we gossip. + gossipID := gossipable.GossipID() + tracking := p.tracking[gossipID] + if !p.set.Has(gossipID) { + delete(p.tracking, gossipID) + p.addedTimeSum -= tracking.addedTime + discarded.Put(gossipID, struct{}{}) // Cache that the item was dropped + continue + } + + // Ensure we don't attempt to send a gossipable too frequently. 
+ if maxLastGossipTimeToRegossip.Before(tracking.lastGossiped) { + // Put the gossipable on the front of the queue to keep items sorted + // by last issuance time. + toGossip.PushLeft(gossipable) + break + } + bytes, err := p.marshaller.MarshalGossip(gossipable) if err != nil { - // remove this item so we don't get stuck in a loop - _, _ = p.pending.PopLeft() + delete(p.tracking, gossipID) + p.addedTimeSum -= tracking.addedTime return err } gossip = append(gossip, bytes) sentBytes += len(bytes) - p.pending.PopLeft() + toRegossip.PushRight(gossipable) + tracking.lastGossiped = now + } + + // If there is nothing to gossip, we can exit early. + if len(gossip) == 0 { + return nil } + // Send gossipables to peers msgBytes, err := MarshalAppGossip(gossip) if err != nil { return err } - sentCountMetric, err := p.metrics.sentCount.GetMetricWith(pushLabels) - if err != nil { - return fmt.Errorf("failed to get sent count metric: %w", err) + if err := p.metrics.observeMessage(sentPushLabels, len(gossip), sentBytes); err != nil { + return err } - sentBytesMetric, err := p.metrics.sentBytes.GetMetricWith(pushLabels) + topValidatorsMetric, err := p.metrics.topValidators.GetMetricWith(metricsLabels) if err != nil { - return fmt.Errorf("failed to get sent bytes metric: %w", err) + return fmt.Errorf("failed to get top validators metric: %w", err) } - sentCountMetric.Add(float64(len(gossip))) - sentBytesMetric.Add(float64(sentBytes)) - - return p.client.AppGossip(ctx, msgBytes) + validatorsByStake := p.validators.Top(ctx, gossipParams.StakePercentage) + topValidatorsMetric.Set(float64(len(validatorsByStake))) + + return p.client.AppGossip( + ctx, + common.SendConfig{ + NodeIDs: set.Of(validatorsByStake...), + Validators: gossipParams.Validators, + NonValidators: gossipParams.NonValidators, + Peers: gossipParams.Peers, + }, + msgBytes, + ) } +// Add enqueues new gossipables to be pushed. If a gossiable is already tracked, +// it is not added again. 
func (p *PushGossiper[T]) Add(gossipables ...T) { + var ( + now = time.Now() + nowUnixNano = float64(now.UnixNano()) + ) + p.lock.Lock() - defer p.lock.Unlock() + defer func() { + p.updateMetrics(nowUnixNano) + p.lock.Unlock() + }() + // Add new gossipables to be sent. for _, gossipable := range gossipables { - p.pending.PushRight(gossipable) + gossipID := gossipable.GossipID() + if _, ok := p.tracking[gossipID]; ok { + continue + } + + tracking := &tracking{ + addedTime: nowUnixNano, + } + if _, ok := p.discarded.Get(gossipID); ok { + // Pretend that recently discarded transactions were just gossiped. + tracking.lastGossiped = now + p.toRegossip.PushRight(gossipable) + } else { + p.toGossip.PushRight(gossipable) + } + p.tracking[gossipID] = tracking + p.addedTimeSum += nowUnixNano + } +} + +func (p *PushGossiper[_]) updateMetrics(nowUnixNano float64) { + var ( + numUnsent = float64(p.toGossip.Len()) + numSent = float64(p.toRegossip.Len()) + numTracking = numUnsent + numSent + averageLifetime float64 + ) + if numTracking != 0 { + averageLifetime = nowUnixNano - p.addedTimeSum/numTracking } + + p.metrics.tracking.With(unsentLabels).Set(numUnsent) + p.metrics.tracking.With(sentLabels).Set(numSent) + p.metrics.trackingLifetimeAverage.Set(averageLifetime) } // Every calls [Gossip] every [frequency] amount of time. 
@@ -335,39 +596,50 @@ func (NoOpGossiper) Gossip(context.Context) error { return nil } -type NoOpAccumulator[T Gossipable] struct{} +type TestGossiper struct { + GossipF func(ctx context.Context) error +} + +func (t *TestGossiper) Gossip(ctx context.Context) error { + return t.GossipF(ctx) +} -func (NoOpAccumulator[_]) Gossip(context.Context) error { +type EmptySet[T Gossipable] struct{} + +func (EmptySet[_]) Gossip(context.Context) error { return nil } -func (NoOpAccumulator[T]) Add(...T) {} +func (EmptySet[T]) Add(T) error { + return errEmptySetCantAdd +} -type TestGossiper struct { - GossipF func(ctx context.Context) error +func (EmptySet[T]) Has(ids.ID) bool { + return false } -func (t *TestGossiper) Gossip(ctx context.Context) error { - return t.GossipF(ctx) +func (EmptySet[T]) Iterate(func(gossipable T) bool) {} + +func (EmptySet[_]) GetFilter() ([]byte, []byte) { + return bloom.EmptyFilter.Marshal(), ids.Empty[:] } -type TestAccumulator[T Gossipable] struct { - GossipF func(ctx context.Context) error - AddF func(...T) +type FullSet[T Gossipable] struct{} + +func (FullSet[_]) Gossip(context.Context) error { + return nil } -func (t TestAccumulator[T]) Gossip(ctx context.Context) error { - if t.GossipF == nil { - return nil - } +func (FullSet[T]) Add(T) error { + return nil +} - return t.GossipF(ctx) +func (FullSet[T]) Has(ids.ID) bool { + return true } -func (t TestAccumulator[T]) Add(gossipables ...T) { - if t.AddF == nil { - return - } +func (FullSet[T]) Iterate(func(gossipable T) bool) {} - t.AddF(gossipables...) 
+func (FullSet[_]) GetFilter() ([]byte, []byte) { + return bloom.FullFilter.Marshal(), ids.Empty[:] } diff --git a/network/p2p/gossip/gossip_test.go b/network/p2p/gossip/gossip_test.go index 5d6fe9914d4c..a1eed80bed3b 100644 --- a/network/p2p/gossip/gossip_test.go +++ b/network/p2p/gossip/gossip_test.go @@ -18,6 +18,8 @@ import ( "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" @@ -124,7 +126,6 @@ func TestGossiperGossip(t *testing.T) { handler := NewHandler[*testTx]( logging.NoLog{}, marshaller, - NoOpAccumulator[*testTx]{}, responseSet, metrics, tt.targetResponseSize, @@ -233,70 +234,273 @@ func TestValidatorGossiper(t *testing.T) { require.Equal(2, calls) } +func TestPushGossiperNew(t *testing.T) { + tests := []struct { + name string + gossipParams BranchingFactor + regossipParams BranchingFactor + discardedSize int + targetGossipSize int + maxRegossipFrequency time.Duration + expected error + }{ + { + name: "invalid gossip num validators", + gossipParams: BranchingFactor{ + Validators: -1, + }, + regossipParams: BranchingFactor{ + Peers: 1, + }, + expected: ErrInvalidNumValidators, + }, + { + name: "invalid gossip num non-validators", + gossipParams: BranchingFactor{ + NonValidators: -1, + }, + regossipParams: BranchingFactor{ + Peers: 1, + }, + expected: ErrInvalidNumNonValidators, + }, + { + name: "invalid gossip num peers", + gossipParams: BranchingFactor{ + Peers: -1, + }, + regossipParams: BranchingFactor{ + Peers: 1, + }, + expected: ErrInvalidNumPeers, + }, + { + name: "invalid gossip num to gossip", + gossipParams: BranchingFactor{}, + regossipParams: BranchingFactor{ + Peers: 1, + }, + expected: ErrInvalidNumToGossip, + 
}, + { + name: "invalid regossip num validators", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{ + Validators: -1, + }, + expected: ErrInvalidNumValidators, + }, + { + name: "invalid regossip num non-validators", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{ + NonValidators: -1, + }, + expected: ErrInvalidNumNonValidators, + }, + { + name: "invalid regossip num peers", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{ + Peers: -1, + }, + expected: ErrInvalidNumPeers, + }, + { + name: "invalid regossip num to gossip", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{}, + expected: ErrInvalidNumToGossip, + }, + { + name: "invalid discarded size", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{ + Validators: 1, + }, + discardedSize: -1, + expected: ErrInvalidDiscardedSize, + }, + { + name: "invalid target gossip size", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{ + Validators: 1, + }, + targetGossipSize: -1, + expected: ErrInvalidTargetGossipSize, + }, + { + name: "invalid max re-gossip frequency", + gossipParams: BranchingFactor{ + Validators: 1, + }, + regossipParams: BranchingFactor{ + Validators: 1, + }, + maxRegossipFrequency: -1, + expected: ErrInvalidRegossipFrequency, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := NewPushGossiper[*testTx]( + nil, + nil, + nil, + nil, + Metrics{}, + tt.gossipParams, + tt.regossipParams, + tt.discardedSize, + tt.targetGossipSize, + tt.maxRegossipFrequency, + ) + require.ErrorIs(t, err, tt.expected) + }) + } +} + // Tests that the outgoing gossip is equivalent to what was accumulated func TestPushGossiper(t *testing.T) { + type cycle struct { + toAdd []*testTx + expected [][]*testTx + } tests := []struct { - name string - cycles 
[][]*testTx + name string + cycles []cycle + shouldRegossip bool }{ { - name: "single cycle", - cycles: [][]*testTx{ + name: "single cycle with regossip", + cycles: []cycle{ { - &testTx{ - id: ids.ID{0}, - }, - &testTx{ - id: ids.ID{1}, + toAdd: []*testTx{ + { + id: ids.ID{0}, + }, + { + id: ids.ID{1}, + }, + { + id: ids.ID{2}, + }, }, - &testTx{ - id: ids.ID{2}, + expected: [][]*testTx{ + { + { + id: ids.ID{0}, + }, + { + id: ids.ID{1}, + }, + { + id: ids.ID{2}, + }, + }, }, }, }, + shouldRegossip: true, }, { - name: "multiple cycles", - cycles: [][]*testTx{ + name: "multiple cycles with regossip", + cycles: []cycle{ { - &testTx{ - id: ids.ID{0}, + toAdd: []*testTx{ + { + id: ids.ID{0}, + }, + }, + expected: [][]*testTx{ + { + { + id: ids.ID{0}, + }, + }, }, }, { - &testTx{ - id: ids.ID{1}, + toAdd: []*testTx{ + { + id: ids.ID{1}, + }, }, - &testTx{ - id: ids.ID{2}, + expected: [][]*testTx{ + { + { + id: ids.ID{1}, + }, + }, + { + { + id: ids.ID{0}, + }, + }, }, }, { - &testTx{ - id: ids.ID{3}, - }, - &testTx{ - id: ids.ID{4}, + toAdd: []*testTx{ + { + id: ids.ID{2}, + }, }, - &testTx{ - id: ids.ID{5}, + expected: [][]*testTx{ + { + { + id: ids.ID{2}, + }, + }, + { + { + id: ids.ID{1}, + }, + { + id: ids.ID{0}, + }, + }, }, }, + }, + shouldRegossip: true, + }, + { + name: "verify that we don't gossip empty messages", + cycles: []cycle{ { - &testTx{ - id: ids.ID{6}, + toAdd: []*testTx{ + { + id: ids.ID{0}, + }, }, - &testTx{ - id: ids.ID{7}, - }, - &testTx{ - id: ids.ID{8}, - }, - &testTx{ - id: ids.ID{9}, + expected: [][]*testTx{ + { + { + id: ids.ID{0}, + }, + }, }, }, + { + toAdd: []*testTx{}, + expected: [][]*testTx{}, + }, }, + shouldRegossip: false, }, } @@ -306,7 +510,7 @@ func TestPushGossiper(t *testing.T) { ctx := context.Background() sender := &common.FakeSender{ - SentAppGossip: make(chan []byte, 1), + SentAppGossip: make(chan []byte, 2), } network, err := p2p.NewNetwork( logging.NoLog{}, @@ -316,144 +520,89 @@ func TestPushGossiper(t *testing.T) { ) 
require.NoError(err) client := network.NewClient(0) + validators := p2p.NewValidators( + &p2p.Peers{}, + logging.NoLog{}, + constants.PrimaryNetworkID, + &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 1, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return nil, nil + }, + }, + time.Hour, + ) metrics, err := NewMetrics(prometheus.NewRegistry(), "") require.NoError(err) marshaller := testMarshaller{} - gossiper := NewPushGossiper[*testTx]( + + regossipTime := time.Hour + if tt.shouldRegossip { + regossipTime = time.Nanosecond + } + + gossiper, err := NewPushGossiper[*testTx]( marshaller, + FullSet[*testTx]{}, + validators, client, metrics, + BranchingFactor{ + Validators: 1, + }, + BranchingFactor{ + Validators: 1, + }, + 0, // the discarded cache size doesn't matter for this test units.MiB, + regossipTime, ) + require.NoError(err) - for _, gossipables := range tt.cycles { - gossiper.Add(gossipables...) + for _, cycle := range tt.cycles { + gossiper.Add(cycle.toAdd...) 
require.NoError(gossiper.Gossip(ctx)) - want := &sdk.PushGossip{ - Gossip: make([][]byte, 0, len(tt.cycles)), + for _, expected := range cycle.expected { + want := &sdk.PushGossip{ + Gossip: make([][]byte, 0, len(expected)), + } + + for _, gossipable := range expected { + bytes, err := marshaller.MarshalGossip(gossipable) + require.NoError(err) + + want.Gossip = append(want.Gossip, bytes) + } + + if len(want.Gossip) > 0 { + // remove the handler prefix + sentMsg := <-sender.SentAppGossip + got := &sdk.PushGossip{} + require.NoError(proto.Unmarshal(sentMsg[1:], got)) + + require.Equal(want.Gossip, got.Gossip) + } else { + select { + case <-sender.SentAppGossip: + require.FailNow("unexpectedly sent gossip message") + default: + } + } } - for _, gossipable := range gossipables { - bytes, err := marshaller.MarshalGossip(gossipable) - require.NoError(err) - - want.Gossip = append(want.Gossip, bytes) + if tt.shouldRegossip { + // Ensure that subsequent calls to `time.Now()` are + // sufficient for regossip. 
+ time.Sleep(regossipTime + time.Nanosecond) } - - // remove the handler prefix - sentMsg := <-sender.SentAppGossip - got := &sdk.PushGossip{} - require.NoError(proto.Unmarshal(sentMsg[1:], got)) - - require.Equal(want.Gossip, got.Gossip) } }) } } -// Tests that gossip to a peer should forward the gossip if it was not -// previously known -func TestPushGossipE2E(t *testing.T) { - require := require.New(t) - - // tx known by both the sender and the receiver which should not be - // forwarded - knownTx := &testTx{id: ids.GenerateTestID()} - - log := logging.NoLog{} - bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 100, 0.01, 0.05) - require.NoError(err) - set := &testSet{ - txs: make(map[ids.ID]*testTx), - bloom: bloom, - } - require.NoError(set.Add(knownTx)) - - forwarder := &common.FakeSender{ - SentAppGossip: make(chan []byte, 1), - } - forwarderNetwork, err := p2p.NewNetwork(log, forwarder, prometheus.NewRegistry(), "") - require.NoError(err) - handlerID := uint64(123) - client := forwarderNetwork.NewClient(handlerID) - - metrics, err := NewMetrics(prometheus.NewRegistry(), "") - require.NoError(err) - marshaller := testMarshaller{} - forwarderGossiper := NewPushGossiper[*testTx]( - marshaller, - client, - metrics, - units.MiB, - ) - - handler := NewHandler[*testTx]( - log, - marshaller, - forwarderGossiper, - set, - metrics, - 0, - ) - require.NoError(err) - require.NoError(forwarderNetwork.AddHandler(handlerID, handler)) - - issuer := &common.FakeSender{ - SentAppGossip: make(chan []byte, 1), - } - issuerNetwork, err := p2p.NewNetwork(log, issuer, prometheus.NewRegistry(), "") - require.NoError(err) - issuerClient := issuerNetwork.NewClient(handlerID) - require.NoError(err) - issuerGossiper := NewPushGossiper[*testTx]( - marshaller, - issuerClient, - metrics, - units.MiB, - ) - - want := []*testTx{ - {id: ids.GenerateTestID()}, - {id: ids.GenerateTestID()}, - {id: ids.GenerateTestID()}, - } - - // gossip both some unseen txs and one the receiver 
already knows about - var gossiped []*testTx - gossiped = append(gossiped, want...) - gossiped = append(gossiped, knownTx) - - issuerGossiper.Add(gossiped...) - addedToSet := make([]*testTx, 0, len(want)) - set.onAdd = func(tx *testTx) { - addedToSet = append(addedToSet, tx) - } - - ctx := context.Background() - require.NoError(issuerGossiper.Gossip(ctx)) - - // make sure that we only add new txs someone gossips to us - require.NoError(forwarderNetwork.AppGossip(ctx, ids.EmptyNodeID, <-issuer.SentAppGossip)) - require.Equal(want, addedToSet) - - // make sure that we only forward txs we have not already seen before - forwardedBytes := <-forwarder.SentAppGossip - forwardedMsg := &sdk.PushGossip{} - require.NoError(proto.Unmarshal(forwardedBytes[1:], forwardedMsg)) - require.Len(forwardedMsg.Gossip, len(want)) - - gotForwarded := make([]*testTx, 0, len(addedToSet)) - - for _, bytes := range forwardedMsg.Gossip { - tx, err := marshaller.UnmarshalGossip(bytes) - require.NoError(err) - gotForwarded = append(gotForwarded, tx) - } - - require.Equal(want, gotForwarded) -} - type testValidatorSet struct { validators set.Set[ids.NodeID] } diff --git a/network/p2p/gossip/gossipable.go b/network/p2p/gossip/gossipable.go index 238c62b4641c..6af60d666bb6 100644 --- a/network/p2p/gossip/gossipable.go +++ b/network/p2p/gossip/gossipable.go @@ -21,6 +21,8 @@ type Set[T Gossipable] interface { // Add adds a Gossipable to the set. Returns an error if gossipable was not // added. Add(gossipable T) error + // Has returns true if the gossipable is in the set. 
+ Has(gossipID ids.ID) bool // Iterate iterates over elements until [f] returns false Iterate(f func(gossipable T) bool) // GetFilter returns the byte representation of bloom filter and its diff --git a/network/p2p/gossip/handler.go b/network/p2p/gossip/handler.go index 7e9145f79091..e0dd405eaf0c 100644 --- a/network/p2p/gossip/handler.go +++ b/network/p2p/gossip/handler.go @@ -21,7 +21,6 @@ var _ p2p.Handler = (*Handler[*testTx])(nil) func NewHandler[T Gossipable]( log logging.Logger, marshaller Marshaller[T], - accumulator Accumulator[T], set Set[T], metrics Metrics, targetResponseSize int, @@ -30,7 +29,6 @@ func NewHandler[T Gossipable]( Handler: p2p.NoOpHandler{}, log: log, marshaller: marshaller, - accumulator: accumulator, set: set, metrics: metrics, targetResponseSize: targetResponseSize, @@ -40,7 +38,6 @@ func NewHandler[T Gossipable]( type Handler[T Gossipable] struct { p2p.Handler marshaller Marshaller[T] - accumulator Accumulator[T] log logging.Logger set Set[T] metrics Metrics @@ -76,7 +73,6 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req return responseSize <= h.targetResponseSize }) - if err != nil { return nil, p2p.ErrUnexpected } @@ -102,7 +98,7 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req return bytes, nil } -func (h Handler[_]) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { +func (h Handler[_]) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) { gossip, err := ParseAppGossip(gossipBytes) if err != nil { h.log.Debug("failed to unmarshal gossip", zap.Error(err)) @@ -128,30 +124,12 @@ func (h Handler[_]) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipByte zap.Stringer("id", gossipable.GossipID()), zap.Error(err), ) - continue } - - // continue gossiping messages we have not seen to other peers - h.accumulator.Add(gossipable) } - if err := h.accumulator.Gossip(ctx); err != nil { - h.log.Error("failed to forward gossip", 
zap.Error(err)) - return + if err := h.metrics.observeMessage(receivedPushLabels, len(gossip), receivedBytes); err != nil { + h.log.Error("failed to update metrics", + zap.Error(err), + ) } - - receivedCountMetric, err := h.metrics.receivedCount.GetMetricWith(pushLabels) - if err != nil { - h.log.Error("failed to get received count metric", zap.Error(err)) - return - } - - receivedBytesMetric, err := h.metrics.receivedBytes.GetMetricWith(pushLabels) - if err != nil { - h.log.Error("failed to get received bytes metric", zap.Error(err)) - return - } - - receivedCountMetric.Add(float64(len(gossip))) - receivedBytesMetric.Add(float64(receivedBytes)) } diff --git a/network/p2p/gossip/test_gossip.go b/network/p2p/gossip/test_gossip.go index 03098399462e..7f8782b65916 100644 --- a/network/p2p/gossip/test_gossip.go +++ b/network/p2p/gossip/test_gossip.go @@ -56,6 +56,11 @@ func (t *testSet) Add(gossipable *testTx) error { return nil } +func (t *testSet) Has(gossipID ids.ID) bool { + _, ok := t.txs[gossipID] + return ok +} + func (t *testSet) Iterate(f func(gossipable *testTx) bool) { for _, tx := range t.txs { if !f(tx) { diff --git a/network/p2p/handler.go b/network/p2p/handler.go index 9fbc2965ce2c..0b172abcc44c 100644 --- a/network/p2p/handler.go +++ b/network/p2p/handler.go @@ -82,8 +82,7 @@ type ValidatorHandler struct { func (v ValidatorHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { if !v.validatorSet.Has(ctx, nodeID) { - v.log.Debug( - "dropping message", + v.log.Debug("dropping message", zap.Stringer("nodeID", nodeID), zap.String("reason", "not a validator"), ) @@ -124,6 +123,7 @@ func (r *responder) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID zap.Time("deadline", deadline), zap.Uint64("handlerID", r.handlerID), zap.Binary("message", request), + zap.Error(err), ) return r.sender.SendAppError(ctx, nodeID, requestID, err.Code, err.Message) } diff --git a/network/p2p/network.go b/network/p2p/network.go index 
a98579c44183..dd7bac73aa7b 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -26,8 +26,9 @@ var ( _ common.AppHandler = (*Network)(nil) _ NodeSampler = (*peerSampler)(nil) + opLabel = "op" handlerLabel = "handlerID" - labelNames = []string{handlerLabel} + labelNames = []string{opLabel, handlerLabel} ) // ClientOption configures Client @@ -62,93 +63,27 @@ func NewNetwork( namespace string, ) (*Network, error) { metrics := metrics{ - appRequestTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_request_time", - Help: "app request time (ns)", - }, labelNames), - appRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_request_count", - Help: "app request count (n)", - }, labelNames), - appResponseTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_response_time", - Help: "app response time (ns)", - }, labelNames), - appResponseCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_response_count", - Help: "app response count (n)", - }, labelNames), - appRequestFailedTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_request_failed_time", - Help: "app request failed time (ns)", - }, labelNames), - appRequestFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_request_failed_count", - Help: "app request failed count (ns)", - }, labelNames), - appGossipTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_gossip_time", - Help: "app gossip time (ns)", - }, labelNames), - appGossipCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "app_gossip_count", - Help: "app gossip count (n)", - }, labelNames), - crossChainAppRequestTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: 
"cross_chain_app_request_time", - Help: "cross chain app request time (ns)", - }, labelNames), - crossChainAppRequestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "cross_chain_app_request_count", - Help: "cross chain app request count (n)", - }, labelNames), - crossChainAppResponseTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "cross_chain_app_response_time", - Help: "cross chain app response time (ns)", - }, labelNames), - crossChainAppResponseCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "cross_chain_app_response_count", - Help: "cross chain app response count (n)", - }, labelNames), - crossChainAppRequestFailedTime: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "cross_chain_app_request_failed_time", - Help: "cross chain app request failed time (ns)", - }, labelNames), - crossChainAppRequestFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "cross_chain_app_request_failed_count", - Help: "cross chain app request failed count (n)", - }, labelNames), + msgTime: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "msg_time", + Help: "message handling time (ns)", + }, + labelNames, + ), + msgCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "msg_count", + Help: "message count (n)", + }, + labelNames, + ), } err := utils.Err( - registerer.Register(metrics.appRequestTime), - registerer.Register(metrics.appRequestCount), - registerer.Register(metrics.appResponseTime), - registerer.Register(metrics.appResponseCount), - registerer.Register(metrics.appRequestFailedTime), - registerer.Register(metrics.appRequestFailedCount), - registerer.Register(metrics.appGossipTime), - registerer.Register(metrics.appGossipCount), - registerer.Register(metrics.crossChainAppRequestTime), - 
registerer.Register(metrics.crossChainAppRequestCount), - registerer.Register(metrics.crossChainAppResponseTime), - registerer.Register(metrics.crossChainAppResponseCount), - registerer.Register(metrics.crossChainAppRequestFailedTime), - registerer.Register(metrics.crossChainAppRequestFailedCount), + registerer.Register(metrics.msgTime), + registerer.Register(metrics.msgCount), ) if err != nil { return nil, err diff --git a/network/p2p/network_test.go b/network/p2p/network_test.go index 0d193d866a25..67a5edf5e72f 100644 --- a/network/p2p/network_test.go +++ b/network/p2p/network_test.go @@ -69,7 +69,13 @@ func TestMessageRouting(t *testing.T) { require.NoError(network.AddHandler(1, testHandler)) client := network.NewClient(1) - require.NoError(client.AppGossip(ctx, wantMsg)) + require.NoError(client.AppGossip( + ctx, + common.SendConfig{ + Peers: 1, + }, + wantMsg, + )) require.NoError(network.AppGossip(ctx, wantNodeID, <-sender.SentAppGossip)) require.True(appGossipCalled) @@ -90,7 +96,6 @@ func TestClientPrefixesMessages(t *testing.T) { sender := common.FakeSender{ SentAppRequest: make(chan []byte, 1), SentAppGossip: make(chan []byte, 1), - SentAppGossipSpecific: make(chan []byte, 1), SentCrossChainAppRequest: make(chan []byte, 1), } @@ -130,15 +135,16 @@ func TestClientPrefixesMessages(t *testing.T) { require.Equal(handlerPrefix, gotCrossChainAppRequest[0]) require.Equal(want, gotCrossChainAppRequest[1:]) - require.NoError(client.AppGossip(ctx, want)) + require.NoError(client.AppGossip( + ctx, + common.SendConfig{ + Peers: 1, + }, + want, + )) gotAppGossip := <-sender.SentAppGossip require.Equal(handlerPrefix, gotAppGossip[0]) require.Equal(want, gotAppGossip[1:]) - - require.NoError(client.AppGossipSpecific(ctx, set.Of(ids.EmptyNodeID), want)) - gotAppGossip = <-sender.SentAppGossipSpecific - require.Equal(handlerPrefix, gotAppGossip[0]) - require.Equal(want, gotAppGossip[1:]) } // Tests that the Client callback is called on a successful response @@ -175,6 
+181,48 @@ func TestAppRequestResponse(t *testing.T) { <-done } +// Tests that the Client does not provide a cancelled context to the AppSender. +func TestAppRequestCancelledContext(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sentMessages := make(chan []byte, 1) + sender := &common.SenderTest{ + SendAppRequestF: func(ctx context.Context, _ set.Set[ids.NodeID], _ uint32, msgBytes []byte) error { + require.NoError(ctx.Err()) + sentMessages <- msgBytes + return nil + }, + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + wantResponse := []byte("response") + wantNodeID := ids.GenerateTestNodeID() + done := make(chan struct{}) + + callback := func(_ context.Context, gotNodeID ids.NodeID, gotResponse []byte, err error) { + require.Equal(wantNodeID, gotNodeID) + require.NoError(err) + require.Equal(wantResponse, gotResponse) + + close(done) + } + + cancelledCtx, cancel := context.WithCancel(ctx) + cancel() + + want := []byte("request") + require.NoError(client.AppRequest(cancelledCtx, set.Of(wantNodeID), want, callback)) + got := <-sentMessages + require.Equal(handlerPrefix, got[0]) + require.Equal(want, got[1:]) + + require.NoError(network.AppResponse(ctx, wantNodeID, 1, wantResponse)) + <-done +} + // Tests that the Client callback is given an error if the request fails func TestAppRequestFailed(t *testing.T) { require := require.New(t) @@ -236,6 +284,44 @@ func TestCrossChainAppRequestResponse(t *testing.T) { <-done } +// Tests that the Client does not provide a cancelled context to the AppSender. 
+func TestCrossChainAppRequestCancelledContext(t *testing.T) { + require := require.New(t) + ctx := context.Background() + + sentMessages := make(chan []byte, 1) + sender := &common.SenderTest{ + SendCrossChainAppRequestF: func(ctx context.Context, _ ids.ID, _ uint32, msgBytes []byte) { + require.NoError(ctx.Err()) + sentMessages <- msgBytes + }, + } + network, err := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(err) + client := network.NewClient(handlerID) + + cancelledCtx, cancel := context.WithCancel(ctx) + cancel() + + wantChainID := ids.GenerateTestID() + wantResponse := []byte("response") + done := make(chan struct{}) + + callback := func(_ context.Context, gotChainID ids.ID, gotResponse []byte, err error) { + require.Equal(wantChainID, gotChainID) + require.NoError(err) + require.Equal(wantResponse, gotResponse) + + close(done) + } + + require.NoError(client.CrossChainAppRequest(cancelledCtx, wantChainID, []byte("request"), callback)) + <-sentMessages + + require.NoError(network.CrossChainAppResponse(ctx, wantChainID, 1, wantResponse)) + <-done +} + // Tests that the Client callback is given an error if the request fails func TestCrossChainAppRequestFailed(t *testing.T) { require := require.New(t) @@ -736,7 +822,10 @@ func TestNodeSamplerClientOption(t *testing.T) { }, GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { return map[ids.NodeID]*validators.GetValidatorOutput{ - nodeID1: nil, + nodeID1: { + NodeID: nodeID1, + Weight: 1, + }, }, nil }, } @@ -756,7 +845,10 @@ func TestNodeSamplerClientOption(t *testing.T) { }, GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { return map[ids.NodeID]*validators.GetValidatorOutput{ - nodeID1: nil, + nodeID1: { + NodeID: nodeID1, + Weight: 1, + }, }, nil }, } diff --git a/network/p2p/router.go b/network/p2p/router.go index 5df81c8806d2..ef03fd0cadf2 100644 
--- a/network/p2p/router.go +++ b/network/p2p/router.go @@ -45,20 +45,24 @@ type meteredHandler struct { } type metrics struct { - appRequestTime *prometheus.CounterVec - appRequestCount *prometheus.CounterVec - appResponseTime *prometheus.CounterVec - appResponseCount *prometheus.CounterVec - appRequestFailedTime *prometheus.CounterVec - appRequestFailedCount *prometheus.CounterVec - appGossipTime *prometheus.CounterVec - appGossipCount *prometheus.CounterVec - crossChainAppRequestTime *prometheus.CounterVec - crossChainAppRequestCount *prometheus.CounterVec - crossChainAppResponseTime *prometheus.CounterVec - crossChainAppResponseCount *prometheus.CounterVec - crossChainAppRequestFailedTime *prometheus.CounterVec - crossChainAppRequestFailedCount *prometheus.CounterVec + msgTime *prometheus.GaugeVec + msgCount *prometheus.CounterVec +} + +func (m *metrics) observe(labels prometheus.Labels, start time.Time) error { + metricTime, err := m.msgTime.GetMetricWith(labels) + if err != nil { + return err + } + + metricCount, err := m.msgCount.GetMetricWith(labels) + if err != nil { + return err + } + + metricTime.Add(float64(time.Since(start))) + metricCount.Inc() + return nil } // router routes incoming application messages to the corresponding registered @@ -140,24 +144,13 @@ func (r *router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui return err } - labels := prometheus.Labels{ - handlerLabel: handlerID, - } - - metricCount, err := r.metrics.appRequestCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := r.metrics.appRequestTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: message.AppRequestOp.String(), + handlerLabel: handlerID, + }, + start, + ) } // AppRequestFailed routes an AppRequestFailed message to the callback @@ -175,24 +168,13 @@ func (r *router) 
AppRequestFailed(ctx context.Context, nodeID ids.NodeID, reques pending.callback(ctx, nodeID, nil, appErr) - labels := prometheus.Labels{ - handlerLabel: pending.handlerID, - } - - metricCount, err := r.metrics.appRequestFailedCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := r.metrics.appRequestFailedTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: message.AppErrorOp.String(), + handlerLabel: pending.handlerID, + }, + start, + ) } // AppResponse routes an AppResponse message to the callback corresponding to @@ -210,24 +192,13 @@ func (r *router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID u pending.callback(ctx, nodeID, response, nil) - labels := prometheus.Labels{ - handlerLabel: pending.handlerID, - } - - metricCount, err := r.metrics.appResponseCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := r.metrics.appResponseTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: message.AppResponseOp.String(), + handlerLabel: pending.handlerID, + }, + start, + ) } // AppGossip routes an AppGossip message to a Handler based on the handler @@ -249,24 +220,13 @@ func (r *router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte handler.AppGossip(ctx, nodeID, parsedMsg) - labels := prometheus.Labels{ - handlerLabel: handlerID, - } - - metricCount, err := r.metrics.appGossipCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := r.metrics.appGossipTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: 
message.AppGossipOp.String(), + handlerLabel: handlerID, + }, + start, + ) } // CrossChainAppRequest routes a CrossChainAppRequest message to a Handler @@ -300,24 +260,13 @@ func (r *router) CrossChainAppRequest( return err } - labels := prometheus.Labels{ - handlerLabel: handlerID, - } - - metricCount, err := r.metrics.crossChainAppRequestCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := r.metrics.crossChainAppRequestTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: message.CrossChainAppRequestOp.String(), + handlerLabel: handlerID, + }, + start, + ) } // CrossChainAppRequestFailed routes a CrossChainAppRequestFailed message to @@ -335,24 +284,13 @@ func (r *router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, pending.callback(ctx, chainID, nil, appErr) - labels := prometheus.Labels{ - handlerLabel: pending.handlerID, - } - - metricCount, err := r.metrics.crossChainAppRequestFailedCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := r.metrics.crossChainAppRequestFailedTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: message.CrossChainAppErrorOp.String(), + handlerLabel: pending.handlerID, + }, + start, + ) } // CrossChainAppResponse routes a CrossChainAppResponse message to the callback @@ -370,24 +308,13 @@ func (r *router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requ pending.callback(ctx, chainID, response, nil) - labels := prometheus.Labels{ - handlerLabel: pending.handlerID, - } - - metricCount, err := r.metrics.crossChainAppResponseCount.GetMetricWith(labels) - if err != nil { - return err - } - - metricTime, err := 
r.metrics.crossChainAppResponseTime.GetMetricWith(labels) - if err != nil { - return err - } - - metricCount.Inc() - metricTime.Add(float64(time.Since(start))) - - return nil + return r.metrics.observe( + prometheus.Labels{ + opLabel: message.CrossChainAppResponseOp.String(), + handlerLabel: pending.handlerID, + }, + start, + ) } // Parse parses a gossip or request message and maps it to a corresponding diff --git a/network/p2p/throttler_handler.go b/network/p2p/throttler_handler.go index bf4ebf0cea64..d8006a8dead8 100644 --- a/network/p2p/throttler_handler.go +++ b/network/p2p/throttler_handler.go @@ -32,8 +32,7 @@ type ThrottlerHandler struct { func (t ThrottlerHandler) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { if !t.throttler.Handle(nodeID) { - t.log.Debug( - "dropping message", + t.log.Debug("dropping message", zap.Stringer("nodeID", nodeID), zap.String("reason", "throttled"), ) diff --git a/network/p2p/validators.go b/network/p2p/validators.go index 3ece6559af42..161d84d88372 100644 --- a/network/p2p/validators.go +++ b/network/p2p/validators.go @@ -4,7 +4,9 @@ package p2p import ( + "cmp" "context" + "math" "sync" "time" @@ -12,19 +14,26 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" ) var ( - _ ValidatorSet = (*Validators)(nil) - _ NodeSampler = (*Validators)(nil) + _ ValidatorSet = (*Validators)(nil) + _ ValidatorSubset = (*Validators)(nil) + _ NodeSampler = (*Validators)(nil) ) type ValidatorSet interface { Has(ctx context.Context, nodeID ids.NodeID) bool // TODO return error } +type ValidatorSubset interface { + Top(ctx context.Context, percentage float64) []ids.NodeID // TODO return error +} + func NewValidators( peers *Peers, log logging.Logger, @@ -43,15 +52,29 @@ func NewValidators( // 
Validators contains a set of nodes that are staking. type Validators struct { - peers *Peers - log logging.Logger - subnetID ids.ID - validators validators.State - - lock sync.Mutex - validatorIDs set.SampleableSet[ids.NodeID] - lastUpdated time.Time + peers *Peers + log logging.Logger + subnetID ids.ID + validators validators.State maxValidatorSetStaleness time.Duration + + lock sync.Mutex + validatorList []validator + validatorSet set.Set[ids.NodeID] + totalWeight uint64 + lastUpdated time.Time +} + +type validator struct { + nodeID ids.NodeID + weight uint64 +} + +func (v validator) Compare(other validator) int { + if weightCmp := cmp.Compare(v.weight, other.weight); weightCmp != 0 { + return -weightCmp // Sort in decreasing order of stake + } + return v.nodeID.Compare(other.nodeID) } func (v *Validators) refresh(ctx context.Context) { @@ -59,7 +82,10 @@ func (v *Validators) refresh(ctx context.Context) { return } - v.validatorIDs.Clear() + // Even though validatorList may be nil, truncating will not panic. + v.validatorList = v.validatorList[:0] + v.validatorSet.Clear() + v.totalWeight = 0 height, err := v.validators.GetCurrentHeight(ctx) if err != nil { @@ -72,9 +98,15 @@ func (v *Validators) refresh(ctx context.Context) { return } - for nodeID := range validatorSet { - v.validatorIDs.Add(nodeID) + for nodeID, vdr := range validatorSet { + v.validatorList = append(v.validatorList, validator{ + nodeID: nodeID, + weight: vdr.Weight, + }) + v.validatorSet.Add(nodeID) + v.totalWeight += vdr.Weight } + utils.Sort(v.validatorList) v.lastUpdated = time.Now() } @@ -86,22 +118,57 @@ func (v *Validators) Sample(ctx context.Context, limit int) []ids.NodeID { v.refresh(ctx) - // TODO: Account for peer connectivity during the sampling of validators - // rather than filtering sampled validators. 
- validatorIDs := v.validatorIDs.Sample(limit) - sampled := validatorIDs[:0] + var ( + uniform = sampler.NewUniform() + sampled = make([]ids.NodeID, 0, limit) + ) - for _, validatorID := range validatorIDs { - if !v.peers.has(validatorID) { + uniform.Initialize(uint64(len(v.validatorList))) + for len(sampled) < limit { + i, hasNext := uniform.Next() + if !hasNext { + break + } + + nodeID := v.validatorList[i].nodeID + if !v.peers.has(nodeID) { continue } - sampled = append(sampled, validatorID) + sampled = append(sampled, nodeID) } return sampled } +// Top returns the top [percentage] of validators, regardless of if they are +// connected or not. +func (v *Validators) Top(ctx context.Context, percentage float64) []ids.NodeID { + percentage = max(0, min(1, percentage)) // bound percentage inside [0, 1] + + v.lock.Lock() + defer v.lock.Unlock() + + v.refresh(ctx) + + var ( + maxSize = int(math.Ceil(percentage * float64(len(v.validatorList)))) + top = make([]ids.NodeID, 0, maxSize) + currentStake uint64 + targetStake = uint64(math.Ceil(percentage * float64(v.totalWeight))) + ) + + for _, vdr := range v.validatorList { + if currentStake >= targetStake { + break + } + top = append(top, vdr.nodeID) + currentStake += vdr.weight + } + + return top +} + // Has returns if nodeID is a connected validator func (v *Validators) Has(ctx context.Context, nodeID ids.NodeID) bool { v.lock.Lock() @@ -109,5 +176,5 @@ func (v *Validators) Has(ctx context.Context, nodeID ids.NodeID) bool { v.refresh(ctx) - return v.peers.has(nodeID) && v.validatorIDs.Contains(nodeID) + return v.peers.has(nodeID) && v.validatorSet.Contains(nodeID) } diff --git a/network/p2p/validators_test.go b/network/p2p/validators_test.go index 7239ac01bc8b..49b2197f132a 100644 --- a/network/p2p/validators_test.go +++ b/network/p2p/validators_test.go @@ -23,6 +23,7 @@ func TestValidatorsSample(t *testing.T) { errFoobar := errors.New("foobar") nodeID1 := ids.GenerateTestNodeID() nodeID2 := ids.GenerateTestNodeID() + 
nodeID3 := ids.GenerateTestNodeID() type call struct { limit int @@ -44,6 +45,20 @@ func TestValidatorsSample(t *testing.T) { maxStaleness time.Duration calls []call }{ + { + // if we aren't connected to a validator, we shouldn't return it + name: "drop disconnected validators", + maxStaleness: time.Hour, + calls: []call{ + { + time: time.Time{}.Add(time.Second), + limit: 2, + height: 1, + validators: []ids.NodeID{nodeID1, nodeID3}, + expected: []ids.NodeID{nodeID1}, + }, + }, + }, { // if we don't have as many validators as requested by the caller, // we should return all the validators we have @@ -167,7 +182,10 @@ func TestValidatorsSample(t *testing.T) { validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, 0) for _, validator := range call.validators { - validatorSet[validator] = nil + validatorSet[validator] = &validators.GetValidatorOutput{ + NodeID: validator, + Weight: 1, + } } calls = append(calls, @@ -194,3 +212,119 @@ func TestValidatorsSample(t *testing.T) { }) } } + +func TestValidatorsTop(t *testing.T) { + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + nodeID3 := ids.GenerateTestNodeID() + + tests := []struct { + name string + validators []validator + percentage float64 + expected []ids.NodeID + }{ + { + name: "top 0% is empty", + validators: []validator{ + { + nodeID: nodeID1, + weight: 1, + }, + { + nodeID: nodeID2, + weight: 1, + }, + }, + percentage: 0, + expected: []ids.NodeID{}, + }, + { + name: "top 100% is full", + validators: []validator{ + { + nodeID: nodeID1, + weight: 2, + }, + { + nodeID: nodeID2, + weight: 1, + }, + }, + percentage: 1, + expected: []ids.NodeID{ + nodeID1, + nodeID2, + }, + }, + { + name: "top 50% takes larger validator", + validators: []validator{ + { + nodeID: nodeID1, + weight: 2, + }, + { + nodeID: nodeID2, + weight: 1, + }, + }, + percentage: .5, + expected: []ids.NodeID{ + nodeID1, + }, + }, + { + name: "top 50% bound", + validators: []validator{ + { + nodeID: nodeID1, + 
weight: 2, + }, + { + nodeID: nodeID2, + weight: 1, + }, + { + nodeID: nodeID3, + weight: 1, + }, + }, + percentage: .5, + expected: []ids.NodeID{ + nodeID1, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, 0) + for _, validator := range test.validators { + validatorSet[validator.nodeID] = &validators.GetValidatorOutput{ + NodeID: validator.nodeID, + Weight: validator.weight, + } + } + + subnetID := ids.GenerateTestID() + mockValidators := validators.NewMockState(ctrl) + + mockValidators.EXPECT().GetCurrentHeight(gomock.Any()).Return(uint64(1), nil) + mockValidators.EXPECT().GetValidatorSet(gomock.Any(), uint64(1), subnetID).Return(validatorSet, nil) + + network, err := NewNetwork(logging.NoLog{}, &common.FakeSender{}, prometheus.NewRegistry(), "") + require.NoError(err) + + ctx := context.Background() + require.NoError(network.Connected(ctx, nodeID1, nil)) + require.NoError(network.Connected(ctx, nodeID2, nil)) + + v := NewValidators(network.Peers, network.log, subnetID, mockValidators, time.Second) + nodeIDs := v.Top(ctx, test.percentage) + require.Equal(test.expected, nodeIDs) + }) + } +} diff --git a/network/peer/config.go b/network/peer/config.go index 3eb8319216d7..8aa12820cc41 100644 --- a/network/peer/config.go +++ b/network/peer/config.go @@ -33,13 +33,14 @@ type Config struct { Network Network Router router.InboundHandler VersionCompatibility version.Compatibility - MySubnets set.Set[ids.ID] - Beacons validators.Manager - Validators validators.Manager - NetworkID uint32 - PingFrequency time.Duration - PongTimeout time.Duration - MaxClockDifference time.Duration + // MySubnets does not include the primary network ID + MySubnets set.Set[ids.ID] + Beacons validators.Manager + Validators validators.Manager + NetworkID uint32 + PingFrequency time.Duration + PongTimeout time.Duration + 
MaxClockDifference time.Duration SupportedACPs []uint32 ObjectedACPs []uint32 diff --git a/network/peer/example_test.go b/network/peer/example_test.go index d6c8ba20c913..59e5268fb623 100644 --- a/network/peer/example_test.go +++ b/network/peer/example_test.go @@ -6,13 +6,12 @@ package peer import ( "context" "fmt" - "net" + "net/netip" "time" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/ips" ) func ExampleStartTestPeer() { @@ -20,10 +19,10 @@ func ExampleStartTestPeer() { ctx, cancel := context.WithTimeout(ctx, 15*time.Second) defer cancel() - peerIP := ips.IPPort{ - IP: net.IPv6loopback, - Port: 9651, - } + peerIP := netip.AddrPortFrom( + netip.IPv6Loopback(), + 9651, + ) peer, err := StartTestPeer( ctx, peerIP, diff --git a/network/peer/info.go b/network/peer/info.go index 00ccaec7953b..928c47ff26ee 100644 --- a/network/peer/info.go +++ b/network/peer/info.go @@ -4,6 +4,7 @@ package peer import ( + "net/netip" "time" "github.com/ava-labs/avalanchego/ids" @@ -12,8 +13,8 @@ import ( ) type Info struct { - IP string `json:"ip"` - PublicIP string `json:"publicIP,omitempty"` + IP netip.AddrPort `json:"ip"` + PublicIP netip.AddrPort `json:"publicIP,omitempty"` ID ids.NodeID `json:"nodeID"` Version string `json:"version"` LastSent time.Time `json:"lastSent"` diff --git a/network/peer/ip.go b/network/peer/ip.go index a873f1668d6a..443396d344d2 100644 --- a/network/peer/ip.go +++ b/network/peer/ip.go @@ -8,12 +8,13 @@ import ( "crypto/rand" "errors" "fmt" + "net" + "net/netip" "time" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -26,7 +27,7 @@ var ( // ensure that the most updated IP claim is tracked by peers for a given // validator. 
type UnsignedIP struct { - ips.IPPort + AddrPort netip.AddrPort Timestamp uint64 } @@ -49,9 +50,11 @@ func (ip *UnsignedIP) Sign(tlsSigner crypto.Signer, blsSigner *bls.SecretKey) (* func (ip *UnsignedIP) bytes() []byte { p := wrappers.Packer{ - Bytes: make([]byte, ips.IPPortLen+wrappers.LongLen), + Bytes: make([]byte, net.IPv6len+wrappers.ShortLen+wrappers.LongLen), } - ips.PackIP(&p, ip.IPPort) + addrBytes := ip.AddrPort.Addr().As16() + p.PackFixedBytes(addrBytes[:]) + p.PackShort(ip.AddrPort.Port()) p.PackLong(ip.Timestamp) return p.Bytes } diff --git a/network/peer/ip_signer.go b/network/peer/ip_signer.go index 1c38d4e67528..1053cfce3e62 100644 --- a/network/peer/ip_signer.go +++ b/network/peer/ip_signer.go @@ -5,16 +5,17 @@ package peer import ( "crypto" + "net/netip" "sync" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) // IPSigner will return a signedIP for the current value of our dynamic IP. type IPSigner struct { - ip ips.DynamicIPPort + ip *utils.Atomic[netip.AddrPort] clock mockable.Clock tlsSigner crypto.Signer blsSigner *bls.SecretKey @@ -27,7 +28,7 @@ type IPSigner struct { } func NewIPSigner( - ip ips.DynamicIPPort, + ip *utils.Atomic[netip.AddrPort], tlsSigner crypto.Signer, blsSigner *bls.SecretKey, ) *IPSigner { @@ -49,8 +50,8 @@ func (s *IPSigner) GetSignedIP() (*SignedIP, error) { s.signedIPLock.RLock() signedIP := s.signedIP s.signedIPLock.RUnlock() - ip := s.ip.IPPort() - if signedIP != nil && signedIP.IPPort.Equal(ip) { + ip := s.ip.Get() + if signedIP != nil && signedIP.AddrPort == ip { return signedIP, nil } @@ -62,13 +63,13 @@ func (s *IPSigner) GetSignedIP() (*SignedIP, error) { // same time, we should verify that we are the first thread to attempt to // update it. 
signedIP = s.signedIP - if signedIP != nil && signedIP.IPPort.Equal(ip) { + if signedIP != nil && signedIP.AddrPort == ip { return signedIP, nil } // We should now sign our new IP at the current timestamp. unsignedIP := UnsignedIP{ - IPPort: ip, + AddrPort: ip, Timestamp: s.clock.Unix(), } signedIP, err := unsignedIP.Sign(s.tlsSigner, s.blsSigner) diff --git a/network/peer/ip_signer_test.go b/network/peer/ip_signer_test.go index 315becd8f082..cff9b2cbbda2 100644 --- a/network/peer/ip_signer_test.go +++ b/network/peer/ip_signer_test.go @@ -5,24 +5,24 @@ package peer import ( "crypto" - "net" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" ) func TestIPSigner(t *testing.T) { require := require.New(t) - dynIP := ips.NewDynamicIPPort( - net.IPv6loopback, + dynIP := utils.NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), 0, - ) + )) tlsCert, err := staking.NewTLSCert() require.NoError(err) @@ -37,22 +37,25 @@ func TestIPSigner(t *testing.T) { signedIP1, err := s.GetSignedIP() require.NoError(err) - require.Equal(dynIP.IPPort(), signedIP1.IPPort) + require.Equal(dynIP.Get(), signedIP1.AddrPort) require.Equal(uint64(10), signedIP1.Timestamp) s.clock.Set(time.Unix(11, 0)) signedIP2, err := s.GetSignedIP() require.NoError(err) - require.Equal(dynIP.IPPort(), signedIP2.IPPort) + require.Equal(dynIP.Get(), signedIP2.AddrPort) require.Equal(uint64(10), signedIP2.Timestamp) require.Equal(signedIP1.TLSSignature, signedIP2.TLSSignature) - dynIP.SetIP(net.IPv4(1, 2, 3, 4)) + dynIP.Set(netip.AddrPortFrom( + netip.AddrFrom4([4]byte{1, 2, 3, 4}), + dynIP.Get().Port(), + )) signedIP3, err := s.GetSignedIP() require.NoError(err) - require.Equal(dynIP.IPPort(), signedIP3.IPPort) + require.Equal(dynIP.Get(), signedIP3.AddrPort) require.Equal(uint64(11), signedIP3.Timestamp) 
require.NotEqual(signedIP2.TLSSignature, signedIP3.TLSSignature) } diff --git a/network/peer/ip_test.go b/network/peer/ip_test.go index dd39d5a8a8b9..4c3f62d27694 100644 --- a/network/peer/ip_test.go +++ b/network/peer/ip_test.go @@ -5,7 +5,7 @@ package peer import ( "crypto" - "net" + "net/netip" "testing" "time" @@ -13,24 +13,27 @@ import ( "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" ) func TestSignedIpVerify(t *testing.T) { tlsCert1, err := staking.NewTLSCert() require.NoError(t, err) - cert1 := staking.CertificateFromX509(tlsCert1.Leaf) - require.NoError(t, staking.ValidateCertificate(cert1)) + cert1, err := staking.ParseCertificate(tlsCert1.Leaf.Raw) + require.NoError(t, err) tlsKey1 := tlsCert1.PrivateKey.(crypto.Signer) blsKey1, err := bls.NewSecretKey() require.NoError(t, err) tlsCert2, err := staking.NewTLSCert() require.NoError(t, err) - cert2 := staking.CertificateFromX509(tlsCert2.Leaf) - require.NoError(t, staking.ValidateCertificate(cert2)) + cert2, err := staking.ParseCertificate(tlsCert2.Leaf.Raw) + require.NoError(t, err) now := time.Now() + addrPort := netip.AddrPortFrom( + netip.AddrFrom4([4]byte{1, 2, 3, 4}), + 1, + ) type test struct { name string @@ -49,10 +52,7 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert1, ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, + AddrPort: addrPort, Timestamp: uint64(now.Unix()) - 1, }, maxTimestamp: now, @@ -64,10 +64,7 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert1, ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, + AddrPort: addrPort, Timestamp: uint64(now.Unix()), }, maxTimestamp: now, @@ -79,10 +76,7 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert1, ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, + AddrPort: 
addrPort, Timestamp: uint64(now.Unix()) + 1, }, maxTimestamp: now, @@ -94,10 +88,6 @@ func TestSignedIpVerify(t *testing.T) { blsSigner: blsKey1, expectedCert: cert2, // note this isn't cert1 ip: UnsignedIP{ - IPPort: ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - Port: 1, - }, Timestamp: uint64(now.Unix()), }, maxTimestamp: now, diff --git a/network/peer/metrics.go b/network/peer/metrics.go index 1dcfcdd6389a..7547d7a827d4 100644 --- a/network/peer/metrics.go +++ b/network/peer/metrics.go @@ -4,180 +4,146 @@ package peer import ( - "fmt" + "strconv" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) -type MessageMetrics struct { - ReceivedBytes, SentBytes, NumSent, NumFailed, NumReceived prometheus.Counter - SavedReceivedBytes, SavedSentBytes metric.Averager -} +const ( + ioLabel = "io" + opLabel = "op" + compressedLabel = "compressed" -func NewMessageMetrics( - op message.Op, - namespace string, - metrics prometheus.Registerer, - errs *wrappers.Errs, -) *MessageMetrics { - msg := &MessageMetrics{ - NumSent: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_sent", op), - Help: fmt.Sprintf("Number of %s messages sent over the network", op), - }), - NumFailed: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_failed", op), - Help: fmt.Sprintf("Number of %s messages that failed to be sent over the network", op), - }), - NumReceived: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_received", op), - Help: fmt.Sprintf("Number of %s messages received from the network", op), - }), - ReceivedBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: 
fmt.Sprintf("%s_received_bytes", op), - Help: fmt.Sprintf("Number of bytes of %s messages received from the network", op), - }), - SentBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: fmt.Sprintf("%s_sent_bytes", op), - Help: fmt.Sprintf("Size of bytes of %s messages received from the network", op), - }), - } - errs.Add( - metrics.Register(msg.NumSent), - metrics.Register(msg.NumFailed), - metrics.Register(msg.NumReceived), - metrics.Register(msg.ReceivedBytes), - metrics.Register(msg.SentBytes), - ) + sentLabel = "sent" + receivedLabel = "received" +) - msg.SavedReceivedBytes = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_compression_saved_received_bytes", op), - fmt.Sprintf("bytes saved (not received) due to compression of %s messages", op), - metrics, - errs, - ) - msg.SavedSentBytes = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_compression_saved_sent_bytes", op), - fmt.Sprintf("bytes saved (not sent) due to compression of %s messages", op), - metrics, - errs, - ) - return msg -} +var ( + opLabels = []string{opLabel} + ioOpLabels = []string{ioLabel, opLabel} + ioOpCompressedLabels = []string{ioLabel, opLabel, compressedLabel} +) type Metrics struct { - Log logging.Logger - ClockSkew metric.Averager - FailedToParse prometheus.Counter - MessageMetrics map[message.Op]*MessageMetrics + ClockSkewCount prometheus.Counter + ClockSkewSum prometheus.Gauge + + NumFailedToParse prometheus.Counter + NumSendFailed *prometheus.CounterVec // op + + Messages *prometheus.CounterVec // io + op + compressed + Bytes *prometheus.CounterVec // io + op + BytesSaved *prometheus.GaugeVec // io + op } -func NewMetrics( - log logging.Logger, - namespace string, - registerer prometheus.Registerer, -) (*Metrics, error) { +func NewMetrics(registerer prometheus.Registerer) (*Metrics, error) { m := &Metrics{ - Log: log, - FailedToParse: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: 
"msgs_failed_to_parse", - Help: "Number of messages that could not be parsed or were invalidly formed", + ClockSkewCount: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "clock_skew_count", + Help: "number of handshake timestamps inspected (n)", }), - MessageMetrics: make(map[message.Op]*MessageMetrics, len(message.ExternalOps)), - } - - errs := wrappers.Errs{} - errs.Add( - registerer.Register(m.FailedToParse), - ) - for _, op := range message.ExternalOps { - m.MessageMetrics[op] = NewMessageMetrics(op, namespace, registerer, &errs) + ClockSkewSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "clock_skew_sum", + Help: "sum of (peer timestamp - local timestamp) from handshake messages (s)", + }), + NumFailedToParse: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "msgs_failed_to_parse", + Help: "number of received messages that could not be parsed", + }), + NumSendFailed: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "msgs_failed_to_send", + Help: "number of messages that failed to be sent", + }, + opLabels, + ), + Messages: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "msgs", + Help: "number of handled messages", + }, + ioOpCompressedLabels, + ), + Bytes: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "msgs_bytes", + Help: "number of message bytes", + }, + ioOpLabels, + ), + BytesSaved: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "msgs_bytes_saved", + Help: "number of message bytes saved", + }, + ioOpLabels, + ), } - - m.ClockSkew = metric.NewAveragerWithErrs( - namespace, - "clock_skew", - "clock skew during peer handshake", - registerer, - &errs, + return m, utils.Err( + registerer.Register(m.ClockSkewCount), + registerer.Register(m.ClockSkewSum), + registerer.Register(m.NumFailedToParse), + registerer.Register(m.NumSendFailed), + registerer.Register(m.Messages), + registerer.Register(m.Bytes), + registerer.Register(m.BytesSaved), ) - return m, errs.Err } // Sent updates the metrics for 
having sent [msg]. func (m *Metrics) Sent(msg message.OutboundMessage) { - op := msg.Op() - msgMetrics := m.MessageMetrics[op] - if msgMetrics == nil { - m.Log.Error( - "unknown message being sent", - zap.Stringer("messageOp", op), - ) - return - } - msgMetrics.NumSent.Inc() - msgMetrics.SentBytes.Add(float64(len(msg.Bytes()))) - // assume that if [saved] == 0, [msg] wasn't compressed - if saved := msg.BytesSavedCompression(); saved != 0 { - msgMetrics.SavedSentBytes.Observe(float64(saved)) + op := msg.Op().String() + saved := msg.BytesSavedCompression() + compressed := saved != 0 // assume that if [saved] == 0, [msg] wasn't compressed + compressedStr := strconv.FormatBool(compressed) + + m.Messages.With(prometheus.Labels{ + ioLabel: sentLabel, + opLabel: op, + compressedLabel: compressedStr, + }).Inc() + + bytesLabel := prometheus.Labels{ + ioLabel: sentLabel, + opLabel: op, } + m.Bytes.With(bytesLabel).Add(float64(len(msg.Bytes()))) + m.BytesSaved.With(bytesLabel).Add(float64(saved)) } func (m *Metrics) MultipleSendsFailed(op message.Op, count int) { - msgMetrics := m.MessageMetrics[op] - if msgMetrics == nil { - m.Log.Error( - "unknown messages failed to be sent", - zap.Stringer("messageOp", op), - zap.Int("messageCount", count), - ) - return - } - msgMetrics.NumFailed.Add(float64(count)) + m.NumSendFailed.With(prometheus.Labels{ + opLabel: op.String(), + }).Add(float64(count)) } // SendFailed updates the metrics for having failed to send [msg]. 
func (m *Metrics) SendFailed(msg message.OutboundMessage) { - op := msg.Op() - msgMetrics := m.MessageMetrics[op] - if msgMetrics == nil { - m.Log.Error( - "unknown message failed to be sent", - zap.Stringer("messageOp", op), - ) - return - } - msgMetrics.NumFailed.Inc() + op := msg.Op().String() + m.NumSendFailed.With(prometheus.Labels{ + opLabel: op, + }).Inc() } func (m *Metrics) Received(msg message.InboundMessage, msgLen uint32) { - op := msg.Op() - msgMetrics := m.MessageMetrics[op] - if msgMetrics == nil { - m.Log.Error( - "unknown message received", - zap.Stringer("messageOp", op), - ) - return - } - msgMetrics.NumReceived.Inc() - msgMetrics.ReceivedBytes.Add(float64(msgLen)) - // assume that if [saved] == 0, [msg] wasn't compressed - if saved := msg.BytesSavedCompression(); saved != 0 { - msgMetrics.SavedReceivedBytes.Observe(float64(saved)) + op := msg.Op().String() + saved := msg.BytesSavedCompression() + compressed := saved != 0 // assume that if [saved] == 0, [msg] wasn't compressed + compressedStr := strconv.FormatBool(compressed) + + m.Messages.With(prometheus.Labels{ + ioLabel: receivedLabel, + opLabel: op, + compressedLabel: compressedStr, + }).Inc() + + bytesLabel := prometheus.Labels{ + ioLabel: receivedLabel, + opLabel: op, } + m.Bytes.With(bytesLabel).Add(float64(msgLen)) + m.BytesSaved.With(bytesLabel).Add(float64(saved)) } diff --git a/network/peer/msg_length.go b/network/peer/msg_length.go index 625034913d9f..00f3396672b3 100644 --- a/network/peer/msg_length.go +++ b/network/peer/msg_length.go @@ -12,29 +12,19 @@ import ( ) var ( - errInvalidMaxMessageLength = errors.New("invalid maximum message length") errInvalidMessageLength = errors.New("invalid message length") errMaxMessageLengthExceeded = errors.New("maximum message length exceeded") ) -// Used to mask the most significant bit that was used to indicate that the -// message format uses protocol buffers. -// -// TODO: Once the v1.11 is activated, this mask should be removed. 
-const bitmaskCodec = uint32(1 << 31) - -// Assumes the specified [msgLen] will never >= 1<<31. func writeMsgLen(msgLen uint32, maxMsgLen uint32) ([wrappers.IntLen]byte, error) { - if maxMsgLen >= bitmaskCodec { + if msgLen > maxMsgLen { return [wrappers.IntLen]byte{}, fmt.Errorf( - "%w; maximum message length must be <%d to be able to embed codec information at most significant bit", - errInvalidMaxMessageLength, - bitmaskCodec, + "%w; the message length %d exceeds the specified limit %d", + errMaxMessageLengthExceeded, + msgLen, + maxMsgLen, ) } - if msgLen > maxMsgLen { - return [wrappers.IntLen]byte{}, fmt.Errorf("%w; the message length %d exceeds the specified limit %d", errMaxMessageLengthExceeded, msgLen, maxMsgLen) - } b := [wrappers.IntLen]byte{} binary.BigEndian.PutUint32(b[:], msgLen) @@ -42,15 +32,7 @@ func writeMsgLen(msgLen uint32, maxMsgLen uint32) ([wrappers.IntLen]byte, error) return b, nil } -// Assumes the read [msgLen] will never >= 1<<31. func readMsgLen(b []byte, maxMsgLen uint32) (uint32, error) { - if maxMsgLen >= bitmaskCodec { - return 0, fmt.Errorf( - "%w; maximum message length must be <%d to be able to embed codec information at most significant bit", - errInvalidMaxMessageLength, - bitmaskCodec, - ) - } if len(b) != wrappers.IntLen { return 0, fmt.Errorf( "%w; readMsgLen only supports 4 bytes (got %d bytes)", @@ -61,12 +43,6 @@ func readMsgLen(b []byte, maxMsgLen uint32) (uint32, error) { // parse the message length msgLen := binary.BigEndian.Uint32(b) - - // Because we always use proto messages, there's no need to check the most - // significant bit to inspect the message format. So, we just zero the proto - // flag. 
- msgLen &^= bitmaskCodec - if msgLen > maxMsgLen { return 0, fmt.Errorf( "%w; the message length %d exceeds the specified limit %d", diff --git a/network/peer/msg_length_test.go b/network/peer/msg_length_test.go index 97866a7d95cf..c7a587638148 100644 --- a/network/peer/msg_length_test.go +++ b/network/peer/msg_length_test.go @@ -21,19 +21,9 @@ func TestWriteMsgLen(t *testing.T) { expectedErr error }{ { - msgLen: math.MaxUint32, - msgLimit: math.MaxUint32, - expectedErr: errInvalidMaxMessageLength, - }, - { - msgLen: bitmaskCodec, - msgLimit: bitmaskCodec, - expectedErr: errInvalidMaxMessageLength, - }, - { - msgLen: bitmaskCodec - 1, - msgLimit: bitmaskCodec - 1, - expectedErr: nil, + msgLen: constants.DefaultMaxMessageSize, + msgLimit: 1, + expectedErr: errMaxMessageLengthExceeded, }, { msgLen: constants.DefaultMaxMessageSize, @@ -45,11 +35,6 @@ func TestWriteMsgLen(t *testing.T) { msgLimit: constants.DefaultMaxMessageSize, expectedErr: nil, }, - { - msgLen: constants.DefaultMaxMessageSize, - msgLimit: 1, - expectedErr: errMaxMessageLengthExceeded, - }, } for _, tv := range tt { msgLenBytes, err := writeMsgLen(tv.msgLen, tv.msgLimit) @@ -73,12 +58,6 @@ func TestReadMsgLen(t *testing.T) { expectedErr error expectedMsgLen uint32 }{ - { - msgLenBytes: []byte{0xFF, 0xFF, 0xFF, 0xFF}, - msgLimit: math.MaxUint32, - expectedErr: errInvalidMaxMessageLength, - expectedMsgLen: 0, - }, { msgLenBytes: []byte{0b11111111, 0xFF}, msgLimit: math.MaxInt32, @@ -86,26 +65,20 @@ func TestReadMsgLen(t *testing.T) { expectedMsgLen: 0, }, { - msgLenBytes: []byte{0b11111111, 0xFF, 0xFF, 0xFF}, + msgLenBytes: []byte{0xFF, 0xFF, 0xFF, 0xFF}, msgLimit: constants.DefaultMaxMessageSize, expectedErr: errMaxMessageLengthExceeded, expectedMsgLen: 0, }, { - msgLenBytes: []byte{0b11111111, 0xFF, 0xFF, 0xFF}, - msgLimit: math.MaxInt32, - expectedErr: nil, - expectedMsgLen: math.MaxInt32, - }, - { - msgLenBytes: []byte{0b10000000, 0x00, 0x00, 0x01}, - msgLimit: math.MaxInt32, + msgLenBytes: 
[]byte{0xFF, 0xFF, 0xFF, 0xFF}, + msgLimit: math.MaxUint32, expectedErr: nil, - expectedMsgLen: 1, + expectedMsgLen: math.MaxUint32, }, { - msgLenBytes: []byte{0b10000000, 0x00, 0x00, 0x01}, - msgLimit: 1, + msgLenBytes: []byte{0x00, 0x00, 0x00, 0x01}, + msgLimit: 10, expectedErr: nil, expectedMsgLen: 1, }, @@ -126,34 +99,3 @@ func TestReadMsgLen(t *testing.T) { require.Equal(tv.expectedMsgLen, msgLenAfterWrite) } } - -func TestBackwardsCompatibleReadMsgLen(t *testing.T) { - require := require.New(t) - - tt := []struct { - msgLenBytes []byte - msgLimit uint32 - expectedMsgLen uint32 - }{ - { - msgLenBytes: []byte{0b01111111, 0xFF, 0xFF, 0xFF}, - msgLimit: math.MaxInt32, - expectedMsgLen: math.MaxInt32, - }, - { - msgLenBytes: []byte{0b00000000, 0x00, 0x00, 0x01}, - msgLimit: math.MaxInt32, - expectedMsgLen: 1, - }, - { - msgLenBytes: []byte{0b00000000, 0x00, 0x00, 0x01}, - msgLimit: 1, - expectedMsgLen: 1, - }, - } - for _, tv := range tt { - msgLen, err := readMsgLen(tv.msgLenBytes, tv.msgLimit) - require.NoError(err) - require.Equal(tv.expectedMsgLen, msgLen) - } -} diff --git a/network/peer/peer.go b/network/peer/peer.go index 58b4e648125e..a92791ff72ee 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go @@ -10,6 +10,7 @@ import ( "io" "math" "net" + "net/netip" "sync" "sync/atomic" "time" @@ -31,9 +32,20 @@ import ( "github.com/ava-labs/avalanchego/version" ) -// maxBloomSaltLen restricts the allowed size of the bloom salt to prevent -// excessively expensive bloom filter contains checks. -const maxBloomSaltLen = 32 +const ( + // maxBloomSaltLen restricts the allowed size of the bloom salt to prevent + // excessively expensive bloom filter contains checks. + maxBloomSaltLen = 32 + // maxNumTrackedSubnets limits how many subnets a peer can track to prevent + // excessive memory usage. 
+ maxNumTrackedSubnets = 16 + + disconnectingLog = "disconnecting from peer" + failedToCreateMessageLog = "failed to create message" + failedToSetDeadlineLog = "failed to set connection deadline" + failedToGetUptimeLog = "failed to get peer uptime percentage" + malformedMessageLog = "malformed message" +) var ( errClosed = errors.New("closed") @@ -92,11 +104,6 @@ type Peer interface { // guaranteed not to be delivered to the peer. Send(ctx context.Context, msg message.OutboundMessage) bool - // StartSendPeerList attempts to send a PeerList message to this peer on - // this peer's gossip routine. It is not guaranteed that a PeerList will be - // sent. - StartSendPeerList() - // StartSendGetPeerList attempts to send a GetPeerList message to this peer // on this peer's gossip routine. It is not guaranteed that a GetPeerList // will be sent. @@ -136,8 +143,8 @@ type peer struct { // version is the claimed version the peer is running that we received in // the Handshake message. version *version.Application - // trackedSubnets is the subset of subnetIDs the peer sent us in the Handshake - // message that we are also tracking. + // trackedSubnets are the subnetIDs the peer sent us in the Handshake + // message. The primary network ID is always included. trackedSubnets set.Set[ids.ID] // options of ACPs provided in the Handshake message. 
supportedACPs set.Set[uint32] @@ -186,10 +193,6 @@ type peer struct { // Must only be accessed atomically lastSent, lastReceived int64 - // peerListChan signals that we should attempt to send a PeerList to this - // peer - peerListChan chan struct{} - // getPeerListChan signals that we should attempt to send a GetPeerList to // this peer getPeerListChan chan struct{} @@ -219,7 +222,6 @@ func Start( onClosingCtxCancel: onClosingCtxCancel, onClosed: make(chan struct{}), observedUptimes: make(map[ids.ID]uint32), - peerListChan: make(chan struct{}, 1), getPeerListChan: make(chan struct{}, 1), } @@ -268,14 +270,8 @@ func (p *peer) AwaitReady(ctx context.Context) error { } func (p *peer) Info() Info { - publicIPStr := "" - if !p.ip.IsZero() { - publicIPStr = p.ip.IPPort.String() - } - - uptimes := make(map[ids.ID]json.Uint32, p.trackedSubnets.Len()) - - for subnetID := range p.trackedSubnets { + uptimes := make(map[ids.ID]json.Uint32, p.MySubnets.Len()) + for subnetID := range p.MySubnets { uptime, exist := p.ObservedUptime(subnetID) if !exist { continue @@ -288,9 +284,10 @@ func (p *peer) Info() Info { primaryUptime = 0 } + ip, _ := ips.ParseAddrPort(p.conn.RemoteAddr().String()) return Info{ - IP: p.conn.RemoteAddr().String(), - PublicIP: publicIPStr, + IP: ip, + PublicIP: p.ip.AddrPort, ID: p.id, Version: p.version.String(), LastSent: p.LastSent(), @@ -327,13 +324,6 @@ func (p *peer) Send(ctx context.Context, msg message.OutboundMessage) bool { return p.messageQueue.Push(ctx, msg) } -func (p *peer) StartSendPeerList() { - select { - case p.peerListChan <- struct{}{}: - default: - } -} - func (p *peer) StartSendGetPeerList() { select { case p.getPeerListChan <- struct{}{}: @@ -401,8 +391,9 @@ func (p *peer) readMessages() { for { // Time out and close connection if we can't read the message length if err := p.conn.SetReadDeadline(p.nextTimeout()); err != nil { - p.Log.Verbo("error setting the connection read timeout", + p.Log.Verbo(failedToSetDeadlineLog, 
zap.Stringer("nodeID", p.id), + zap.String("direction", "read"), zap.Error(err), ) return @@ -410,7 +401,7 @@ func (p *peer) readMessages() { // Read the message length if _, err := io.ReadFull(reader, msgLenBytes); err != nil { - p.Log.Verbo("error reading message", + p.Log.Verbo("error reading message length", zap.Stringer("nodeID", p.id), zap.Error(err), ) @@ -420,7 +411,7 @@ func (p *peer) readMessages() { // Parse the message length msgLen, err := readMsgLen(msgLenBytes, constants.DefaultMaxMessageSize) if err != nil { - p.Log.Verbo("error reading message length", + p.Log.Verbo("error parsing message length", zap.Stringer("nodeID", p.id), zap.Error(err), ) @@ -454,8 +445,9 @@ func (p *peer) readMessages() { // Time out and close connection if we can't read message if err := p.conn.SetReadDeadline(p.nextTimeout()); err != nil { - p.Log.Verbo("error setting the connection read timeout", + p.Log.Verbo(failedToSetDeadlineLog, zap.Stringer("nodeID", p.id), + zap.String("direction", "read"), zap.Error(err), ) onFinishedHandling() @@ -495,7 +487,7 @@ func (p *peer) readMessages() { zap.Error(err), ) - p.Metrics.FailedToParse.Inc() + p.Metrics.NumFailedToParse.Inc() // Couldn't parse the message. Read the next one. 
onFinishedHandling() @@ -531,29 +523,21 @@ func (p *peer) writeMessages() { ) return } - if mySignedIP.Port == 0 { + if port := mySignedIP.AddrPort.Port(); port == 0 { p.Log.Error("signed IP has invalid port", zap.Stringer("nodeID", p.id), - zap.Uint16("port", mySignedIP.Port), + zap.Uint16("port", port), ) return } myVersion := p.VersionCompatibility.Version() - legacyApplication := &version.Application{ - Name: version.LegacyAppName, - Major: myVersion.Major, - Minor: myVersion.Minor, - Patch: myVersion.Patch, - } - knownPeersFilter, knownPeersSalt := p.Network.KnownPeers() msg, err := p.MessageCreator.Handshake( p.NetworkID, p.Clock.Unix(), - mySignedIP.IPPort, - legacyApplication.String(), + mySignedIP.AddrPort, myVersion.Name, uint32(myVersion.Major), uint32(myVersion.Minor), @@ -568,9 +552,9 @@ func (p *peer) writeMessages() { knownPeersSalt, ) if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.HandshakeOp), + p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), zap.Error(err), ) return @@ -613,8 +597,9 @@ func (p *peer) writeMessage(writer io.Writer, msg message.OutboundMessage) { ) if err := p.conn.SetWriteDeadline(p.nextTimeout()); err != nil { - p.Log.Verbo("error setting write deadline", + p.Log.Verbo(failedToSetDeadlineLog, zap.Stringer("nodeID", p.id), + zap.String("direction", "write"), zap.Error(err), ) return @@ -656,51 +641,22 @@ func (p *peer) sendNetworkMessages() { for { select { - case <-p.peerListChan: - peerIPs := p.Config.Network.Peers(p.id, bloom.EmptyFilter, nil) - if len(peerIPs) == 0 { - p.Log.Verbo( - "skipping peer gossip as there are no unknown peers", - zap.Stringer("nodeID", p.id), - ) - continue - } - - // Bypass throttling is disabled here to follow the non-handshake - // message sending pattern. 
- msg, err := p.Config.MessageCreator.PeerList(peerIPs, false /*=bypassThrottling*/) - if err != nil { - p.Log.Error("failed to create peer list message", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - continue - } - - if !p.Send(p.onClosingCtx, msg) { - p.Log.Debug("failed to send peer list", - zap.Stringer("nodeID", p.id), - ) - } case <-p.getPeerListChan: knownPeersFilter, knownPeersSalt := p.Config.Network.KnownPeers() msg, err := p.Config.MessageCreator.GetPeerList(knownPeersFilter, knownPeersSalt) if err != nil { - p.Log.Error("failed to create get peer list message", + p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.GetPeerListOp), zap.Error(err), ) - continue + return } - if !p.Send(p.onClosingCtx, msg) { - p.Log.Debug("failed to send get peer list", - zap.Stringer("nodeID", p.id), - ) - } + p.Send(p.onClosingCtx, msg) case <-sendPingsTicker.C: if !p.Network.AllowConnection(p.id) { - p.Log.Debug("disconnecting from peer", + p.Log.Debug(disconnectingLog, zap.String("reason", "connection is no longer desired"), zap.Stringer("nodeID", p.id), ) @@ -717,9 +673,9 @@ func (p *peer) sendNetworkMessages() { primaryUptime, subnetUptimes := p.getUptimes() pingMessage, err := p.MessageCreator.Ping(primaryUptime, subnetUptimes) if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.PingOp), + p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PingOp), zap.Error(err), ) return @@ -743,7 +699,7 @@ func (p *peer) sendNetworkMessages() { // callback to avoid signature verification on the P-chain accept path. 
func (p *peer) shouldDisconnect() bool { if err := p.VersionCompatibility.Compatible(p.version); err != nil { - p.Log.Debug("disconnecting from peer", + p.Log.Debug(disconnectingLog, zap.String("reason", "version not compatible"), zap.Stringer("nodeID", p.id), zap.Stringer("peerVersion", p.version), @@ -759,36 +715,20 @@ func (p *peer) shouldDisconnect() bool { return false } - postDurango := p.Clock.Time().After(version.GetDurangoTime(constants.MainnetID)) - if postDurango && p.ip.BLSSignature == nil { - p.Log.Debug("disconnecting from peer", - zap.String("reason", "missing BLS signature"), - zap.Stringer("nodeID", p.id), - ) - return true - } - - // If Durango hasn't activated on mainnet yet, we don't require BLS - // signatures to be provided. However, if they are provided, verify that - // they are correct. - if p.ip.BLSSignature == nil { - return false - } - validSignature := bls.VerifyProofOfPossession( vdr.PublicKey, p.ip.BLSSignature, p.ip.UnsignedIP.bytes(), ) if !validSignature { - p.Log.Debug("disconnecting from peer", + p.Log.Debug(disconnectingLog, zap.String("reason", "invalid BLS signature"), zap.Stringer("nodeID", p.id), ) return true } - // Avoid unnecessary signature verifications by only verifing the signature + // Avoid unnecessary signature verifications by only verifying the signature // once per validation period. 
p.txIDOfVerifiedBLSKey = vdr.TxID return false @@ -818,11 +758,10 @@ func (p *peer) handle(msg message.InboundMessage) { return } if !p.finishedHandshake.Get() { - p.Log.Debug( - "dropping message", - zap.String("reason", "handshake isn't finished"), + p.Log.Debug("dropping message", zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", msg.Op()), + zap.String("reason", "handshake isn't finished"), ) msg.OnFinishedHandling() return @@ -833,18 +772,67 @@ func (p *peer) handle(msg message.InboundMessage) { } func (p *peer) handlePing(msg *p2p.Ping) { - p.observeUptimes(msg.Uptime, msg.SubnetUptimes) + if msg.Uptime > 100 { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PingOp), + zap.Stringer("subnetID", constants.PrimaryNetworkID), + zap.Uint32("uptime", msg.Uptime), + ) + p.StartClose() + return + } + p.observeUptime(constants.PrimaryNetworkID, msg.Uptime) + + for _, subnetUptime := range msg.SubnetUptimes { + subnetID, err := ids.ToID(subnetUptime.SubnetId) + if err != nil { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PingOp), + zap.String("field", "subnetID"), + zap.Error(err), + ) + p.StartClose() + return + } + + if !p.MySubnets.Contains(subnetID) { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PingOp), + zap.Stringer("subnetID", subnetID), + zap.String("reason", "not tracking subnet"), + ) + p.StartClose() + return + } + + uptime := subnetUptime.Uptime + if uptime > 100 { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PingOp), + zap.Stringer("subnetID", subnetID), + zap.Uint32("uptime", uptime), + ) + p.StartClose() + return + } + p.observeUptime(subnetID, uptime) + } - primaryUptime, subnetUptimes := p.getUptimes() - pongMessage, err := p.MessageCreator.Pong(primaryUptime, subnetUptimes) + pongMessage, err := p.MessageCreator.Pong() 
if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.PongOp), + p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PongOp), zap.Error(err), ) + p.StartClose() return } + p.Send(p.onClosingCtx, pongMessage) } @@ -854,7 +842,7 @@ func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { constants.PrimaryNetworkID, ) if err != nil { - p.Log.Debug("failed to get peer primary uptime percentage", + p.Log.Debug(failedToGetUptimeLog, zap.Stringer("nodeID", p.id), zap.Stringer("subnetID", constants.PrimaryNetworkID), zap.Error(err), @@ -862,11 +850,15 @@ func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { primaryUptime = 0 } - subnetUptimes := make([]*p2p.SubnetUptime, 0, p.trackedSubnets.Len()) - for subnetID := range p.trackedSubnets { + subnetUptimes := make([]*p2p.SubnetUptime, 0, p.MySubnets.Len()) + for subnetID := range p.MySubnets { + if !p.trackedSubnets.Contains(subnetID) { + continue + } + subnetUptime, err := p.UptimeCalculator.CalculateUptimePercent(p.id, subnetID) if err != nil { - p.Log.Debug("failed to get peer uptime percentage", + p.Log.Debug(failedToGetUptimeLog, zap.Stringer("nodeID", p.id), zap.Stringer("subnetID", subnetID), zap.Error(err), @@ -885,64 +877,7 @@ func (p *peer) getUptimes() (uint32, []*p2p.SubnetUptime) { return primaryUptimePercent, subnetUptimes } -func (p *peer) handlePong(msg *p2p.Pong) { - // TODO: Remove once everyone sends uptimes in Ping messages. - p.observeUptimes(msg.Uptime, msg.SubnetUptimes) -} - -func (p *peer) observeUptimes(primaryUptime uint32, subnetUptimes []*p2p.SubnetUptime) { - // TODO: Remove once everyone sends uptimes in Ping messages. - // - // If primaryUptime is 0, the message may not include any uptimes. This may - // happen with old Ping messages or new Pong messages. 
- if primaryUptime == 0 { - return - } - - if primaryUptime > 100 { - p.Log.Debug("dropping message with invalid uptime", - zap.Stringer("nodeID", p.id), - zap.Stringer("subnetID", constants.PrimaryNetworkID), - zap.Uint32("uptime", primaryUptime), - ) - p.StartClose() - return - } - p.observeUptime(constants.PrimaryNetworkID, primaryUptime) - - for _, subnetUptime := range subnetUptimes { - subnetID, err := ids.ToID(subnetUptime.SubnetId) - if err != nil { - p.Log.Debug("dropping message with invalid subnetID", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - p.StartClose() - return - } - - if !p.MySubnets.Contains(subnetID) { - p.Log.Debug("dropping message with unexpected subnetID", - zap.Stringer("nodeID", p.id), - zap.Stringer("subnetID", subnetID), - ) - p.StartClose() - return - } - - uptime := subnetUptime.Uptime - if uptime > 100 { - p.Log.Debug("dropping message with invalid uptime", - zap.Stringer("nodeID", p.id), - zap.Stringer("subnetID", subnetID), - zap.Uint32("uptime", uptime), - ) - p.StartClose() - return - } - p.observeUptime(subnetID, uptime) - } -} +func (*peer) handlePong(*p2p.Pong) {} // Record that the given peer perceives our uptime for the given [subnetID] // to be [uptime]. @@ -956,16 +891,20 @@ func (p *peer) observeUptime(subnetID ids.ID, uptime uint32) { func (p *peer) handleHandshake(msg *p2p.Handshake) { if p.gotHandshake.Get() { - // TODO: this should never happen, should we close the connection here? 
- p.Log.Verbo("dropping duplicated handshake message", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("reason", "already received handshake"), ) + p.StartClose() return } if msg.NetworkId != p.NetworkID { - p.Log.Debug("networkID mismatch", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "networkID"), zap.Uint32("peerNetworkID", msg.NetworkId), zap.Uint32("ourNetworkID", p.NetworkID), ) @@ -973,80 +912,73 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { return } - myTime := p.Clock.Time() - myTimeUnix := uint64(myTime.Unix()) - clockDifference := math.Abs(float64(msg.MyTime) - float64(myTimeUnix)) + localTime := p.Clock.Time() + localUnixTime := uint64(localTime.Unix()) + clockDifference := math.Abs(float64(msg.MyTime) - float64(localUnixTime)) - p.Metrics.ClockSkew.Observe(clockDifference) + p.Metrics.ClockSkewCount.Inc() + p.Metrics.ClockSkewSum.Add(clockDifference) if clockDifference > p.MaxClockDifference.Seconds() { + log := p.Log.Debug if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { - p.Log.Warn("beacon reports out of sync time", - zap.Stringer("nodeID", p.id), - zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTimeUnix), - ) - } else { - p.Log.Debug("peer reports out of sync time", - zap.Stringer("nodeID", p.id), - zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTimeUnix), - ) + log = p.Log.Warn } + log(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "myTime"), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("localTime", localUnixTime), + ) p.StartClose() return } - if msg.Client != nil { - p.version = &version.Application{ - Name: msg.Client.Name, - Major: int(msg.Client.Major), - Minor: int(msg.Client.Minor), - Patch: int(msg.Client.Patch), - } - } else 
{ - // Handle legacy version field - peerVersion, err := version.ParseLegacyApplication(msg.MyVersion) - if err != nil { - p.Log.Debug("failed to parse peer version", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) - p.StartClose() - return - } - p.version = peerVersion + p.version = &version.Application{ + Name: msg.Client.GetName(), + Major: int(msg.Client.GetMajor()), + Minor: int(msg.Client.GetMinor()), + Patch: int(msg.Client.GetPatch()), } if p.VersionCompatibility.Version().Before(p.version) { + log := p.Log.Debug if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { - p.Log.Info("beacon attempting to connect with newer version. You may want to update your client", - zap.Stringer("nodeID", p.id), - zap.Stringer("beaconVersion", p.version), - ) - } else { - p.Log.Debug("peer attempting to connect with newer version. You may want to update your client", - zap.Stringer("nodeID", p.id), - zap.Stringer("peerVersion", p.version), - ) + log = p.Log.Info } + log("peer attempting to connect with newer version. 
You may want to update your client", + zap.Stringer("nodeID", p.id), + zap.Stringer("peerVersion", p.version), + ) } // handle subnet IDs + if numTrackedSubnets := len(msg.TrackedSubnets); numTrackedSubnets > maxNumTrackedSubnets { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "trackedSubnets"), + zap.Int("numTrackedSubnets", numTrackedSubnets), + ) + p.StartClose() + return + } + + p.trackedSubnets.Add(constants.PrimaryNetworkID) for _, subnetIDBytes := range msg.TrackedSubnets { subnetID, err := ids.ToID(subnetIDBytes) if err != nil { - p.Log.Debug("failed to parse peer's tracked subnets", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "trackedSubnets"), zap.Error(err), ) p.StartClose() return } - // add only if we also track this subnet - if p.MySubnets.Contains(subnetID) { - p.trackedSubnets.Add(subnetID) - } + p.trackedSubnets.Add(subnetID) } for _, acp := range msg.SupportedAcps { @@ -1061,10 +993,10 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { } if p.supportedACPs.Overlaps(p.objectedACPs) { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), - zap.String("field", "ACPs"), + zap.String("field", "acps"), zap.Reflect("supportedACPs", p.supportedACPs), zap.Reflect("objectedACPs", p.objectedACPs), ) @@ -1080,10 +1012,10 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { var err error knownPeers, err = bloom.Parse(msg.KnownPeers.Filter) if err != nil { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), - zap.String("field", "KnownPeers.Filter"), + zap.String("field", "knownPeers.filter"), zap.Error(err), ) p.StartClose() @@ -1092,10 +1024,10 @@ func (p *peer) 
handleHandshake(msg *p2p.Handshake) { salt = msg.KnownPeers.Salt if saltLen := len(salt); saltLen > maxBloomSaltLen { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), - zap.String("field", "KnownPeers.Salt"), + zap.String("field", "knownPeers.salt"), zap.Int("saltLen", saltLen), ) p.StartClose() @@ -1103,23 +1035,25 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { } } - // "net.IP" type in Golang is 16-byte - if ipLen := len(msg.IpAddr); ipLen != net.IPv6len { - p.Log.Debug("message with invalid field", + addr, ok := ips.AddrFromSlice(msg.IpAddr) + if !ok { + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), - zap.String("field", "IP"), - zap.Int("ipLen", ipLen), + zap.String("field", "ip"), + zap.Int("ipLen", len(msg.IpAddr)), ) p.StartClose() return } + + port := uint16(msg.IpPort) if msg.IpPort == 0 { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.HandshakeOp), - zap.String("field", "Port"), - zap.Uint32("port", msg.IpPort), + zap.String("field", "port"), + zap.Uint16("port", port), ) p.StartClose() return @@ -1127,55 +1061,48 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { p.ip = &SignedIP{ UnsignedIP: UnsignedIP{ - IPPort: ips.IPPort{ - IP: msg.IpAddr, - Port: uint16(msg.IpPort), - }, + AddrPort: netip.AddrPortFrom( + addr, + port, + ), Timestamp: msg.IpSigningTime, }, TLSSignature: msg.IpNodeIdSig, } - maxTimestamp := myTime.Add(p.MaxClockDifference) + maxTimestamp := localTime.Add(p.MaxClockDifference) if err := p.ip.Verify(p.cert, maxTimestamp); err != nil { + log := p.Log.Debug if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { - p.Log.Warn("beacon has invalid signature or is out of sync", - zap.Stringer("nodeID", p.id), - zap.String("signatureType", "tls"), 
- zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTimeUnix), - zap.Error(err), - ) - } else { - p.Log.Debug("peer has invalid signature or is out of sync", - zap.Stringer("nodeID", p.id), - zap.String("signatureType", "tls"), - zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTimeUnix), - zap.Error(err), - ) + log = p.Log.Warn } + log(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "tlsSignature"), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("localTime", localUnixTime), + zap.Error(err), + ) p.StartClose() return } - // TODO: After v1.11.x is activated, require the key to be provided. - if len(msg.IpBlsSig) > 0 { - signature, err := bls.SignatureFromBytes(msg.IpBlsSig) - if err != nil { - p.Log.Debug("peer has malformed signature", - zap.Stringer("nodeID", p.id), - zap.String("signatureType", "bls"), - zap.Error(err), - ) - p.StartClose() - return - } - - p.ip.BLSSignature = signature - p.ip.BLSSignatureBytes = msg.IpBlsSig + signature, err := bls.SignatureFromBytes(msg.IpBlsSig) + if err != nil { + p.Log.Debug(malformedMessageLog, + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.HandshakeOp), + zap.String("field", "blsSignature"), + zap.Error(err), + ) + p.StartClose() + return } + p.ip.BLSSignature = signature + p.ip.BLSSignatureBytes = msg.IpBlsSig + // If the peer is running an incompatible version or has an invalid BLS // signature, disconnect from them prior to marking the handshake as // completed. @@ -1192,28 +1119,33 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { // acknowledged correctly. 
peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, true /*=bypassThrottling*/) if err != nil { - p.Log.Error("failed to create peer list handshake message", + p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.Error(err), ) + p.StartClose() return } if !p.Send(p.onClosingCtx, peerListMsg) { // Because throttling was marked to be bypassed with this message, // sending should only fail if the peer has started closing. - p.Log.Debug("failed to send peer list for handshake", + p.Log.Debug("failed to send reliable message", zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), zap.Error(p.onClosingCtx.Err()), ) + p.StartClose() } } func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { if !p.finishedHandshake.Get() { - p.Log.Verbo("dropping get peer list message", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.GetPeerListOp), + zap.String("reason", "not finished handshake"), ) return } @@ -1221,10 +1153,10 @@ func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { knownPeersMsg := msg.GetKnownPeers() filter, err := bloom.Parse(knownPeersMsg.GetFilter()) if err != nil { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.GetPeerListOp), - zap.String("field", "KnownPeers.Filter"), + zap.String("field", "knownPeers.filter"), zap.Error(err), ) p.StartClose() @@ -1233,10 +1165,10 @@ func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { salt := knownPeersMsg.GetSalt() if saltLen := len(salt); saltLen > maxBloomSaltLen { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.GetPeerListOp), - zap.String("field", "KnownPeers.Salt"), + zap.String("field", "knownPeers.salt"), zap.Int("saltLen", saltLen), ) p.StartClose() @@ -1255,18 +1187,15 
@@ func (p *peer) handleGetPeerList(msg *p2p.GetPeerList) { // sending pattern. peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, false /*=bypassThrottling*/) if err != nil { - p.Log.Error("failed to create peer list message", + p.Log.Error(failedToCreateMessageLog, zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), zap.Error(err), ) return } - if !p.Send(p.onClosingCtx, peerListMsg) { - p.Log.Debug("failed to send peer list", - zap.Stringer("nodeID", p.id), - ) - } + p.Send(p.onClosingCtx, peerListMsg) } func (p *peer) handlePeerList(msg *p2p.PeerList) { @@ -1280,68 +1209,57 @@ func (p *peer) handlePeerList(msg *p2p.PeerList) { close(p.onFinishHandshake) } - // Invariant: We do not account for clock skew here, as the sender of the - // certificate is expected to account for clock skew during the activation - // of Durango. - durangoTime := version.GetDurangoTime(p.NetworkID) - beforeDurango := time.Now().Before(durangoTime) discoveredIPs := make([]*ips.ClaimedIPPort, len(msg.ClaimedIpPorts)) // the peers this peer told us about for i, claimedIPPort := range msg.ClaimedIpPorts { - var ( - tlsCert *staking.Certificate - err error - ) - if beforeDurango { - tlsCert, err = staking.ParseCertificate(claimedIPPort.X509Certificate) - } else { - tlsCert, err = staking.ParseCertificatePermissive(claimedIPPort.X509Certificate) - } + tlsCert, err := staking.ParseCertificate(claimedIPPort.X509Certificate) if err != nil { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), - zap.String("field", "Cert"), + zap.String("field", "cert"), zap.Error(err), ) p.StartClose() return } - // "net.IP" type in Golang is 16-byte - if ipLen := len(claimedIPPort.IpAddr); ipLen != net.IPv6len { - p.Log.Debug("message with invalid field", + addr, ok := ips.AddrFromSlice(claimedIPPort.IpAddr) + if !ok { + p.Log.Debug(malformedMessageLog, 
zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), - zap.String("field", "IP"), - zap.Int("ipLen", ipLen), + zap.String("field", "ip"), + zap.Int("ipLen", len(claimedIPPort.IpAddr)), ) p.StartClose() return } - if claimedIPPort.IpPort == 0 { - p.Log.Debug("message with invalid field", + + port := uint16(claimedIPPort.IpPort) + if port == 0 { + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), - zap.String("field", "Port"), - zap.Uint32("port", claimedIPPort.IpPort), + zap.String("field", "port"), + zap.Uint16("port", port), ) - // TODO: After v1.11.x is activated, close the peer here. - continue + p.StartClose() + return } discoveredIPs[i] = ips.NewClaimedIPPort( tlsCert, - ips.IPPort{ - IP: claimedIPPort.IpAddr, - Port: uint16(claimedIPPort.IpPort), - }, + netip.AddrPortFrom( + addr, + port, + ), claimedIPPort.Timestamp, claimedIPPort.Signature, ) } if err := p.Network.Track(discoveredIPs); err != nil { - p.Log.Debug("message with invalid field", + p.Log.Debug(malformedMessageLog, zap.Stringer("nodeID", p.id), zap.Stringer("messageOp", message.PeerListOp), zap.String("field", "claimedIP"), diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index e52273fc6fbe..e29edbe17ba6 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -7,6 +7,7 @@ import ( "context" "crypto" "net" + "net/netip" "testing" "time" @@ -22,14 +23,13 @@ import ( "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" - 
"github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" ) @@ -40,7 +40,6 @@ type testPeer struct { type rawTestPeer struct { config *Config - conn net.Conn cert *staking.Certificate nodeID ids.NodeID inboundMsgChan <-chan message.InboundMessage @@ -52,7 +51,6 @@ func newMessageCreator(t *testing.T) message.Creator { mc, err := message.NewCreator( logging.NoLog{}, prometheus.NewRegistry(), - "", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -61,30 +59,11 @@ func newMessageCreator(t *testing.T) message.Creator { return mc } -func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPeer, *rawTestPeer) { +func newConfig(t *testing.T) Config { t.Helper() require := require.New(t) - conn0, conn1 := net.Pipe() - - tlsCert0, err := staking.NewTLSCert() - require.NoError(err) - cert0 := staking.CertificateFromX509(tlsCert0.Leaf) - - tlsCert1, err := staking.NewTLSCert() - require.NoError(err) - cert1 := staking.CertificateFromX509(tlsCert1.Leaf) - - nodeID0 := ids.NodeIDFromCert(cert0) - nodeID1 := ids.NodeIDFromCert(cert1) - - mc := newMessageCreator(t) - - metrics, err := NewMetrics( - logging.NoLog{}, - "", - prometheus.NewRegistry(), - ) + metrics, err := NewMetrics(prometheus.NewRegistry()) require.NoError(err) resourceTracker, err := tracker.NewResourceTracker( @@ -95,14 +74,17 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee ) require.NoError(err) - sharedConfig := Config{ + return Config{ + ReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, + WriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, Metrics: metrics, - MessageCreator: mc, + MessageCreator: newMessageCreator(t), Log: logging.NoLog{}, InboundMsgThrottler: throttling.NewNoInboundThrottler(), + Network: TestNetwork, + Router: nil, VersionCompatibility: version.GetCompatibility(constants.LocalID), - MySubnets: trackedSubnets, - UptimeCalculator: uptime.NoOpCalculator, + MySubnets: 
nil, Beacons: validators.NewManager(), Validators: validators.NewManager(), NetworkID: constants.LocalID, @@ -110,141 +92,94 @@ func makeRawTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*rawTestPee PongTimeout: constants.DefaultPingPongTimeout, MaxClockDifference: time.Minute, ResourceTracker: resourceTracker, + UptimeCalculator: uptime.NoOpCalculator, + IPSigner: nil, } - peerConfig0 := sharedConfig - peerConfig1 := sharedConfig - - ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 1) - tls0 := tlsCert0.PrivateKey.(crypto.Signer) - bls0, err := bls.NewSecretKey() - require.NoError(err) +} - peerConfig0.IPSigner = NewIPSigner(ip0, tls0, bls0) +func newRawTestPeer(t *testing.T, config Config) *rawTestPeer { + t.Helper() + require := require.New(t) - peerConfig0.Network = TestNetwork - inboundMsgChan0 := make(chan message.InboundMessage) - peerConfig0.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { - inboundMsgChan0 <- msg - }) + tlsCert, err := staking.NewTLSCert() + require.NoError(err) + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) + nodeID := ids.NodeIDFromCert(cert) - ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 2) - tls1 := tlsCert1.PrivateKey.(crypto.Signer) - bls1, err := bls.NewSecretKey() + ip := utils.NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), + 1, + )) + tls := tlsCert.PrivateKey.(crypto.Signer) + bls, err := bls.NewSecretKey() require.NoError(err) - peerConfig1.IPSigner = NewIPSigner(ip1, tls1, bls1) + config.IPSigner = NewIPSigner(ip, tls, bls) - peerConfig1.Network = TestNetwork - inboundMsgChan1 := make(chan message.InboundMessage) - peerConfig1.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { - inboundMsgChan1 <- msg + inboundMsgChan := make(chan message.InboundMessage) + config.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { + inboundMsgChan <- msg }) - peer0 := &rawTestPeer{ - 
config: &peerConfig0, - conn: conn0, - cert: cert0, - nodeID: nodeID0, - inboundMsgChan: inboundMsgChan0, - } - peer1 := &rawTestPeer{ - config: &peerConfig1, - conn: conn1, - cert: cert1, - nodeID: nodeID1, - inboundMsgChan: inboundMsgChan1, + return &rawTestPeer{ + config: &config, + cert: cert, + nodeID: nodeID, + inboundMsgChan: inboundMsgChan, } - return peer0, peer1 } -func makeTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) { - rawPeer0, rawPeer1 := makeRawTestPeers(t, trackedSubnets) - - peer0 := &testPeer{ +func startTestPeer(self *rawTestPeer, peer *rawTestPeer, conn net.Conn) *testPeer { + return &testPeer{ Peer: Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, + self.config, + conn, + peer.cert, + peer.nodeID, NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, + self.config.Metrics, + peer.nodeID, logging.NoLog{}, throttling.NewNoOutboundThrottler(), ), ), - inboundMsgChan: rawPeer0.inboundMsgChan, - } - peer1 := &testPeer{ - Peer: Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, - NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ), - inboundMsgChan: rawPeer1.inboundMsgChan, + inboundMsgChan: self.inboundMsgChan, } +} + +func startTestPeers(rawPeer0 *rawTestPeer, rawPeer1 *rawTestPeer) (*testPeer, *testPeer) { + conn0, conn1 := net.Pipe() + peer0 := startTestPeer(rawPeer0, rawPeer1, conn0) + peer1 := startTestPeer(rawPeer1, rawPeer0, conn1) return peer0, peer1 } -func makeReadyTestPeers(t *testing.T, trackedSubnets set.Set[ids.ID]) (*testPeer, *testPeer) { +func awaitReady(t *testing.T, peers ...Peer) { t.Helper() require := require.New(t) - peer0, peer1 := makeTestPeers(t, trackedSubnets) - - require.NoError(peer0.AwaitReady(context.Background())) - require.True(peer0.Ready()) - - require.NoError(peer1.AwaitReady(context.Background())) - 
require.True(peer1.Ready()) - - return peer0, peer1 + for _, peer := range peers { + require.NoError(peer.AwaitReady(context.Background())) + require.True(peer.Ready()) + } } func TestReady(t *testing.T) { require := require.New(t) - rawPeer0, rawPeer1 := makeRawTestPeers(t, set.Set[ids.ID]{}) - peer0 := Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, - NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ) + config := newConfig(t) - require.False(peer0.Ready()) + rawPeer0 := newRawTestPeer(t, config) + rawPeer1 := newRawTestPeer(t, config) - peer1 := Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, - NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ) + conn0, conn1 := net.Pipe() - require.NoError(peer0.AwaitReady(context.Background())) - require.True(peer0.Ready()) + peer0 := startTestPeer(rawPeer0, rawPeer1, conn0) + require.False(peer0.Ready()) - require.NoError(peer1.AwaitReady(context.Background())) - require.True(peer1.Ready()) + peer1 := startTestPeer(rawPeer1, rawPeer0, conn1) + awaitReady(t, peer0, peer1) peer0.StartClose() require.NoError(peer0.AwaitClosed(context.Background())) @@ -254,10 +189,15 @@ func TestReady(t *testing.T) { func TestSend(t *testing.T) { require := require.New(t) - peer0, peer1 := makeReadyTestPeers(t, set.Set[ids.ID]{}) - mc := newMessageCreator(t) + sharedConfig := newConfig(t) + + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) + + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + awaitReady(t, peer0, peer1) - outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + outboundGetMsg, err := sharedConfig.MessageCreator.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(err) 
require.True(peer0.Send(context.Background(), outboundGetMsg)) @@ -274,9 +214,8 @@ func TestPingUptimes(t *testing.T) { trackedSubnetID := ids.GenerateTestID() untrackedSubnetID := ids.GenerateTestID() - trackedSubnets := set.Of(trackedSubnetID) - - mc := newMessageCreator(t) + sharedConfig := newConfig(t) + sharedConfig.MySubnets = set.Of(trackedSubnetID) testCases := []struct { name string @@ -287,10 +226,11 @@ func TestPingUptimes(t *testing.T) { { name: "primary network only", msg: func() message.OutboundMessage { - pingMsg, err := mc.Ping(1, nil) + pingMsg, err := sharedConfig.MessageCreator.Ping(1, nil) require.NoError(t, err) return pingMsg }(), + shouldClose: false, assertFn: func(require *require.Assertions, peer *testPeer) { uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) require.True(ok) @@ -304,7 +244,7 @@ func TestPingUptimes(t *testing.T) { { name: "primary network and subnet", msg: func() message.OutboundMessage { - pingMsg, err := mc.Ping( + pingMsg, err := sharedConfig.MessageCreator.Ping( 1, []*p2p.SubnetUptime{ { @@ -316,6 +256,7 @@ func TestPingUptimes(t *testing.T) { require.NoError(t, err) return pingMsg }(), + shouldClose: false, assertFn: func(require *require.Assertions, peer *testPeer) { uptime, ok := peer.ObservedUptime(constants.PrimaryNetworkID) require.True(ok) @@ -329,7 +270,7 @@ func TestPingUptimes(t *testing.T) { { name: "primary network and non tracked subnet", msg: func() message.OutboundMessage { - pingMsg, err := mc.Ping( + pingMsg, err := sharedConfig.MessageCreator.Ping( 1, []*p2p.SubnetUptime{ { @@ -348,27 +289,30 @@ func TestPingUptimes(t *testing.T) { return pingMsg }(), shouldClose: true, + assertFn: nil, }, } - // Note: we reuse peers across tests because makeReadyTestPeers takes awhile - // to run. 
- peer0, peer1 := makeReadyTestPeers(t, trackedSubnets) - defer func() { - peer1.StartClose() - peer0.StartClose() - require.NoError(t, peer0.AwaitClosed(context.Background())) - require.NoError(t, peer1.AwaitClosed(context.Background())) - }() + // The raw peers are generated outside of the test cases to avoid generating + // many TLS keys. + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { require := require.New(t) + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + awaitReady(t, peer0, peer1) + defer func() { + peer1.StartClose() + peer0.StartClose() + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + }() + require.True(peer0.Send(context.Background(), tc.msg)) - // Note: shouldClose can only be `true` for the last test because - // we reuse peers across tests. if tc.shouldClose { require.NoError(peer1.AwaitClosed(context.Background())) return @@ -385,11 +329,85 @@ func TestPingUptimes(t *testing.T) { } } +func TestTrackedSubnets(t *testing.T) { + sharedConfig := newConfig(t) + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) + + makeSubnetIDs := func(numSubnets int) []ids.ID { + subnetIDs := make([]ids.ID, numSubnets) + for i := range subnetIDs { + subnetIDs[i] = ids.GenerateTestID() + } + return subnetIDs + } + + tests := []struct { + name string + trackedSubnets []ids.ID + shouldDisconnect bool + }{ + { + name: "primary network only", + trackedSubnets: makeSubnetIDs(0), + shouldDisconnect: false, + }, + { + name: "single subnet", + trackedSubnets: makeSubnetIDs(1), + shouldDisconnect: false, + }, + { + name: "max subnets", + trackedSubnets: makeSubnetIDs(maxNumTrackedSubnets), + shouldDisconnect: false, + }, + { + name: "too many subnets", + trackedSubnets: makeSubnetIDs(maxNumTrackedSubnets + 1), + shouldDisconnect: true, + }, + } + + for _, 
test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + rawPeer0.config.MySubnets = set.Of(test.trackedSubnets...) + peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) + if test.shouldDisconnect { + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + return + } + + defer func() { + peer1.StartClose() + peer0.StartClose() + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) + }() + + awaitReady(t, peer0, peer1) + + require.Equal(set.Of(constants.PrimaryNetworkID), peer0.TrackedSubnets()) + + expectedTrackedSubnets := set.Of(test.trackedSubnets...) + expectedTrackedSubnets.Add(constants.PrimaryNetworkID) + require.Equal(expectedTrackedSubnets, peer1.TrackedSubnets()) + }) + } +} + // Test that a peer using the wrong BLS key is disconnected from. func TestInvalidBLSKeyDisconnects(t *testing.T) { require := require.New(t) - rawPeer0, rawPeer1 := makeRawTestPeers(t, nil) + sharedConfig := newConfig(t) + + rawPeer0 := newRawTestPeer(t, sharedConfig) + rawPeer1 := newRawTestPeer(t, sharedConfig) + require.NoError(rawPeer0.config.Validators.AddStaker( constants.PrimaryNetworkID, rawPeer1.nodeID, @@ -407,36 +425,8 @@ func TestInvalidBLSKeyDisconnects(t *testing.T) { ids.GenerateTestID(), 1, )) - peer0 := &testPeer{ - Peer: Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, - NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ), - inboundMsgChan: rawPeer0.inboundMsgChan, - } - peer1 := &testPeer{ - Peer: Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, - NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ), - inboundMsgChan: rawPeer1.inboundMsgChan, - } + + peer0, peer1 := 
startTestPeers(rawPeer0, rawPeer1) // Because peer1 thinks that peer0 is using the wrong BLS key, they should // disconnect from each other. @@ -591,14 +581,9 @@ func TestShouldDisconnect(t *testing.T) { expectedShouldDisconnect: false, }, { - name: "past durango without a signature", + name: "peer without signature", initialPeer: &peer{ Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(mockable.MaxTime) - return clk - }(), Log: logging.NoLog{}, VersionCompatibility: version.GetCompatibility(constants.UnitTestID), Validators: func() validators.Manager { @@ -619,11 +604,6 @@ func TestShouldDisconnect(t *testing.T) { }, expectedPeer: &peer{ Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(mockable.MaxTime) - return clk - }(), Log: logging.NoLog{}, VersionCompatibility: version.GetCompatibility(constants.UnitTestID), Validators: func() validators.Manager { @@ -645,68 +625,9 @@ func TestShouldDisconnect(t *testing.T) { expectedShouldDisconnect: true, }, { - name: "pre durango without a signature", - initialPeer: &peer{ - Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(time.Time{}) - return clk - }(), - Log: logging.NoLog{}, - VersionCompatibility: version.GetCompatibility(constants.UnitTestID), - Validators: func() validators.Manager { - vdrs := validators.NewManager() - require.NoError(t, vdrs.AddStaker( - constants.PrimaryNetworkID, - peerID, - bls.PublicFromSecretKey(blsKey), - txID, - 1, - )) - return vdrs - }(), - }, - id: peerID, - version: version.CurrentApp, - ip: &SignedIP{}, - }, - expectedPeer: &peer{ - Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(time.Time{}) - return clk - }(), - Log: logging.NoLog{}, - VersionCompatibility: version.GetCompatibility(constants.UnitTestID), - Validators: func() validators.Manager { - vdrs := validators.NewManager() - require.NoError(t, vdrs.AddStaker( - 
constants.PrimaryNetworkID, - peerID, - bls.PublicFromSecretKey(blsKey), - txID, - 1, - )) - return vdrs - }(), - }, - id: peerID, - version: version.CurrentApp, - ip: &SignedIP{}, - }, - expectedShouldDisconnect: false, - }, - { - name: "pre durango with an invalid signature", + name: "peer with invalid signature", initialPeer: &peer{ Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(time.Time{}) - return clk - }(), Log: logging.NoLog{}, VersionCompatibility: version.GetCompatibility(constants.UnitTestID), Validators: func() validators.Manager { @@ -729,11 +650,6 @@ func TestShouldDisconnect(t *testing.T) { }, expectedPeer: &peer{ Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(time.Time{}) - return clk - }(), Log: logging.NoLog{}, VersionCompatibility: version.GetCompatibility(constants.UnitTestID), Validators: func() validators.Manager { @@ -757,14 +673,9 @@ func TestShouldDisconnect(t *testing.T) { expectedShouldDisconnect: true, }, { - name: "pre durango with a valid signature", + name: "peer with valid signature", initialPeer: &peer{ Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(time.Time{}) - return clk - }(), Log: logging.NoLog{}, VersionCompatibility: version.GetCompatibility(constants.UnitTestID), Validators: func() validators.Manager { @@ -787,11 +698,6 @@ func TestShouldDisconnect(t *testing.T) { }, expectedPeer: &peer{ Config: &Config{ - Clock: func() mockable.Clock { - clk := mockable.Clock{} - clk.Set(time.Time{}) - return clk - }(), Log: logging.NoLog{}, VersionCompatibility: version.GetCompatibility(constants.UnitTestID), Validators: func() validators.Manager { @@ -834,7 +740,7 @@ func TestShouldDisconnect(t *testing.T) { func sendAndFlush(t *testing.T, sender *testPeer, receiver *testPeer) { t.Helper() mc := newMessageCreator(t) - outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + 
outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(t, err) require.True(t, sender.Send(context.Background(), outboundGetMsg)) inboundGetMsg := <-receiver.inboundMsgChan diff --git a/network/peer/set.go b/network/peer/set.go index cbb9675ec305..a90ffc4e56a9 100644 --- a/network/peer/set.go +++ b/network/peer/set.go @@ -124,8 +124,8 @@ func (s *peerSet) Sample(n int, precondition func(Peer) bool) []Peer { peers := make([]Peer, 0, n) for len(peers) < n { - index, err := sampler.Next() - if err != nil { + index, hasNext := sampler.Next() + if !hasNext { // We have run out of peers to attempt to sample. break } diff --git a/network/peer/test_peer.go b/network/peer/test_peer.go index eb1a79476480..ae03594f8e67 100644 --- a/network/peer/test_peer.go +++ b/network/peer/test_peer.go @@ -7,6 +7,7 @@ import ( "context" "crypto" "net" + "net/netip" "time" "github.com/prometheus/client_golang/prometheus" @@ -19,9 +20,9 @@ import ( "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -47,7 +48,7 @@ const maxMessageToSend = 1024 // peer. 
func StartTestPeer( ctx context.Context, - ip ips.IPPort, + ip netip.AddrPort, networkID uint32, router router.InboundHandler, ) (Peer, error) { @@ -66,7 +67,6 @@ func StartTestPeer( clientUpgrader := NewTLSClientUpgrader( tlsConfg, prometheus.NewCounter(prometheus.CounterOpts{}), - version.GetDurangoTime(networkID), ) peerID, conn, cert, err := clientUpgrader.Upgrade(conn) @@ -77,7 +77,6 @@ func StartTestPeer( mc, err := message.NewCreator( logging.NoLog{}, prometheus.NewRegistry(), - "", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -85,11 +84,7 @@ func StartTestPeer( return nil, err } - metrics, err := NewMetrics( - logging.NoLog{}, - "", - prometheus.NewRegistry(), - ) + metrics, err := NewMetrics(prometheus.NewRegistry()) if err != nil { return nil, err } @@ -104,7 +99,6 @@ func StartTestPeer( return nil, err } - signerIP := ips.NewDynamicIPPort(net.IPv6zero, 1) tlsKey := tlsCert.PrivateKey.(crypto.Signer) blsKey, err := bls.NewSecretKey() if err != nil { @@ -129,7 +123,14 @@ func StartTestPeer( MaxClockDifference: time.Minute, ResourceTracker: resourceTracker, UptimeCalculator: uptime.NoOpCalculator, - IPSigner: NewIPSigner(signerIP, tlsKey, blsKey), + IPSigner: NewIPSigner( + utils.NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), + 1, + )), + tlsKey, + blsKey, + ), }, conn, cert, diff --git a/network/peer/tls_config.go b/network/peer/tls_config.go index 7de848ed062a..9673b98dc8f1 100644 --- a/network/peer/tls_config.go +++ b/network/peer/tls_config.go @@ -14,7 +14,6 @@ import ( // It is safe, and typically expected, for [keyLogWriter] to be [nil]. // [keyLogWriter] should only be enabled for debugging. 
func TLSConfig(cert tls.Certificate, keyLogWriter io.Writer) *tls.Config { - // #nosec G402 return &tls.Config{ Certificates: []tls.Certificate{cert}, ClientAuth: tls.RequireAnyClientCert, @@ -24,7 +23,7 @@ func TLSConfig(cert tls.Certificate, keyLogWriter io.Writer) *tls.Config { // // During our security audit by Quantstamp, this was investigated // and confirmed to be safe and correct. - InsecureSkipVerify: true, + InsecureSkipVerify: true, //#nosec G402 MinVersion: tls.VersionTLS13, KeyLogWriter: keyLogWriter, } diff --git a/network/peer/upgrader.go b/network/peer/upgrader.go index 9341922175cd..ec39c87136d2 100644 --- a/network/peer/upgrader.go +++ b/network/peer/upgrader.go @@ -7,7 +7,6 @@ import ( "crypto/tls" "errors" "net" - "time" "github.com/prometheus/client_golang/prometheus" @@ -30,40 +29,36 @@ type Upgrader interface { type tlsServerUpgrader struct { config *tls.Config invalidCerts prometheus.Counter - durangoTime time.Time } -func NewTLSServerUpgrader(config *tls.Config, invalidCerts prometheus.Counter, durangoTime time.Time) Upgrader { +func NewTLSServerUpgrader(config *tls.Config, invalidCerts prometheus.Counter) Upgrader { return &tlsServerUpgrader{ config: config, invalidCerts: invalidCerts, - durangoTime: durangoTime, } } func (t *tlsServerUpgrader) Upgrade(conn net.Conn) (ids.NodeID, net.Conn, *staking.Certificate, error) { - return connToIDAndCert(tls.Server(conn, t.config), t.invalidCerts, t.durangoTime) + return connToIDAndCert(tls.Server(conn, t.config), t.invalidCerts) } type tlsClientUpgrader struct { config *tls.Config invalidCerts prometheus.Counter - durangoTime time.Time } -func NewTLSClientUpgrader(config *tls.Config, invalidCerts prometheus.Counter, durangoTime time.Time) Upgrader { +func NewTLSClientUpgrader(config *tls.Config, invalidCerts prometheus.Counter) Upgrader { return &tlsClientUpgrader{ config: config, invalidCerts: invalidCerts, - durangoTime: durangoTime, } } func (t *tlsClientUpgrader) Upgrade(conn net.Conn) 
(ids.NodeID, net.Conn, *staking.Certificate, error) { - return connToIDAndCert(tls.Client(conn, t.config), t.invalidCerts, t.durangoTime) + return connToIDAndCert(tls.Client(conn, t.config), t.invalidCerts) } -func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter, durangoTime time.Time) (ids.NodeID, net.Conn, *staking.Certificate, error) { +func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter) (ids.NodeID, net.Conn, *staking.Certificate, error) { if err := conn.Handshake(); err != nil { return ids.EmptyNodeID, nil, nil, err } @@ -74,20 +69,7 @@ func connToIDAndCert(conn *tls.Conn, invalidCerts prometheus.Counter, durangoTim } tlsCert := state.PeerCertificates[0] - // Invariant: ParseCertificate is used rather than CertificateFromX509 to - // ensure that signature verification can assume the certificate was - // parseable according the staking package's parser. - // - // TODO: Remove pre-Durango parsing after v1.11.x has activated. - var ( - peerCert *staking.Certificate - err error - ) - if time.Now().Before(durangoTime) { - peerCert, err = staking.ParseCertificate(tlsCert.Raw) - } else { - peerCert, err = staking.ParseCertificatePermissive(tlsCert.Raw) - } + peerCert, err := staking.ParseCertificate(tlsCert.Raw) if err != nil { invalidCerts.Inc() return ids.EmptyNodeID, nil, nil, err diff --git a/network/test_cert_1.crt b/network/test_cert_1.crt deleted file mode 100644 index 2f2b95e658ad..000000000000 --- a/network/test_cert_1.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw -MFoYDzIxMjQwMTA5MTQ0NTU4WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAqCOUESK8b5N894dVCSIs4mTfNTdhaL5cnw3ZXSbZlfquBRJOxhqHXutG -An9++OTWvevrssaXBxGT4oOT3N11dm4iKh7ewi3to+1Sfqq71blCVZtBDOeWpZx0 -WwhPO37Us26fCR7T2gStiTHY9qE0QV/9p15OCAFsRb94JuhF0OR0d6tRm0yQ6b7Y -NRzpaBw4MBxZD9h84+QDdhsTyxI0xk/NnbG74pykjsau0/YA9mNqHHSnL4DyD5qu 
-IKqRfD5HQHemx66I3jEXUB/GxTHhxz5uskIpS9AV3oclvVi14BjSEWgNkJX+nMi+ -tjuSKouAFpzJZzZme2DvmyAecxbNVBdajOTe2QRiG7HKh1OdMZabd2dUNv5S9/gd -bI53s4R++z/H4llsBfk6B2+/DmqDRauh4Mz9HTf0Pud7Nz2b7r77PnPTjHExgN3R -i+Yo6LskRCQTzzTVwW/RY+rNVux9UE6ZPLarDbXnSyetKMUS7qlz8NUerWjtkC6i -om570LfTGs3GxIqVgoGg0mXuji+EoG+XpYR3PRaeo8cAmfEu7T+SxgSfJAv7DyZv -+a2VTZcOPDI1KTLrM8Xovy17t5rd9cy1/75vxnKLiGDEhzWJmNl4IvIYbtihWWl5 -ksdFYbe9Dpvuh/wBCGoK+kmCirUM1DiizWn5TxJeS1qYI8I2sYMCAwEAAaMgMB4w -DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB -AABzczRjzfhlmV+bFDzAs7nszQlZREcoRuWe5qHy7VKLvZvIAaYlcApB34hH7nDq -T/8fS8g8rC+Tzw0iCPF21Z4AzSe76V6EU4VGWWe8l00nDszfvavE5BF24z8dCuVC -1gScC1tvG6FPT23koZ0BVmyueCIa7sdqFlDz8rrRpLWfUcLj38gxwWM2JVBHQUvV -j87lzpTNH+2nPiwrKISqUPFi4YvbWKe8T4bY2Elw7THiNLZGfgqOXVkeIVi4fs97 -Tc5uscZ4OpSTlrfJqMJEV8cMRvrDmhD/VWbJvnk7lyELPoHx6MUinBswBT51yvmY -bZh4AZ43GSvSyo/V7p9scytQP3zM1MeHpsFa0RHwGVFp2BmO1abvydAxX0NMWasv -WUzXCKliXsVD/qUeCU/CFnaBqpzBvm4AFBgwHzprwzP9Be/mz/TjTcsfrmoiyxlr -QjXNk9TnP9d+aeOJsRz+JSYyHETACO5PkCg+XCDyEOf+kQAzVb9Dp0oWaCovXciU -A5z0DSDzyKVBOQo0syb5NFsLZ2DeJemNbP+3kCNzBBASQ4VWAvRbLjPh3Oe8A5PZ -xezCvzRE05O6tYkz5C5hcKbpAjfP8G8RV6ERjLBICBfb7XI7T0hixhiNHlIKknkJ -F82B/zDt+qBFARw8A/qr44RF+vy3Ql4IS2ZcflAv2pTO ------END CERTIFICATE----- diff --git a/network/test_cert_2.crt b/network/test_cert_2.crt deleted file mode 100644 index 283e286be446..000000000000 --- a/network/test_cert_2.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw -MFoYDzIxMjQwMTA5MTQ0NTQ3WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEArT7afarml6cvCmAySAO8GQka1mcQIMACyEWy6KsqiccX+DoGh5ECyQSa -WFKWKGdQ32dAWGVlSkmmgJ1jtW749hSguysav3EPMaxe/ad5CV1MwyyccGS9U99M -z0UVuFEXVjN5W6UlcULp1oJDj07NzZP6ByRiDvnjzgeYb3jHwjqOBNwex1jLW6vp -oWD03zTanVQXZaaGcEISCI2CgDP3uXfd0NQpoGVpf9gMi0cdGu8gpqbLqBjzjzr8 -GDBQYGaWKFnlqe6X9nBUad/qNE3Zeb3ehSg+M2ecQzTZFWirfa6cGTtovu04RMML -9OLflQy3rTRST2HQ6z0gpVCP3V2Mg/LmAuWyhOLVYNkhEwkRHvddzFksRzQ+ghpP 
-cGfvI0dwxQV0CbEMVjd9zVEA6dOrMLI3st2922hqF23Al1+Hwcu1G/T3ybfSTwjd -YZ23IgkQF4r+RIXevzgOBBXfEwE8XERW2zNwUG5Sv5dxx+FgDjX0EGbrzgY6OeKT -D1SP/7WQLjwmGgwyNJYkAklvEKwU+dlGD5NpgvJ9fg8R1wUhp2HhSZ1l1OUVmRYw -YqUm7dTLK1CJU2BH2sRyZcUkwstjvgi688zfHNttGYmAnx6wGS12jWf+W4df+QNI -Ng6AdcJ5Ee0z0JAbTpZW/zX3CTSroow7igHnd4AwvKEVQFcyO/MCAwEAAaMgMB4w -DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB -ACePaZvjw2KiheheWNjzOv2B+7uLVe7oEbThEUQypEmTFK8wKaHwI4BGdBGEOr/N -LZ1M2wAYgwzMTEDJE+GEB2ZHIdH9cH5lu7ITsOMVcBSJttEJVhhEtbMwVJ9JC62j -AsW4VmHFpEik+xvinxedKczXOa21YJo4sv2TiFWFaSHqPeRo7HA1dxQYOwiLsS6e -JKIupMrn8IZz2YN5gFhbvQTBp2J3u6kxMIzN0a+BPARR4fwMn5lVMVvye/+8Kwtw -dZHSN1FYUcFqHagmhNlNkAOaGQklSFWtsVVQxQCFS2bxEImLj5kG16fCAsQoRC0J -ZS2OaRncrtB0r0Qu1JB5XJP9FLflSb57KIxBNVrl+iWdWikgBFE6cMthMwgLfQ99 -k8AMp6KrCjcxqegN+P30ct/JwahKPq2+SwtdHG3yrZ2TJEjhOtersrTnRK9zqm9v -lqS7JsiztjgqnhMs2eTdXygfEe0AoZihGTaaLYj37A9+2RECkuijkjBghG2NBnv6 -264lTghZyZcZgZNCgYglYC1bhifEorJpYf6TOOcDAi5UH8R7vi4x70vI6sIDrhga -d9E63EVe11QdIjceceMlNm42UTrhl0epMbL6FIzU+d91qBgd9qT6YqoYPFZSiYFy -2hArgLxH2fxTXatCAit5g1MEk0w1MiHVrPZ8lTU3U/ET ------END CERTIFICATE----- diff --git a/network/test_cert_3.crt b/network/test_cert_3.crt deleted file mode 100644 index c0977191ec7b..000000000000 --- a/network/test_cert_3.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnTCCAoWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAAMCAXDTk5MTIzMTAwMDAw -MFoYDzIxMjQwMTA5MTQ0NTM0WjAAMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEA5aV76ivIZ1iWmW0OzGMCrmFQBnej9JntQ1jP9yiacKu7j5Z/bD/eqoyc -jRwoSiesErfnThAGy7H80glVw/XmC0fYNPVDPyzAEdNk46M3yEI8hAKI6aSkl1s1 -KVAHpQuNcG+3xIB39OOMx0XuycZ6gqzyMmjqeT0cThNDXTwGbodMVDAf0q220QAq -zB/lz0sjHPXlYh25LJ1yPtl+vlcfGrP+q+2ODR9rnI79PE7AZB4Xc6wUIca5XXkH -PS7zQ1Ida1xrf446MYCVuazLFhpzq8/nhkxNMzxdZsJaWavL+xkpjGxAySvj0jlu -QFGsmsxOIU/XgJD/VRqqyISXpl2wg0l8mpsU9fV7bEW1y6MIc7AARRgbbEPiDz8m -/O8mjEW3C16untLHB7LzPCCitTssGR65Shkj+Lw+aM4X5ZI+Xm8eHTRCek8T5Cl3 -Sm2UFkLk2mun6cwoyWWhwi6+EfW6ks0c7qSHtJTP8DgLrWxYmBuD9PKSHclpa4/5 
-toj52YnT6fIBJWz5ggIdntRCaH8+0eWvwuvDsdPUL7JQFjJmfQOdMenlNqW2aEvx -+JZiYLJBWj9cjpI33P5CAfFEVM3IFlDHmMHRTQ/kKLcfvSDfuofEBoMt4tjf01Um -dfi8kFKWl9ba9I7CoQ13U4J1wkk6KxatZP7eGCmKRoq8w+Y38NsCAwEAAaMgMB4w -DgYDVR0PAQH/BAQDAgSwMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggIB -AKsvbN5/r4YPguetl+jIhqpr4TZM8GNZRGTqkKC8clRspBeihJqkNQWsnZiFkJTH -NhNAx+7tlJHqeGdojc2XjBAkc+//qYqXKHgihsO54bVG9oN9IPO+mpPumRRhGneH -jTUE/hLFqwA4ZPw5L1HtJ0m1yqg/HXf4aBXcVQ/YO8YN17ZgLpueYt+Chi1pP/Ku -TzHuoKuHst2T6uuZQZxcD+XJoXwdOt7mfPTh5y9/Psjn+qx833DNWSwF3O/lEghA -2yOb+5CFta2LLUHH894oj5SvgJ/5cvn4+NbyDCUv5ebvE98BMh72PLNRuIRV0gfO -XalMIZ+9Jm2TGXD0dWt9GeZ5z3h+nCEB6s3x0sqluaWG3lTUx+4T/aIxdGuvPFi6 -7DWm7TG7yxFGfbECyyXXL+B/gyHhE1Q93nE3wK9flSG+ljqFJS+8wytht52XhgwE -lV1AwHgxkbkFzNIwB0s7etR9+wBcQvFKqeCZrDeG1twKNcY1dv1D/OCUlBYJvL/X -YADeT2ZjFzHhWhv6TLVEAtqytT1o4qXh6VWeIrwfMG0VcQSiJyNxwO/aW5BOTM44 -EelDzvSjo/pRxqN/m44Iuf0Ran86DO7LmjNYh/04FN3oaL9cFIaT9BWXt/Xx2Fdw -+dg5bPSJ62ExVnnNRlY9lQECkSoRZK2epcICs+3YmmGX ------END CERTIFICATE----- diff --git a/network/test_key_1.key b/network/test_key_1.key deleted file mode 100644 index c49775114d66..000000000000 --- a/network/test_key_1.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCoI5QRIrxvk3z3 -h1UJIiziZN81N2FovlyfDdldJtmV+q4FEk7GGode60YCf3745Na96+uyxpcHEZPi -g5Pc3XV2biIqHt7CLe2j7VJ+qrvVuUJVm0EM55alnHRbCE87ftSzbp8JHtPaBK2J -Mdj2oTRBX/2nXk4IAWxFv3gm6EXQ5HR3q1GbTJDpvtg1HOloHDgwHFkP2Hzj5AN2 -GxPLEjTGT82dsbvinKSOxq7T9gD2Y2ocdKcvgPIPmq4gqpF8PkdAd6bHrojeMRdQ -H8bFMeHHPm6yQilL0BXehyW9WLXgGNIRaA2Qlf6cyL62O5Iqi4AWnMlnNmZ7YO+b -IB5zFs1UF1qM5N7ZBGIbscqHU50xlpt3Z1Q2/lL3+B1sjnezhH77P8fiWWwF+ToH -b78OaoNFq6HgzP0dN/Q+53s3PZvuvvs+c9OMcTGA3dGL5ijouyREJBPPNNXBb9Fj -6s1W7H1QTpk8tqsNtedLJ60oxRLuqXPw1R6taO2QLqKibnvQt9MazcbEipWCgaDS -Ze6OL4Sgb5elhHc9Fp6jxwCZ8S7tP5LGBJ8kC/sPJm/5rZVNlw48MjUpMuszxei/ -LXu3mt31zLX/vm/GcouIYMSHNYmY2Xgi8hhu2KFZaXmSx0Vht70Om+6H/AEIagr6 -SYKKtQzUOKLNaflPEl5LWpgjwjaxgwIDAQABAoICAHGe8U0PGyWPFlCzLDyq0of+ 
-wHNWxEWi9jYphqyTN1BJgVU+BOuMO9RhywKfI6+P/KmFBtbdqmuFblkQr1f+c4Uf -cYjjKYcwwDkZg7jDKYGI2pG9A51z1nJ9oodtuxUqZRQH+gKQyXq31Ik0nTg0wXo4 -ItH6QWLZi1AqzkgEiEFcUHQZ2mDGwdqjM7nYmsXW5AVm8qxpkCP0Dn6+V4bP+8fT -X9BjreK6Fd3B15y2zfmyPp+SGPRZ/7mZvnemq/+4mi+va43enPEBXY6wmoLhbYBV -6ToeyYdIy65/x3oHu4f/Xd2TYi9FnTRX18CPyvtjH6CoPNW5hlFztRcwAkOlsgQ7 -sZ+9FGAnRvz1lrBg80DeCHeSKVkDHmMQSINhPcPnlMJpxn6iiZjdvz/Bd+9RRqZl -xUI/lV3/Wueh8SeCQlFOj3fHBZEaq6QoC/VmmaeIiLEm1hj+ymuFxwOtA6AKWLb3 -59XnEkONeTfv9d2eQ7NOPU86n/zhWHUKodmBUEaxLDaUwRkS1Adb4rLuRwrMfn3a -2KkknYWzvyrlk8lDqKAMeQneFmpresGAXeIn0vt434eaGcK4a/IZ8PebuhZxGq1Z -bVbxVm0AsLmd9X3htR6MOiZswnVmA3JCw1AMKZpLMDRSbjV0uYuhBJQsN4Y/kyOK -l52JtymFNvbuRF+836+RAoIBAQDZ9wyihmgsEPLl7PHzfYo4pnTs1puoT5PS7GjO -iVm7UtOKaawsJxKX3cxzSFVXONs9hbPPzmsQEL3Xz+lUsgrSeXReF00KLRbfE2LM -dv9hlJVMQXEKnEkFYNNgETyZIJE3ZDDqdd2PDzNM8aKHlvLYREiETCwVn7r4x5QE -jIHC0gUjRJHqUgSdAMa+qvranPLxVV9mpJmL2RXjjb/OtJosFef9h5augSNI9tPS -EDLm4wMjyXr25Vu20/cusmTlOhCzi2d23hNHx8nPE0nCEVtZ2rnnWyH/ozqRnpXX -EPh0IeZQmebBhHWzkjIPaOa05Ua5rkVAQau8/FUUubjXytyZAoIBAQDFerIQwodP -V46WVC0LtSq4ju88x1vgDfT0NFE3H6hIX7Mc91zt0NGOhzv4crfjnoj+romNfQwD -0ymtudnnoaGPFBRrRF8T+26jfFpes7Ve5q/PpY78zJH1ZLwyKKX4dzgeY0Aj9FbO -q4dzh21oD7wyknRm0NTqOvgLAuxoBFZ4FTgudKNDzGymgIaQVT1+h0226og289WT -iptkpOZ/HcxQts2U3j3a87pJB0IFjIrBTtVqIyphdwRVDa929WGDITUPHa3aqykx -Ma/zvXvocAlIDITVwxXlS16DkSS+5jdN/CUj5h0O6FefGaJmk6/bFQIeXM4fRhRF -M0cs1mxXkNR7AoIBAQCFxYftn4wDr4tD7f44sE3Kou6UBMqXq+9PvmQ8jjOSMi0+ -f8h5eKmCp0+5WSV3WJ/FzG8lFMzEmWHKOAI+Rt85ee0fajGQE0g8NMuoLUhjfSt8 -F5XnKy/tqxVPmoSUflZhpo4W96u5B1021f4oNU5pyM6w04ci5lt8IBEKEan6Bae9 -k3HyW9AVA8r2bj1zOmwoDXt1pYPPPraeZ/rWRCVy9SbihPrHst4TA9nQzLxQ0/az -Wg6rxOxa8xB7imU+AjsJ1n7zhyxSG54SBwZ3outr5D/AbEAbgvSJNslDq1iw/bU6 -tpnXHxKV2R38MyeU0jpr7zb1Tti2Li+RfsKhPhHRAoIBAHfbpXH4r6mfaeKiCokd -l2VXE6tfEMtnjTIfAuAjLb9nnk3JcTTCVj5cpDCCaEwV7+4sPz6KFB3KL3TK5Y/q -ESXHOTF12QNGyvsdQbhS+JU2DKVKRgP3oetADd2fwESTD5OaB9cKuRlNELQ1EVlk -m4RSUaYJwAC+c8gzKQtk/pp5vpSrpGBFFfjk70dxBRbjxm5r4OsBibK4IOKwF1o1 
-2sluek6NqRtYbMtgRVka2SjE0VFPMKzhUNbSrJnWCy5MnGilSdz7n8/E6ZdVfXwx -a+C4AHPBqWt3GFFgad4X2p9Rl7U3OJHQwUXGiEQcBVNCZ/vHti9TGIB7xApZxn5L -YDsCggEBAJ8RhrfEzm2YkyODFKFwgOszHQ3TNSvbC4+yLOUMSdzdKIyroOq0t53A -PSs046TINd+EDs9Pi6E69C+RYLim1NYMHeHFMzmKnQPXPwJVnYYUKInbIMURcuE9 -8FNBSKg3SUGz31SwG4bRIkJluMUp5oSAEUxWaxbUzLYkZex2uxnUGSd6TjddWKk1 -+SuoiZ3+W6yPWWh7TDKAR/oukBCmLIJI7dXSwv2DhagRpppdoMfqcnsCAgs/omB8 -Ku4y/jEkGbxLgo3Qd6U1o/QZlZG+9Q0iaxQS4dIpMxA3LwrL5txy00bm3JeWMB4H -MUZqfFgfj8ESxFBEeToOwr3Jq46vOwQ= ------END PRIVATE KEY----- diff --git a/network/test_key_2.key b/network/test_key_2.key deleted file mode 100644 index bcc0a192b2b4..000000000000 --- a/network/test_key_2.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCtPtp9quaXpy8K -YDJIA7wZCRrWZxAgwALIRbLoqyqJxxf4OgaHkQLJBJpYUpYoZ1DfZ0BYZWVKSaaA -nWO1bvj2FKC7Kxq/cQ8xrF79p3kJXUzDLJxwZL1T30zPRRW4URdWM3lbpSVxQunW -gkOPTs3Nk/oHJGIO+ePOB5hveMfCOo4E3B7HWMtbq+mhYPTfNNqdVBdlpoZwQhII -jYKAM/e5d93Q1CmgZWl/2AyLRx0a7yCmpsuoGPOPOvwYMFBgZpYoWeWp7pf2cFRp -3+o0Tdl5vd6FKD4zZ5xDNNkVaKt9rpwZO2i+7ThEwwv04t+VDLetNFJPYdDrPSCl -UI/dXYyD8uYC5bKE4tVg2SETCREe913MWSxHND6CGk9wZ+8jR3DFBXQJsQxWN33N -UQDp06swsjey3b3baGoXbcCXX4fBy7Ub9PfJt9JPCN1hnbciCRAXiv5Ehd6/OA4E -Fd8TATxcRFbbM3BQblK/l3HH4WAONfQQZuvOBjo54pMPVI//tZAuPCYaDDI0liQC -SW8QrBT52UYPk2mC8n1+DxHXBSGnYeFJnWXU5RWZFjBipSbt1MsrUIlTYEfaxHJl -xSTCy2O+CLrzzN8c220ZiYCfHrAZLXaNZ/5bh1/5A0g2DoB1wnkR7TPQkBtOllb/ -NfcJNKuijDuKAed3gDC8oRVAVzI78wIDAQABAoICAQCIgPu7BMuINoyUClPT9k1h -FJF22eIVS/VlQ7XCKgvsX1j9lwrKCnI9XUkXyorR7wYD4OEMRWhX7kwpDtoffP7h -NkOm9kGvEjA8nWqDRk/SFxeCuUXSMS4URd/JeM+yWQKgQxKeKTOlWGnTQPRmmFsE -XlIlCn/Q+QiLr+RmAK601VpNbfs6azZgVsZRB4opzQVr7XQ5/cnz7bszzfxDc67/ -DflSr7jUztMfjmXj3/aI4F3DsazKGE7gTkOP85GBQ5OQ27Rf/sTxwnRgr7Nj3us6 -R2ZrWNgZvMudEKjze3OUJd6M6wiPV258j4p+O7ybPlgDOzSXo6TvlUyBtUaFz04E -5S7bgimNUxEjFzTxkn9W/FTUeauvJcgDk+JmMZ+I9dFdMIuyksndywN9KdXBVxZH -1ZtO1P6JeFpxF7zQUmkH+/6RZd9PbQGlpNI06nAj98LVwqSDCO1aejLqoXYs9zqG 
-DOU4JdRm3qK0eshIghkvVOWIYhqKPkskQfbTFY+hasg82cGGFyzxqOsSiuW+CVIy -3iF3WyfKgvLMABoK/38zutsMT+/mOtA7rjErh1NJuwwWkkglmuwQMDqaWdOASs+v -MK8JjSi6zDpnbp70Prw5pUlHvvsD1iYWo7SOcpFos+U5zw1jHJJvnAatzcXWixuu -Xzbn2BtCqSFigW7waMy14QKCAQEAx/Nwy2xH9lVGfz8aO2CB0FGL9Ra3Jcv4HFJT -nw6/yvVLvRAwr87+/c+qbIzwLKbQXV/4vmNsqPrIJiazY+Tk739DjcW8YaMbejfr -ASPHtYbeF0FmVbxBHNZ/JSDSYUXdFZ7JlBiDSs3zhPlFBZYG2tU3JJZCR8+9J/Ss -JEIwL9UlapMznMwljFkLbvZ2oFstKkfdY61WxROOIwuGaKr0yRnNvMMp135JiB/O -dwh/NfROt4JzQ5O4ipMg6Wc73+OvBsOSQHYZQHl9NOaK1uomu5bUY7H8pLwGU7sw -LmPRzrGiu8dB+UUEyFkNI2xzwkjet+0UGupDyOfsCMf9hlzWmwKCAQEA3c8FeHkl -Il4GEB0VEw1NtC5x6i+s3NiPOlUmH+nOHgdaI7/BfljfTokQBGo+GkXkJ36yTEMh -L9Vtya3HtW4VEHNfPMjntPztn4XQvMZdSpu/k8rM44m+CB0DDLhFfwRr2cyUAwHz -xebXw8KhceqaWRp6ygJGx5Sk0gr7s7nhmIByjdx4tddEH/MahLklGdV7Vnp+yb3o -zNLVx/aDueknArgUb/zvZRcYWuNoGs9ac4pl0m6jan/x0ZcdBF0SU2bI6ltvF3WT -qwcvVnbJbBwq5PRuL4ZUqrqmXBbBAkpLJTx+kfPKD4bgcZTBnV2TxDbzze9CeieT -YCtg4u+khW7ZiQKCAQBrMIEuPD0TvEFPo8dvP1w4Dg9Gc0f5li/LFwNHCIQezIMu -togzJ3ehHvuQt7llZoPbGsDhZ7FvoQk9EpAmpCVqksHnNbK4cNUhHur3sHO2R7e1 -pdSzb3lEeWStxbuic+6CUZ5kqwNvTZsXlP3Acd344EZwcbDUiHQyAENsKKNmcRBe -4szPaM1UQMQVV0De1CIRQXdYoSsb+VDATsReRg9140Rcxg8fO881jz+CpmZzySWN -0PvzpTRP7XG+Th5V9tv0d1FnByigXMCXZGPXtKzQ8ZmoXFlBAp8tsfKxW8e005uW -qMogVDStJrgZXmFsLN5goVKe3yk5gcMSLgwmRIyzAoIBAQCoE6CkmsAd27uiaDc4 -+aLA/1TIzZmiu+NEo5NBKY1LyexvHHZGBJgqTcg6YDtw8zchCmuXSGMUeRk5cxrb -C3Cgx5wKVn7l8acqc18qPPIigATavBkn7o92XG2cLOJUjogfQVuDL+6GLxeeupRV -2x1cmakj/DegMq32j+YNWbRuOB8WClPaDyYLQ877dcR8X/2XGTmMLAEFfFoMrWtB -7D/oWo76EWNiae7FqH6RmkCDPwNLQxVHtW4LkQOm89PYKRHkLKbw0uKz/bzMOzUE -XA/Q8Lux/YuY19kJ/SACWUO6Eq4icObTfzQCPWO9mFRJog57JWttXyHZBOXk8Qzt -I4NpAoIBACurK0zJxaGUdTjmzaVipauyOZYFBsbzvCWsdSNodtZ/mw6n/qkj2N33 -vNCRLrsQAkDKATzWrscRg+xvl5/wIa4B3s8TZNIp3hL7bvI/NoR5bi5M0vcjdXEd -DeKeZsSBzEs5zivM3aWEF5MSR2zpJPNYyD0PnT6EvZOkMoq6LM3FJcouS1ChePLQ -wHEY5ZMqPODOcQ+EixNXl6FGdywaJYxKnG4liG9zdJ0lGNIivTA7gyM+JCbG4fs8 -73uGsbCpts5Y2xKFp3uK8HjWKbOCR3dE4mOZM8M/NlsUGNjSydXZMIJYWR8nvVmo 
-i3mHicYaTQxj0ruIz7JHOtFNVGi1sME= ------END PRIVATE KEY----- diff --git a/network/test_key_3.key b/network/test_key_3.key deleted file mode 100644 index 2cef238b67a9..000000000000 --- a/network/test_key_3.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDlpXvqK8hnWJaZ -bQ7MYwKuYVAGd6P0me1DWM/3KJpwq7uPln9sP96qjJyNHChKJ6wSt+dOEAbLsfzS -CVXD9eYLR9g09UM/LMAR02TjozfIQjyEAojppKSXWzUpUAelC41wb7fEgHf044zH -Re7JxnqCrPIyaOp5PRxOE0NdPAZuh0xUMB/SrbbRACrMH+XPSyMc9eViHbksnXI+ -2X6+Vx8as/6r7Y4NH2ucjv08TsBkHhdzrBQhxrldeQc9LvNDUh1rXGt/jjoxgJW5 -rMsWGnOrz+eGTE0zPF1mwlpZq8v7GSmMbEDJK+PSOW5AUayazE4hT9eAkP9VGqrI -hJemXbCDSXyamxT19XtsRbXLowhzsABFGBtsQ+IPPyb87yaMRbcLXq6e0scHsvM8 -IKK1OywZHrlKGSP4vD5ozhflkj5ebx4dNEJ6TxPkKXdKbZQWQuTaa6fpzCjJZaHC -Lr4R9bqSzRzupIe0lM/wOAutbFiYG4P08pIdyWlrj/m2iPnZidPp8gElbPmCAh2e -1EJofz7R5a/C68Ox09QvslAWMmZ9A50x6eU2pbZoS/H4lmJgskFaP1yOkjfc/kIB -8URUzcgWUMeYwdFND+Qotx+9IN+6h8QGgy3i2N/TVSZ1+LyQUpaX1tr0jsKhDXdT -gnXCSTorFq1k/t4YKYpGirzD5jfw2wIDAQABAoICAQC/Rt32h29NvTj7JB5OWS2z -h3R7Xo2ev9Mi5EecSyKQNEpuZ+FMjcpubd47nrdkRLULhkhP+gNfCKpXW9Um+psY -zEemnJ7dcO2uK1B+VsWwtJLpNZ9KVIuPUjXuai1j6EJv423Ca2r++8WXeYVSZVJH -o7u8By09vIvl8B+M+eE1kNYfzVHETlLWtHfxO6RTy/a8OYhM+ArzwVSWStxJuBE9 -Ua0PETffcEtWxLbi04lmGrZX7315QKfG1ncUHBYc/blpYjpbrWCFON/9HpKtn2y3 -L91dPBKVWXNGkx1kUTb+t8+mmchAh6Ejyhgt1Jma+g8dqf4KpTs3bJXRnLcfqCvL -Kq+wCUGv7iVWlTmhlzLpneajLDdBxGfbkAgwPFOyZoJNrnh6hU60TPc1IV6YSLlB -GsxesK9QWUrg3BAN4iKD3FvDt0qeUPbPztxEZi1OzSYQDZUQBrBL+WHuD9NxeAYe -2yx1OlPMo73gK5GW/MHBCz77+NX2kVURlTvYW4TsmInCRvOTsVNkRPUJtiHYT7Ss -Y8SzS5F/u9sfjFAVowGgwtNfq8Rm6Q1QdPZltiUNBgiTekFNQEy7WhzVg6MlT5Ca -BRqUhN3+CFwxLZ9rSQL6gxfAHk9umb0ee4JU9JgcYjtb5AtyE6DmmcSZPSejjxit -HwZ/g5MDK7kk5fKMcnL7kQKCAQEA895z7T0c6y3rhWfEUMDdTlsPgAoxYNf+jXyJ -aQmtfnDP9tf8BdPpobfHp29e7JRaGGa9QWPaaemBPHXMmD+IegG9/E+PQdHQwFSG -OpI13uCBULt8a+MMUbTCg1V4uXqf2j1BUo9SFQ6aXh/Rg1gVBgsq1M6eyvel93io -0X+/cinsDEpB5HENZwBuRb0SP0RfCgQR9Yh+jIy2TwJDDNw3sG1TvIo9aK7blSwB 
-z/gwSDx1UUa2KReD4ChYcqgLFUj3F/uF2f20P/JuaUn7tU3HoCsbG0C+Cci/XSJ9 -gu8xYl64Vg16bO3CflqjucPTFXgyBOt0lIug77YYa9CgCUJvEwKCAQEA8RHqGghV -meDnRXvPmAEwtoT7IKBe+eYjGN6wc2o+QZzjeUFkyfOtaB8rqriUXqvihD2GD6XQ -O/cSNCqp5g6yUhBLo3b9BmCsQsvxkhMpwB/hdi5aYjn+CFQVD4rAso9yGwRBWoA0 -gQdGMKenOUhU/PtVKyTTUuY7rFD8RhYq0ZLqEgO7chn8QXCNPo7MfE/qF9vQBosP -ktiS0FG442PJp2B/lYKK6N2w77ZeCoLhQowaNN0/N36kX/n4bjBE2XFLNpSuHtlg -C7bV/RMR5i/3yB0eRVUDVlqC077qlC1w0tCNZvvi6kbWwIu/4pQTdcA8mAz5B7Lc -OwOMbA2GT4OIGQKCAQABoyS0Gwzup0hFhQTUZfcWZ5YbDfZ25/xVhtiFVANOLgO3 -bIvMnjebVliIzz6b6AMS1t2+aqU0wNSVS1UsUIDiENDtuLsFfhsgr3CXRBQIgwlb -OWcEcmnKwqPrrc85r5ETLgYaP8wVSBvRNfV6JEU/3SNUem6mfjMnDjBT97+ZTJ7B -Fl6K4hds8ZvL7BELS7I3pv9X3qq61tcCgMlidLgK/zDouyTeZw4iWkFI3Cm20nEX -MppWfEnuX1b4rhgk9HB0QMQNSp7DLyV+n3iJJxSIBsIP1Mdx2V8viOO+1UxHlMs4 -CK8hvBbqMkGXJbFtG3l6fvoxZR6XfWl8j9IDPebxAoIBAF07cnBy/LgwdQE4awb8 -ntxX/c+WdmTrjnNV3KQmWMGDba49jj9UkKIOPBMgo7EhhM9kA+8VT72BRncKcP7a -fDikuLwVjrHivXxv55N4+dKmAcp1DtuiVg7ehe6m2PO16olsUeIwZx3ntEuo61GK -GeRlR4ESEvCivj1cbNSmShUXXpNtAheU2Sxt3RJuo8MIHR7xEjkVmwZN4CnVEU5Q -D3M+LNmjzRlWc9GhlCk4iOn1yUTctFBAGE5OHLhwzo/R8ya+xcCEjVK6eXQQ5gFC -V+/64vQpdsr04lgGJC7+i/3cTnOfwxicIP4CjkmQvx3xJP4hNka189qW+r3nVSR3 -WDECggEAAQCCqF4J8C2keY+o/kYQBq0tHhrC28HgiVQuCGc4XruYQtDh4di/I72F -RsvgVHS29ApAlh29i29ws7K2bU6WIc+JR3nmwAHUtiJmxRZhn/c722AvRXF5YMH/ -u46bEURHF5sGz8vr5chX/R4LiF579xyNsB9KC3mPqdjW/L6ACQdrBJVAS9cwplO0 -D+YWxmCE1Ps2tQtz6ZN+LUC7WO6M24k8KW2y4Scue0/23uCllWFgS3/vxDdQDZWn -+7AvMYPh4Wrfdd0t0cU+c9rirFYVz+uo/QBUIZOIw64AvIUjZpHTbhcjz1mAqcgJ -eAOQk+OFUTNKeI9uJwoNYOguHsxt2w== ------END PRIVATE KEY----- diff --git a/network/test_network.go b/network/test_network.go index 1cb56127c1a7..8644eb359ae1 100644 --- a/network/test_network.go +++ b/network/test_network.go @@ -8,6 +8,7 @@ import ( "errors" "math" "net" + "net/netip" "runtime" "sync" @@ -24,9 +25,9 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/subnets" + 
"github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" @@ -82,7 +83,6 @@ func NewTestNetwork( msgCreator, err := message.NewCreator( logging.NoLog{}, metrics, - "", constants.DefaultNetworkCompressionType, constants.DefaultNetworkMaximumInboundTimeout, ) @@ -90,118 +90,20 @@ func NewTestNetwork( return nil, err } - networkConfig := Config{ - ThrottlerConfig: ThrottlerConfig{ - InboundConnUpgradeThrottlerConfig: throttling.InboundConnUpgradeThrottlerConfig{ - UpgradeCooldown: constants.DefaultInboundConnUpgradeThrottlerCooldown, - MaxRecentConnsUpgraded: int(math.Ceil(constants.DefaultInboundThrottlerMaxConnsPerSec * constants.DefaultInboundConnUpgradeThrottlerCooldown.Seconds())), - }, - - InboundMsgThrottlerConfig: throttling.InboundMsgThrottlerConfig{ - MsgByteThrottlerConfig: throttling.MsgByteThrottlerConfig{ - VdrAllocSize: constants.DefaultInboundThrottlerVdrAllocSize, - AtLargeAllocSize: constants.DefaultInboundThrottlerAtLargeAllocSize, - NodeMaxAtLargeBytes: constants.DefaultInboundThrottlerNodeMaxAtLargeBytes, - }, - - BandwidthThrottlerConfig: throttling.BandwidthThrottlerConfig{ - RefillRate: constants.DefaultInboundThrottlerBandwidthRefillRate, - MaxBurstSize: constants.DefaultInboundThrottlerBandwidthMaxBurstSize, - }, - - CPUThrottlerConfig: throttling.SystemThrottlerConfig{ - MaxRecheckDelay: constants.DefaultInboundThrottlerCPUMaxRecheckDelay, - }, - - DiskThrottlerConfig: throttling.SystemThrottlerConfig{ - MaxRecheckDelay: constants.DefaultInboundThrottlerDiskMaxRecheckDelay, - }, - - MaxProcessingMsgsPerNode: constants.DefaultInboundThrottlerMaxProcessingMsgsPerNode, - }, - OutboundMsgThrottlerConfig: throttling.MsgByteThrottlerConfig{ - VdrAllocSize: 
constants.DefaultOutboundThrottlerVdrAllocSize, - AtLargeAllocSize: constants.DefaultOutboundThrottlerAtLargeAllocSize, - NodeMaxAtLargeBytes: constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, - }, - - MaxInboundConnsPerSec: constants.DefaultInboundThrottlerMaxConnsPerSec, - }, - - HealthConfig: HealthConfig{ - Enabled: true, - MinConnectedPeers: constants.DefaultNetworkHealthMinPeers, - MaxTimeSinceMsgReceived: constants.DefaultNetworkHealthMaxTimeSinceMsgReceived, - MaxTimeSinceMsgSent: constants.DefaultNetworkHealthMaxTimeSinceMsgSent, - MaxPortionSendQueueBytesFull: constants.DefaultNetworkHealthMaxPortionSendQueueFill, - MaxSendFailRate: constants.DefaultNetworkHealthMaxSendFailRate, - SendFailRateHalflife: constants.DefaultHealthCheckAveragerHalflife, - }, - - ProxyEnabled: constants.DefaultNetworkTCPProxyEnabled, - ProxyReadHeaderTimeout: constants.DefaultNetworkTCPProxyReadTimeout, - - DialerConfig: dialer.Config{ - ThrottleRps: constants.DefaultOutboundConnectionThrottlingRps, - ConnectionTimeout: constants.DefaultOutboundConnectionTimeout, - }, - - TimeoutConfig: TimeoutConfig{ - PingPongTimeout: constants.DefaultPingPongTimeout, - ReadHandshakeTimeout: constants.DefaultNetworkReadHandshakeTimeout, - }, - - PeerListGossipConfig: PeerListGossipConfig{ - PeerListNumValidatorIPs: constants.DefaultNetworkPeerListNumValidatorIPs, - PeerListValidatorGossipSize: constants.DefaultNetworkPeerListValidatorGossipSize, - PeerListNonValidatorGossipSize: constants.DefaultNetworkPeerListNonValidatorGossipSize, - PeerListPeersGossipSize: constants.DefaultNetworkPeerListPeersGossipSize, - PeerListGossipFreq: constants.DefaultNetworkPeerListGossipFreq, - PeerListPullGossipFreq: constants.DefaultNetworkPeerListPullGossipFreq, - PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, - }, - - DelayConfig: DelayConfig{ - InitialReconnectDelay: constants.DefaultNetworkInitialReconnectDelay, - MaxReconnectDelay: constants.DefaultNetworkMaxReconnectDelay, - 
}, - - MaxClockDifference: constants.DefaultNetworkMaxClockDifference, - CompressionType: constants.DefaultNetworkCompressionType, - PingFrequency: constants.DefaultPingFrequency, - AllowPrivateIPs: !constants.ProductionNetworkIDs.Contains(networkID), - UptimeMetricFreq: constants.DefaultUptimeMetricFreq, - MaximumInboundMessageTimeout: constants.DefaultNetworkMaximumInboundTimeout, - - RequireValidatorToConnect: constants.DefaultNetworkRequireValidatorToConnect, - PeerReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, - PeerWriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, - } - - networkConfig.NetworkID = networkID - networkConfig.TrackedSubnets = trackedSubnets - tlsCert, err := staking.NewTLSCert() if err != nil { return nil, err } - tlsConfig := peer.TLSConfig(*tlsCert, nil) - networkConfig.TLSConfig = tlsConfig - networkConfig.TLSKey = tlsCert.PrivateKey.(crypto.Signer) - networkConfig.BLSKey, err = bls.NewSecretKey() + + blsKey, err := bls.NewSecretKey() if err != nil { return nil, err } - networkConfig.Validators = currentValidators - networkConfig.Beacons = validators.NewManager() - // This never actually does anything because we never initialize the P-chain - networkConfig.UptimeCalculator = uptime.NoOpCalculator - // TODO actually monitor usage // TestNetwork doesn't use disk so we don't need to track it, but we should // still have guardrails around cpu/memory usage. 
- networkConfig.ResourceTracker, err = tracker.NewResourceTracker( + resourceTracker, err := tracker.NewResourceTracker( metrics, resource.NoUsage, &meter.ContinuousFactory{}, @@ -210,31 +112,110 @@ func NewTestNetwork( if err != nil { return nil, err } - networkConfig.CPUTargeter = tracker.NewTargeter( - logging.NoLog{}, - &tracker.TargeterConfig{ - VdrAlloc: float64(runtime.NumCPU()), - MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), - MaxNonVdrNodeUsage: float64(runtime.NumCPU()) / 8, - }, - currentValidators, - networkConfig.ResourceTracker.CPUTracker(), - ) - networkConfig.DiskTargeter = tracker.NewTargeter( - logging.NoLog{}, - &tracker.TargeterConfig{ - VdrAlloc: 1000 * units.GiB, - MaxNonVdrUsage: 1000 * units.GiB, - MaxNonVdrNodeUsage: 1000 * units.GiB, - }, - currentValidators, - networkConfig.ResourceTracker.DiskTracker(), - ) - - networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 1) return NewNetwork( - &networkConfig, + &Config{ + HealthConfig: HealthConfig{ + Enabled: true, + MinConnectedPeers: constants.DefaultNetworkHealthMinPeers, + MaxTimeSinceMsgReceived: constants.DefaultNetworkHealthMaxTimeSinceMsgReceived, + MaxTimeSinceMsgSent: constants.DefaultNetworkHealthMaxTimeSinceMsgSent, + MaxPortionSendQueueBytesFull: constants.DefaultNetworkHealthMaxPortionSendQueueFill, + MaxSendFailRate: constants.DefaultNetworkHealthMaxSendFailRate, + SendFailRateHalflife: constants.DefaultHealthCheckAveragerHalflife, + }, + PeerListGossipConfig: PeerListGossipConfig{ + PeerListNumValidatorIPs: constants.DefaultNetworkPeerListNumValidatorIPs, + PeerListPullGossipFreq: constants.DefaultNetworkPeerListPullGossipFreq, + PeerListBloomResetFreq: constants.DefaultNetworkPeerListBloomResetFreq, + }, + TimeoutConfig: TimeoutConfig{ + PingPongTimeout: constants.DefaultPingPongTimeout, + ReadHandshakeTimeout: constants.DefaultNetworkReadHandshakeTimeout, + }, + DelayConfig: DelayConfig{ + InitialReconnectDelay: constants.DefaultNetworkInitialReconnectDelay, + 
MaxReconnectDelay: constants.DefaultNetworkMaxReconnectDelay, + }, + ThrottlerConfig: ThrottlerConfig{ + InboundConnUpgradeThrottlerConfig: throttling.InboundConnUpgradeThrottlerConfig{ + UpgradeCooldown: constants.DefaultInboundConnUpgradeThrottlerCooldown, + MaxRecentConnsUpgraded: int(math.Ceil(constants.DefaultInboundThrottlerMaxConnsPerSec * constants.DefaultInboundConnUpgradeThrottlerCooldown.Seconds())), + }, + InboundMsgThrottlerConfig: throttling.InboundMsgThrottlerConfig{ + MsgByteThrottlerConfig: throttling.MsgByteThrottlerConfig{ + VdrAllocSize: constants.DefaultInboundThrottlerVdrAllocSize, + AtLargeAllocSize: constants.DefaultInboundThrottlerAtLargeAllocSize, + NodeMaxAtLargeBytes: constants.DefaultInboundThrottlerNodeMaxAtLargeBytes, + }, + BandwidthThrottlerConfig: throttling.BandwidthThrottlerConfig{ + RefillRate: constants.DefaultInboundThrottlerBandwidthRefillRate, + MaxBurstSize: constants.DefaultInboundThrottlerBandwidthMaxBurstSize, + }, + CPUThrottlerConfig: throttling.SystemThrottlerConfig{ + MaxRecheckDelay: constants.DefaultInboundThrottlerCPUMaxRecheckDelay, + }, + DiskThrottlerConfig: throttling.SystemThrottlerConfig{ + MaxRecheckDelay: constants.DefaultInboundThrottlerDiskMaxRecheckDelay, + }, + MaxProcessingMsgsPerNode: constants.DefaultInboundThrottlerMaxProcessingMsgsPerNode, + }, + OutboundMsgThrottlerConfig: throttling.MsgByteThrottlerConfig{ + VdrAllocSize: constants.DefaultOutboundThrottlerVdrAllocSize, + AtLargeAllocSize: constants.DefaultOutboundThrottlerAtLargeAllocSize, + NodeMaxAtLargeBytes: constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, + }, + MaxInboundConnsPerSec: constants.DefaultInboundThrottlerMaxConnsPerSec, + }, + ProxyEnabled: constants.DefaultNetworkTCPProxyEnabled, + ProxyReadHeaderTimeout: constants.DefaultNetworkTCPProxyReadTimeout, + DialerConfig: dialer.Config{ + ThrottleRps: constants.DefaultOutboundConnectionThrottlingRps, + ConnectionTimeout: constants.DefaultOutboundConnectionTimeout, + }, + 
TLSConfig: peer.TLSConfig(*tlsCert, nil), + MyIPPort: utils.NewAtomic(netip.AddrPortFrom( + netip.IPv4Unspecified(), + 1, + )), + NetworkID: networkID, + MaxClockDifference: constants.DefaultNetworkMaxClockDifference, + PingFrequency: constants.DefaultPingFrequency, + AllowPrivateIPs: !constants.ProductionNetworkIDs.Contains(networkID), + CompressionType: constants.DefaultNetworkCompressionType, + TLSKey: tlsCert.PrivateKey.(crypto.Signer), + BLSKey: blsKey, + TrackedSubnets: trackedSubnets, + Beacons: validators.NewManager(), + Validators: currentValidators, + UptimeCalculator: uptime.NoOpCalculator, + UptimeMetricFreq: constants.DefaultUptimeMetricFreq, + RequireValidatorToConnect: constants.DefaultNetworkRequireValidatorToConnect, + MaximumInboundMessageTimeout: constants.DefaultNetworkMaximumInboundTimeout, + PeerReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, + PeerWriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, + ResourceTracker: resourceTracker, + CPUTargeter: tracker.NewTargeter( + logging.NoLog{}, + &tracker.TargeterConfig{ + VdrAlloc: float64(runtime.NumCPU()), + MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), + MaxNonVdrNodeUsage: float64(runtime.NumCPU()) / 8, + }, + currentValidators, + resourceTracker.CPUTracker(), + ), + DiskTargeter: tracker.NewTargeter( + logging.NoLog{}, + &tracker.TargeterConfig{ + VdrAlloc: 1000 * units.GiB, + MaxNonVdrUsage: 1000 * units.GiB, + MaxNonVdrNodeUsage: 1000 * units.GiB, + }, + currentValidators, + resourceTracker.DiskTracker(), + ), + }, msgCreator, metrics, log, diff --git a/network/throttling/bandwidth_throttler.go b/network/throttling/bandwidth_throttler.go index cde94b96124b..58938f31c11a 100644 --- a/network/throttling/bandwidth_throttler.go +++ b/network/throttling/bandwidth_throttler.go @@ -58,7 +58,6 @@ type BandwidthThrottlerConfig struct { func newBandwidthThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, config BandwidthThrottlerConfig, ) 
(bandwidthThrottler, error) { @@ -69,16 +68,14 @@ func newBandwidthThrottler( limiters: make(map[ids.NodeID]*rate.Limiter), metrics: bandwidthThrottlerMetrics{ acquireLatency: metric.NewAveragerWithErrs( - namespace, "bandwidth_throttler_inbound_acquire_latency", "average time (in ns) to acquire bytes from the inbound bandwidth throttler", registerer, &errs, ), awaitingAcquire: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bandwidth_throttler_inbound_awaiting_acquire", - Help: "Number of inbound messages waiting to acquire bandwidth from the inbound bandwidth throttler", + Name: "bandwidth_throttler_inbound_awaiting_acquire", + Help: "Number of inbound messages waiting to acquire bandwidth from the inbound bandwidth throttler", }), }, } diff --git a/network/throttling/bandwidth_throttler_test.go b/network/throttling/bandwidth_throttler_test.go index 9f9195477b38..da9ac6ded28d 100644 --- a/network/throttling/bandwidth_throttler_test.go +++ b/network/throttling/bandwidth_throttler_test.go @@ -22,7 +22,7 @@ func TestBandwidthThrottler(t *testing.T) { RefillRate: 8, MaxBurstSize: 10, } - throttlerIntf, err := newBandwidthThrottler(logging.NoLog{}, "", prometheus.NewRegistry(), config) + throttlerIntf, err := newBandwidthThrottler(logging.NoLog{}, prometheus.NewRegistry(), config) require.NoError(err) require.IsType(&bandwidthThrottlerImpl{}, throttlerIntf) throttler := throttlerIntf.(*bandwidthThrottlerImpl) diff --git a/network/throttling/inbound_conn_upgrade_throttler.go b/network/throttling/inbound_conn_upgrade_throttler.go index 4df5ee39b776..4067d80b2b29 100644 --- a/network/throttling/inbound_conn_upgrade_throttler.go +++ b/network/throttling/inbound_conn_upgrade_throttler.go @@ -4,10 +4,10 @@ package throttling import ( + "net/netip" "sync" "time" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" 
"github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -36,7 +36,7 @@ type InboundConnUpgradeThrottler interface { // Must only be called after [Dispatch] has been called. // If [ip] is a local IP, this method always returns true. // Must not be called after [Stop] has been called. - ShouldUpgrade(ip ips.IPPort) bool + ShouldUpgrade(ip netip.AddrPort) bool } type InboundConnUpgradeThrottlerConfig struct { @@ -73,12 +73,12 @@ func (*noInboundConnUpgradeThrottler) Dispatch() {} func (*noInboundConnUpgradeThrottler) Stop() {} -func (*noInboundConnUpgradeThrottler) ShouldUpgrade(ips.IPPort) bool { +func (*noInboundConnUpgradeThrottler) ShouldUpgrade(netip.AddrPort) bool { return true } type ipAndTime struct { - ip string + ip netip.Addr cooldownElapsedAt time.Time } @@ -92,7 +92,7 @@ type inboundConnUpgradeThrottler struct { done chan struct{} // IP --> Present if ShouldUpgrade(ipStr) returned true // within the last [UpgradeCooldown]. - recentIPs set.Set[string] + recentIPs set.Set[netip.Addr] // Sorted in order of increasing time // of last call to ShouldUpgrade that returned true. // For each IP in this channel, ShouldUpgrade(ipStr) @@ -101,28 +101,29 @@ type inboundConnUpgradeThrottler struct { } // Returns whether we should upgrade an inbound connection from [ipStr]. -func (n *inboundConnUpgradeThrottler) ShouldUpgrade(ip ips.IPPort) bool { - if ip.IP.IsLoopback() { +func (n *inboundConnUpgradeThrottler) ShouldUpgrade(addrPort netip.AddrPort) bool { + // Only use addr (not port). This mitigates DoS attacks from many nodes on one + // host. + addr := addrPort.Addr() + if addr.IsLoopback() { // Don't rate-limit loopback IPs return true } - // Only use IP (not port). This mitigates DoS - // attacks from many nodes on one host. 
- ipStr := ip.IP.String() + n.lock.Lock() defer n.lock.Unlock() - if n.recentIPs.Contains(ipStr) { + if n.recentIPs.Contains(addr) { // We recently upgraded an inbound connection from this IP return false } select { case n.recentIPsAndTimes <- ipAndTime{ - ip: ipStr, + ip: addr, cooldownElapsedAt: n.clock.Time().Add(n.UpgradeCooldown), }: - n.recentIPs.Add(ipStr) + n.recentIPs.Add(addr) return true default: return false diff --git a/network/throttling/inbound_conn_upgrade_throttler_test.go b/network/throttling/inbound_conn_upgrade_throttler_test.go index 2f6cd926451e..802593db8b02 100644 --- a/network/throttling/inbound_conn_upgrade_throttler_test.go +++ b/network/throttling/inbound_conn_upgrade_throttler_test.go @@ -4,22 +4,21 @@ package throttling import ( - "net" + "net/netip" "testing" "time" "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) var ( - host1 = ips.IPPort{IP: net.IPv4(1, 2, 3, 4), Port: 9651} - host2 = ips.IPPort{IP: net.IPv4(1, 2, 3, 5), Port: 9653} - host3 = ips.IPPort{IP: net.IPv4(1, 2, 3, 6), Port: 9655} - host4 = ips.IPPort{IP: net.IPv4(1, 2, 3, 7), Port: 9657} - loopbackIP = ips.IPPort{IP: net.IPv4(127, 0, 0, 1), Port: 9657} + host1 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 4}), 9651) + host2 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 5}), 9653) + host3 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 6}), 9655) + host4 = netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 7}), 9657) + loopbackIP = netip.AddrPortFrom(netip.AddrFrom4([4]byte{127, 0, 0, 1}), 9657) ) func TestNoInboundConnUpgradeThrottler(t *testing.T) { diff --git a/network/throttling/inbound_msg_buffer_throttler.go b/network/throttling/inbound_msg_buffer_throttler.go index 65306eea7d51..73ebc4ed9778 100644 --- a/network/throttling/inbound_msg_buffer_throttler.go +++ b/network/throttling/inbound_msg_buffer_throttler.go @@ -18,7 +18,6 @@ import ( // See 
inbound_msg_throttler.go func newInboundMsgBufferThrottler( - namespace string, registerer prometheus.Registerer, maxProcessingMsgsPerNode uint64, ) (*inboundMsgBufferThrottler, error) { @@ -27,7 +26,7 @@ func newInboundMsgBufferThrottler( awaitingAcquire: make(map[ids.NodeID]chan struct{}), nodeToNumProcessingMsgs: make(map[ids.NodeID]uint64), } - return t, t.metrics.initialize(namespace, registerer) + return t, t.metrics.initialize(registerer) } // Rate-limits inbound messages based on the number of @@ -130,19 +129,17 @@ type inboundMsgBufferThrottlerMetrics struct { awaitingAcquire prometheus.Gauge } -func (m *inboundMsgBufferThrottlerMetrics) initialize(namespace string, reg prometheus.Registerer) error { +func (m *inboundMsgBufferThrottlerMetrics) initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} m.acquireLatency = metric.NewAveragerWithErrs( - namespace, "buffer_throttler_inbound_acquire_latency", "average time (in ns) to get space on the inbound message buffer", reg, &errs, ) m.awaitingAcquire = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "buffer_throttler_inbound_awaiting_acquire", - Help: "Number of inbound messages waiting to take space on the inbound message buffer", + Name: "buffer_throttler_inbound_awaiting_acquire", + Help: "Number of inbound messages waiting to take space on the inbound message buffer", }) errs.Add( reg.Register(m.awaitingAcquire), diff --git a/network/throttling/inbound_msg_buffer_throttler_test.go b/network/throttling/inbound_msg_buffer_throttler_test.go index 38e6d735097a..d9f3e4d29bc4 100644 --- a/network/throttling/inbound_msg_buffer_throttler_test.go +++ b/network/throttling/inbound_msg_buffer_throttler_test.go @@ -17,7 +17,7 @@ import ( // Test inboundMsgBufferThrottler func TestMsgBufferThrottler(t *testing.T) { require := require.New(t) - throttler, err := newInboundMsgBufferThrottler("", prometheus.NewRegistry(), 3) + throttler, err := 
newInboundMsgBufferThrottler(prometheus.NewRegistry(), 3) require.NoError(err) nodeID1, nodeID2 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() @@ -69,7 +69,7 @@ func TestMsgBufferThrottler(t *testing.T) { // Test inboundMsgBufferThrottler when an acquire is cancelled func TestMsgBufferThrottlerContextCancelled(t *testing.T) { require := require.New(t) - throttler, err := newInboundMsgBufferThrottler("", prometheus.NewRegistry(), 3) + throttler, err := newInboundMsgBufferThrottler(prometheus.NewRegistry(), 3) require.NoError(err) vdr1Context, vdr1ContextCancelFunc := context.WithCancel(context.Background()) diff --git a/network/throttling/inbound_msg_byte_throttler.go b/network/throttling/inbound_msg_byte_throttler.go index 0bac7ca294b6..237041f00a09 100644 --- a/network/throttling/inbound_msg_byte_throttler.go +++ b/network/throttling/inbound_msg_byte_throttler.go @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -23,7 +23,6 @@ import ( func newInboundMsgByteThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, vdrs validators.Manager, config MsgByteThrottlerConfig, @@ -39,10 +38,10 @@ func newInboundMsgByteThrottler( nodeToVdrBytesUsed: make(map[ids.NodeID]uint64), nodeToAtLargeBytesUsed: make(map[ids.NodeID]uint64), }, - waitingToAcquire: linkedhashmap.New[uint64, *msgMetadata](), + waitingToAcquire: linked.NewHashmap[uint64, *msgMetadata](), nodeToWaitingMsgID: make(map[ids.NodeID]uint64), } - return t, t.metrics.initialize(namespace, registerer) + return t, t.metrics.initialize(registerer) } // Information about a message waiting to be read. 
@@ -67,7 +66,7 @@ type inboundMsgByteThrottler struct { // Node ID --> Msg ID for a message this node is waiting to acquire nodeToWaitingMsgID map[ids.NodeID]uint64 // Msg ID --> *msgMetadata - waitingToAcquire linkedhashmap.LinkedHashmap[uint64, *msgMetadata] + waitingToAcquire *linked.Hashmap[uint64, *msgMetadata] // Invariant: The node is only waiting on a single message at a time // // Invariant: waitingToAcquire.Get(nodeToWaitingMsgIDs[nodeID]) @@ -306,34 +305,29 @@ type inboundMsgByteThrottlerMetrics struct { awaitingRelease prometheus.Gauge } -func (m *inboundMsgByteThrottlerMetrics) initialize(namespace string, reg prometheus.Registerer) error { +func (m *inboundMsgByteThrottlerMetrics) initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} m.acquireLatency = metric.NewAveragerWithErrs( - namespace, "byte_throttler_inbound_acquire_latency", "average time (in ns) to get space on the inbound message byte buffer", reg, &errs, ) m.remainingAtLargeBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_remaining_at_large_bytes", - Help: "Bytes remaining in the at-large byte buffer", + Name: "byte_throttler_inbound_remaining_at_large_bytes", + Help: "Bytes remaining in the at-large byte buffer", }) m.remainingVdrBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_remaining_validator_bytes", - Help: "Bytes remaining in the validator byte buffer", + Name: "byte_throttler_inbound_remaining_validator_bytes", + Help: "Bytes remaining in the validator byte buffer", }) m.awaitingAcquire = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_awaiting_acquire", - Help: "Number of inbound messages waiting to acquire space on the inbound message byte buffer", + Name: "byte_throttler_inbound_awaiting_acquire", + Help: "Number of inbound messages waiting to acquire space on the inbound message byte buffer", }) 
m.awaitingRelease = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "byte_throttler_inbound_awaiting_release", - Help: "Number of messages currently being read/handled", + Name: "byte_throttler_inbound_awaiting_release", + Help: "Number of messages currently being read/handled", }) errs.Add( reg.Register(m.remainingAtLargeBytes), diff --git a/network/throttling/inbound_msg_byte_throttler_test.go b/network/throttling/inbound_msg_byte_throttler_test.go index 52ffcf83c67e..72ca316de442 100644 --- a/network/throttling/inbound_msg_byte_throttler_test.go +++ b/network/throttling/inbound_msg_byte_throttler_test.go @@ -30,7 +30,6 @@ func TestInboundMsgByteThrottlerCancelContextDeadlock(t *testing.T) { throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -60,7 +59,6 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -118,7 +116,6 @@ func TestInboundMsgByteThrottler(t *testing.T) { throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -333,7 +330,6 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -384,7 +380,6 @@ func TestMsgThrottlerNextMsg(t *testing.T) { maxBytes := maxVdrBytes throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -422,13 +417,16 @@ func TestMsgThrottlerNextMsg(t *testing.T) { // Release 1 byte throttler.release(&msgMetadata{msgSize: 1}, vdr1ID) + // Byte should have gone toward next validator message + throttler.lock.Lock() require.Equal(2, throttler.waitingToAcquire.Len()) require.Contains(throttler.nodeToWaitingMsgID, vdr1ID) 
firstMsgID := throttler.nodeToWaitingMsgID[vdr1ID] firstMsg, exists := throttler.waitingToAcquire.Get(firstMsgID) require.True(exists) require.Equal(maxBytes-2, firstMsg.bytesNeeded) + throttler.lock.Unlock() select { case <-doneVdr: diff --git a/network/throttling/inbound_msg_throttler.go b/network/throttling/inbound_msg_throttler.go index ea9167deca15..faf64ed083af 100644 --- a/network/throttling/inbound_msg_throttler.go +++ b/network/throttling/inbound_msg_throttler.go @@ -12,7 +12,6 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/metric" ) var _ InboundMsgThrottler = (*inboundMsgThrottler)(nil) @@ -54,7 +53,6 @@ type InboundMsgThrottlerConfig struct { // Returns a new, sybil-safe inbound message throttler. func NewInboundMsgThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, vdrs validators.Manager, throttlerConfig InboundMsgThrottlerConfig, @@ -64,7 +62,6 @@ func NewInboundMsgThrottler( ) (InboundMsgThrottler, error) { byteThrottler, err := newInboundMsgByteThrottler( log, - namespace, registerer, vdrs, throttlerConfig.MsgByteThrottlerConfig, @@ -73,7 +70,6 @@ func NewInboundMsgThrottler( return nil, err } bufferThrottler, err := newInboundMsgBufferThrottler( - namespace, registerer, throttlerConfig.MaxProcessingMsgsPerNode, ) @@ -82,7 +78,6 @@ func NewInboundMsgThrottler( } bandwidthThrottler, err := newBandwidthThrottler( log, - namespace, registerer, throttlerConfig.BandwidthThrottlerConfig, ) @@ -90,7 +85,7 @@ func NewInboundMsgThrottler( return nil, err } cpuThrottler, err := NewSystemThrottler( - metric.AppendNamespace(namespace, "cpu"), + "cpu", registerer, throttlerConfig.CPUThrottlerConfig, resourceTracker.CPUTracker(), @@ -100,7 +95,7 @@ func NewInboundMsgThrottler( return nil, err } diskThrottler, err := NewSystemThrottler( - metric.AppendNamespace(namespace, 
"disk"), + "disk", registerer, throttlerConfig.DiskThrottlerConfig, resourceTracker.DiskTracker(), diff --git a/network/throttling/outbound_msg_throttler.go b/network/throttling/outbound_msg_throttler.go index d75c53f1548a..b27fe01060dc 100644 --- a/network/throttling/outbound_msg_throttler.go +++ b/network/throttling/outbound_msg_throttler.go @@ -42,7 +42,6 @@ type outboundMsgThrottler struct { func NewSybilOutboundMsgThrottler( log logging.Logger, - namespace string, registerer prometheus.Registerer, vdrs validators.Manager, config MsgByteThrottlerConfig, @@ -59,7 +58,7 @@ func NewSybilOutboundMsgThrottler( nodeToAtLargeBytesUsed: make(map[ids.NodeID]uint64), }, } - return t, t.metrics.initialize(namespace, registerer) + return t, t.metrics.initialize(registerer) } func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.NodeID) bool { @@ -176,31 +175,26 @@ type outboundMsgThrottlerMetrics struct { awaitingRelease prometheus.Gauge } -func (m *outboundMsgThrottlerMetrics) initialize(namespace string, registerer prometheus.Registerer) error { +func (m *outboundMsgThrottlerMetrics) initialize(registerer prometheus.Registerer) error { m.acquireSuccesses = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "throttler_outbound_acquire_successes", - Help: "Outbound messages not dropped due to rate-limiting", + Name: "throttler_outbound_acquire_successes", + Help: "Outbound messages not dropped due to rate-limiting", }) m.acquireFailures = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "throttler_outbound_acquire_failures", - Help: "Outbound messages dropped due to rate-limiting", + Name: "throttler_outbound_acquire_failures", + Help: "Outbound messages dropped due to rate-limiting", }) m.remainingAtLargeBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "throttler_outbound_remaining_at_large_bytes", - Help: "Bytes remaining in the at large byte allocation", + 
Name: "throttler_outbound_remaining_at_large_bytes", + Help: "Bytes remaining in the at large byte allocation", }) m.remainingVdrBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "throttler_outbound_remaining_validator_bytes", - Help: "Bytes remaining in the validator byte allocation", + Name: "throttler_outbound_remaining_validator_bytes", + Help: "Bytes remaining in the validator byte allocation", }) m.awaitingRelease = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "throttler_outbound_awaiting_release", - Help: "Number of messages waiting to be sent", + Name: "throttler_outbound_awaiting_release", + Help: "Number of messages waiting to be sent", }) return utils.Err( registerer.Register(m.acquireSuccesses), diff --git a/network/throttling/outbound_msg_throttler_test.go b/network/throttling/outbound_msg_throttler_test.go index 664449adadd6..ab883b8fa4e4 100644 --- a/network/throttling/outbound_msg_throttler_test.go +++ b/network/throttling/outbound_msg_throttler_test.go @@ -32,7 +32,6 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -174,7 +173,6 @@ func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, @@ -221,7 +219,6 @@ func TestBypassThrottling(t *testing.T) { require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, - "", prometheus.NewRegistry(), vdrs, config, diff --git a/network/tracked_ip.go b/network/tracked_ip.go index 6a95bbee5a47..87377ec669e5 100644 --- a/network/tracked_ip.go +++ 
b/network/tracked_ip.go @@ -5,10 +5,9 @@ package network import ( "math/rand" + "net/netip" "sync" "time" - - "github.com/ava-labs/avalanchego/utils/ips" ) func init() { @@ -19,20 +18,20 @@ type trackedIP struct { delayLock sync.RWMutex delay time.Duration - ip ips.IPPort + ip netip.AddrPort stopTrackingOnce sync.Once onStopTracking chan struct{} } -func newTrackedIP(ip ips.IPPort) *trackedIP { +func newTrackedIP(ip netip.AddrPort) *trackedIP { return &trackedIP{ ip: ip, onStopTracking: make(chan struct{}), } } -func (ip *trackedIP) trackNewIP(newIP ips.IPPort) *trackedIP { +func (ip *trackedIP) trackNewIP(newIP netip.AddrPort) *trackedIP { ip.stopTracking() return &trackedIP{ delay: ip.getDelay(), diff --git a/network/tracked_ip_test.go b/network/tracked_ip_test.go index 956f02cc19b4..4e735668ecc9 100644 --- a/network/tracked_ip_test.go +++ b/network/tracked_ip_test.go @@ -4,12 +4,62 @@ package network import ( + "net/netip" "testing" "time" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/ips" ) +var ( + ip *ips.ClaimedIPPort + otherIP *ips.ClaimedIPPort + + defaultLoopbackAddrPort = netip.AddrPortFrom( + netip.AddrFrom4([4]byte{127, 0, 0, 1}), + 9651, + ) +) + +func init() { + { + cert, err := staking.NewTLSCert() + if err != nil { + panic(err) + } + stakingCert, err := staking.ParseCertificate(cert.Leaf.Raw) + if err != nil { + panic(err) + } + ip = ips.NewClaimedIPPort( + stakingCert, + defaultLoopbackAddrPort, + 1, // timestamp + nil, // signature + ) + } + + { + cert, err := staking.NewTLSCert() + if err != nil { + panic(err) + } + stakingCert, err := staking.ParseCertificate(cert.Leaf.Raw) + if err != nil { + panic(err) + } + otherIP = ips.NewClaimedIPPort( + stakingCert, + defaultLoopbackAddrPort, + 1, // timestamp + nil, // signature + ) + } +} + func TestTrackedIP(t *testing.T) { require := require.New(t) diff --git a/node/config.go b/node/config.go index 
3974409d3114..6f8d3e1ec549 100644 --- a/node/config.go +++ b/node/config.go @@ -5,6 +5,7 @@ package node import ( "crypto/tls" + "net/netip" "time" "github.com/ava-labs/avalanchego/api/server" @@ -18,24 +19,13 @@ import ( "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) -type IPCConfig struct { - IPCAPIEnabled bool `json:"ipcAPIEnabled"` - IPCPath string `json:"ipcPath"` - IPCDefaultChainIDs []string `json:"ipcDefaultChainIDs"` -} - -type APIAuthConfig struct { - APIRequireAuthToken bool `json:"apiRequireAuthToken"` - APIAuthPassword string `json:"-"` -} - type APIIndexerConfig struct { IndexAPIEnabled bool `json:"indexAPIEnabled"` IndexAllowIncomplete bool `json:"indexAllowIncomplete"` @@ -59,9 +49,7 @@ type HTTPConfig struct { } type APIConfig struct { - APIAuthConfig `json:"authConfig"` APIIndexerConfig `json:"indexerConfig"` - IPCConfig `json:"ipcConfig"` // Enable/Disable APIs AdminAPIEnabled bool `json:"adminAPIEnabled"` @@ -97,8 +85,8 @@ type StakingConfig struct { } type StateSyncConfig struct { - StateSyncIDs []ids.NodeID `json:"stateSyncIDs"` - StateSyncIPs []ips.IPPort `json:"stateSyncIPs"` + StateSyncIDs []ids.NodeID `json:"stateSyncIDs"` + StateSyncIPs []netip.AddrPort `json:"stateSyncIPs"` } type BootstrapConfig struct { @@ -135,13 +123,13 @@ type DatabaseConfig struct { // Config contains all of the configurations of an Avalanche node. 
type Config struct { - HTTPConfig `json:"httpConfig"` - IPConfig `json:"ipConfig"` - StakingConfig `json:"stakingConfig"` - genesis.TxFeeConfig `json:"txFeeConfig"` - StateSyncConfig `json:"stateSyncConfig"` - BootstrapConfig `json:"bootstrapConfig"` - DatabaseConfig `json:"databaseConfig"` + HTTPConfig `json:"httpConfig"` + IPConfig `json:"ipConfig"` + StakingConfig `json:"stakingConfig"` + fee.StaticConfig `json:"txFeeConfig"` + StateSyncConfig `json:"stateSyncConfig"` + BootstrapConfig `json:"bootstrapConfig"` + DatabaseConfig `json:"databaseConfig"` // Genesis information GenesisBytes []byte `json:"-"` diff --git a/node/node.go b/node/node.go index 54caaba12516..63946140258d 100644 --- a/node/node.go +++ b/node/node.go @@ -13,6 +13,7 @@ import ( "io" "io/fs" "net" + "net/netip" "os" "path/filepath" "strconv" @@ -25,7 +26,6 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/admin" - "github.com/ava-labs/avalanchego/api/auth" "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/api/keystore" @@ -37,13 +37,12 @@ import ( "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/meterdb" - "github.com/ava-labs/avalanchego/database/pebble" + "github.com/ava-labs/avalanchego/database/pebbledb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/indexer" - "github.com/ava-labs/avalanchego/ipcs" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" @@ -68,6 +67,7 @@ import ( "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" + "github.com/ava-labs/avalanchego/utils/metric" 
"github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/resource" @@ -76,13 +76,11 @@ import ( "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" - ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" platformconfig "github.com/ava-labs/avalanchego/vms/platformvm/config" coreth "github.com/ava-labs/coreth/plugin/evm" @@ -93,6 +91,18 @@ const ( httpPortName = constants.AppName + "-http" ipResolutionTimeout = 30 * time.Second + + apiNamespace = constants.PlatformName + metric.NamespaceSeparator + "api" + benchlistNamespace = constants.PlatformName + metric.NamespaceSeparator + "benchlist" + dbNamespace = constants.PlatformName + metric.NamespaceSeparator + "db" + healthNamespace = constants.PlatformName + metric.NamespaceSeparator + "health" + meterDBNamespace = constants.PlatformName + metric.NamespaceSeparator + "meterdb" + networkNamespace = constants.PlatformName + metric.NamespaceSeparator + "network" + processNamespace = constants.PlatformName + metric.NamespaceSeparator + "process" + requestsNamespace = constants.PlatformName + metric.NamespaceSeparator + "requests" + resourceTrackerNamespace = constants.PlatformName + metric.NamespaceSeparator + "resource_tracker" + responsesNamespace = constants.PlatformName + metric.NamespaceSeparator + "responses" + systemResourcesNamespace = constants.PlatformName + metric.NamespaceSeparator + "system_resources" ) var ( @@ -113,16 +123,18 @@ func New( logger logging.Logger, ) (*Node, 
error) { tlsCert := config.StakingTLSCert.Leaf - stakingCert := staking.CertificateFromX509(tlsCert) - if err := staking.ValidateCertificate(stakingCert); err != nil { + stakingCert, err := staking.ParseCertificate(tlsCert.Raw) + if err != nil { return nil, fmt.Errorf("invalid staking certificate: %w", err) } n := &Node{ - Log: logger, - LogFactory: logFactory, - ID: ids.NodeIDFromCert(stakingCert), - Config: config, + Log: logger, + LogFactory: logFactory, + StakingTLSSigner: config.StakingTLSCert.PrivateKey.(crypto.Signer), + StakingTLSCert: stakingCert, + ID: ids.NodeIDFromCert(stakingCert), + Config: config, } n.DoneShuttingDown.Add(1) @@ -137,7 +149,6 @@ func New( zap.Reflect("config", n.Config), ) - var err error n.VMFactoryLog, err = logFactory.Make("vm-factory") if err != nil { return nil, fmt.Errorf("problem creating vm logger: %w", err) @@ -163,7 +174,10 @@ func New( return nil, fmt.Errorf("couldn't initialize tracer: %w", err) } - n.initMetrics() + if err := n.initMetrics(); err != nil { + return nil, fmt.Errorf("couldn't initialize metrics: %w", err) + } + n.initNAT() if err := n.initAPIServer(); err != nil { // Start the API Server return nil, fmt.Errorf("couldn't initialize API server: %w", err) @@ -187,11 +201,18 @@ func New( // It must be initiated before networking (initNetworking), chain manager (initChainManager) // and the engine (initChains) but after the metrics (initMetricsAPI) // message.Creator currently record metrics under network namespace - n.networkNamespace = "network" + + networkRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + networkNamespace, + ) + if err != nil { + return nil, err + } + n.msgCreator, err = message.NewCreator( n.Log, - n.MetricsRegisterer, - n.networkNamespace, + networkRegisterer, n.Config.NetworkConfig.CompressionType, n.Config.NetworkConfig.MaximumInboundMessageTimeout, ) @@ -204,12 +225,12 @@ func New( logger.Warn("sybil control is not enforced") n.vdrs = 
newOverriddenManager(constants.PrimaryNetworkID, n.vdrs) } - if err := n.initResourceManager(n.MetricsRegisterer); err != nil { + if err := n.initResourceManager(); err != nil { return nil, fmt.Errorf("problem initializing resource manager: %w", err) } n.initCPUTargeter(&config.CPUTargeterConfig) n.initDiskTargeter(&config.DiskTargeterConfig) - if err := n.initNetworking(); err != nil { // Set up networking layer. + if err := n.initNetworking(networkRegisterer); err != nil { // Set up networking layer. return nil, fmt.Errorf("problem initializing networking: %w", err) } @@ -236,12 +257,6 @@ func New( if err := n.initInfoAPI(); err != nil { // Start the Info API return nil, fmt.Errorf("couldn't initialize info API: %w", err) } - if err := n.initIPCs(); err != nil { // Start the IPCs - return nil, fmt.Errorf("couldn't initialize IPCs: %w", err) - } - if err := n.initIPCAPI(); err != nil { // Start the IPC API - return nil, fmt.Errorf("couldn't initialize the IPC API: %w", err) - } if err := n.initChainAliases(n.Config.GenesisBytes); err != nil { return nil, fmt.Errorf("couldn't initialize chain aliases: %w", err) } @@ -272,6 +287,9 @@ type Node struct { // (in consensus, for example) ID ids.NodeID + StakingTLSSigner crypto.Signer + StakingTLSCert *staking.Certificate + // Storage for this node DB database.Database @@ -315,11 +333,8 @@ type Node struct { TxAcceptorGroup snow.AcceptorGroup VertexAcceptorGroup snow.AcceptorGroup - IPCs *ipcs.ChainIPCs - // Net runs the networking stack - networkNamespace string - Net network.Network + Net network.Network // The staking address will optionally be written to a process context // file to enable other nodes to be configured to use this node as a @@ -360,8 +375,8 @@ type Node struct { DoneShuttingDown sync.WaitGroup // Metrics Registerer - MetricsRegisterer *prometheus.Registry - MetricsGatherer metrics.MultiGatherer + MetricsGatherer metrics.MultiGatherer + MeterDBMetricsGatherer metrics.MultiGatherer VMAliaser ids.Aliaser 
VMManager vms.Manager @@ -395,7 +410,7 @@ type Node struct { // Initialize the networking layer. // Assumes [n.vdrs], [n.CPUTracker], and [n.CPUTargeter] have been initialized. -func (n *Node) initNetworking() error { +func (n *Node) initNetworking(reg prometheus.Registerer) error { // Providing either loopback address - `::1` for ipv6 and `127.0.0.1` for ipv4 - as the listen // host will avoid the need for a firewall exception on recent MacOS: // @@ -423,20 +438,26 @@ func (n *Node) initNetworking() error { // Record the bound address to enable inclusion in process context file. n.stakingAddress = listener.Addr().String() - ipPort, err := ips.ToIPPort(n.stakingAddress) + stakingAddrPort, err := ips.ParseAddrPort(n.stakingAddress) if err != nil { return err } - var dynamicIP ips.DynamicIPPort + var ( + publicAddr netip.Addr + atomicIP *utils.Atomic[netip.AddrPort] + ) switch { case n.Config.PublicIP != "": // Use the specified public IP. - ipPort.IP = net.ParseIP(n.Config.PublicIP) - if ipPort.IP == nil { - return fmt.Errorf("invalid IP Address: %s", n.Config.PublicIP) + publicAddr, err = ips.ParseAddr(n.Config.PublicIP) + if err != nil { + return fmt.Errorf("invalid public IP address %q: %w", n.Config.PublicIP, err) } - dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + atomicIP = utils.NewAtomic(netip.AddrPortFrom( + publicAddr, + stakingAddrPort.Port(), + )) n.ipUpdater = dynamicip.NewNoUpdater() case n.Config.PublicIPResolutionService != "": // Use dynamic IP resolution. @@ -447,40 +468,46 @@ func (n *Node) initNetworking() error { // Use that to resolve our public IP. 
ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) - ipPort.IP, err = resolver.Resolve(ctx) + publicAddr, err = resolver.Resolve(ctx) cancel() if err != nil { return fmt.Errorf("couldn't resolve public IP: %w", err) } - dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) - n.ipUpdater = dynamicip.NewUpdater(dynamicIP, resolver, n.Config.PublicIPResolutionFreq) + atomicIP = utils.NewAtomic(netip.AddrPortFrom( + publicAddr, + stakingAddrPort.Port(), + )) + n.ipUpdater = dynamicip.NewUpdater(atomicIP, resolver, n.Config.PublicIPResolutionFreq) default: - ipPort.IP, err = n.router.ExternalIP() + publicAddr, err = n.router.ExternalIP() if err != nil { return fmt.Errorf("public IP / IP resolution service not given and failed to resolve IP with NAT: %w", err) } - dynamicIP = ips.NewDynamicIPPort(ipPort.IP, ipPort.Port) + atomicIP = utils.NewAtomic(netip.AddrPortFrom( + publicAddr, + stakingAddrPort.Port(), + )) n.ipUpdater = dynamicip.NewNoUpdater() } - if ipPort.IP.IsLoopback() || ipPort.IP.IsPrivate() { + if !ips.IsPublic(publicAddr) { n.Log.Warn("P2P IP is private, you will not be publicly discoverable", - zap.Stringer("ip", ipPort), + zap.Stringer("ip", publicAddr), ) } // Regularly update our public IP and port mappings. 
n.portMapper.Map( - ipPort.Port, - ipPort.Port, + stakingAddrPort.Port(), + stakingAddrPort.Port(), stakingPortName, - dynamicIP, + atomicIP, n.Config.PublicIPResolutionFreq, ) go n.ipUpdater.Dispatch(n.Log) n.Log.Info("initializing networking", - zap.Stringer("ip", ipPort), + zap.Stringer("ip", atomicIP.Get()), ) tlsKey, ok := n.Config.StakingTLSCert.PrivateKey.(crypto.Signer) @@ -512,7 +539,7 @@ func (n *Node) initNetworking() error { } } if unknownACPs.Len() > 0 { - n.Log.Warn("gossipping unknown ACPs", + n.Log.Warn("gossiping unknown ACPs", zap.Reflect("acps", unknownACPs), ) } @@ -528,6 +555,16 @@ func (n *Node) initNetworking() error { // Configure benchlist n.Config.BenchlistConfig.Validators = n.vdrs n.Config.BenchlistConfig.Benchable = n.chainRouter + n.Config.BenchlistConfig.BenchlistRegisterer = metrics.NewLabelGatherer(chains.ChainLabel) + + err = n.MetricsGatherer.Register( + benchlistNamespace, + n.Config.BenchlistConfig.BenchlistRegisterer, + ) + if err != nil { + return err + } + n.benchlistManager = benchlist.NewManager(&n.Config.BenchlistConfig) n.uptimeCalculator = uptime.NewLockedCalculator() @@ -592,9 +629,8 @@ func (n *Node) initNetworking() error { } // add node configs to network config - n.Config.NetworkConfig.Namespace = n.networkNamespace n.Config.NetworkConfig.MyNodeID = n.ID - n.Config.NetworkConfig.MyIPPort = dynamicIP + n.Config.NetworkConfig.MyIPPort = atomicIP n.Config.NetworkConfig.NetworkID = n.Config.NetworkID n.Config.NetworkConfig.Validators = n.vdrs n.Config.NetworkConfig.Beacons = n.bootstrappers @@ -611,7 +647,7 @@ func (n *Node) initNetworking() error { n.Net, err = network.NewNetwork( &n.Config.NetworkConfig, n.msgCreator, - n.MetricsRegisterer, + reg, n.Log, listener, dialer.NewDialer(constants.NetworkType, n.Config.NetworkConfig.DialerConfig, n.Log), @@ -686,7 +722,7 @@ func (n *Node) Dispatch() error { // Add bootstrap nodes to the peer network for _, bootstrapper := range n.Config.Bootstrappers { - 
n.Net.ManuallyTrack(bootstrapper.ID, ips.IPPort(bootstrapper.IP)) + n.Net.ManuallyTrack(bootstrapper.ID, bootstrapper.IP) } // Start P2P connections @@ -728,25 +764,31 @@ func (n *Node) Dispatch() error { */ func (n *Node) initDatabase() error { + dbRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + dbNamespace, + ) + if err != nil { + return err + } + // start the db switch n.Config.DatabaseConfig.Name { case leveldb.Name: // Prior to v1.10.15, the only on-disk database was leveldb, and its // files went to [dbPath]/[networkID]/v1.4.5. dbPath := filepath.Join(n.Config.DatabaseConfig.Path, version.CurrentDatabase.String()) - var err error - n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + n.DB, err = leveldb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, dbRegisterer) if err != nil { - return fmt.Errorf("couldn't create leveldb at %s: %w", dbPath, err) + return fmt.Errorf("couldn't create %s at %s: %w", leveldb.Name, dbPath, err) } case memdb.Name: n.DB = memdb.New() - case pebble.Name: - dbPath := filepath.Join(n.Config.DatabaseConfig.Path, pebble.Name) - var err error - n.DB, err = pebble.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, "db_internal", n.MetricsRegisterer) + case pebbledb.Name: + dbPath := filepath.Join(n.Config.DatabaseConfig.Path, "pebble") + n.DB, err = pebbledb.New(dbPath, n.Config.DatabaseConfig.Config, n.Log, dbRegisterer) if err != nil { - return fmt.Errorf("couldn't create pebbledb at %s: %w", dbPath, err) + return fmt.Errorf("couldn't create %s at %s: %w", pebbledb.Name, dbPath, err) } default: return fmt.Errorf( @@ -754,7 +796,7 @@ func (n *Node) initDatabase() error { n.Config.DatabaseConfig.Name, leveldb.Name, memdb.Name, - pebble.Name, + pebbledb.Name, ) } @@ -762,8 +804,15 @@ func (n *Node) initDatabase() error { n.DB = versiondb.New(n.DB) } - var err error - n.DB, err = meterdb.New("db", n.MetricsRegisterer, n.DB) + meterDBReg, err := 
metrics.MakeAndRegister( + n.MeterDBMetricsGatherer, + "all", + ) + if err != nil { + return err + } + + n.DB, err = meterdb.New(meterDBReg, n.DB) if err != nil { return err } @@ -837,29 +886,6 @@ func (n *Node) initEventDispatchers() { n.VertexAcceptorGroup = snow.NewAcceptorGroup(n.Log) } -func (n *Node) initIPCs() error { - chainIDs := make([]ids.ID, len(n.Config.IPCDefaultChainIDs)) - for i, chainID := range n.Config.IPCDefaultChainIDs { - id, err := ids.FromString(chainID) - if err != nil { - return err - } - chainIDs[i] = id - } - - var err error - n.IPCs, err = ipcs.NewChainIPCs( - n.Log, - n.Config.IPCPath, - n.Config.NetworkID, - n.BlockAcceptorGroup, - n.TxAcceptorGroup, - n.VertexAcceptorGroup, - chainIDs, - ) - return err -} - // Initialize [n.indexer]. // Should only be called after [n.DB], [n.DecisionAcceptorGroup], // [n.ConsensusAcceptorGroup], [n.Log], [n.APIServer], [n.chainManager] are @@ -907,9 +933,13 @@ func (n *Node) initChains(genesisBytes []byte) error { return n.chainManager.StartChainCreator(platformChain) } -func (n *Node) initMetrics() { - n.MetricsRegisterer = prometheus.NewRegistry() - n.MetricsGatherer = metrics.NewMultiGatherer() +func (n *Node) initMetrics() error { + n.MetricsGatherer = metrics.NewPrefixGatherer() + n.MeterDBMetricsGatherer = metrics.NewLabelGatherer(chains.ChainLabel) + return n.MetricsGatherer.Register( + meterDBNamespace, + n.MeterDBMetricsGatherer, + ) } func (n *Node) initNAT() { @@ -945,7 +975,7 @@ func (n *Node) initAPIServer() error { ) return err } - hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() + hostIsPublic = ips.IsPublic(ip) n.Log.Debug("finished HTTP host lookup", zap.String("host", n.Config.HTTPHost), @@ -960,8 +990,8 @@ func (n *Node) initAPIServer() error { return err } - addr := listener.Addr().String() - ipPort, err := ips.ToIPPort(addr) + addrStr := listener.Addr().String() + addrPort, err := ips.ParseAddrPort(addrStr) if err != nil { return err } @@ -974,8 +1004,8 @@ func (n *Node) 
initAPIServer() error { ) n.portMapper.Map( - ipPort.Port, - ipPort.Port, + addrPort.Port(), + addrPort.Port(), httpPortName, nil, n.Config.PublicIPResolutionFreq, @@ -998,26 +1028,10 @@ func (n *Node) initAPIServer() error { } n.apiURI = fmt.Sprintf("%s://%s", protocol, listener.Addr()) - if !n.Config.APIRequireAuthToken { - var err error - n.APIServer, err = server.New( - n.Log, - n.LogFactory, - listener, - n.Config.HTTPAllowedOrigins, - n.Config.ShutdownTimeout, - n.ID, - n.Config.TraceConfig.Enabled, - n.tracer, - "api", - n.MetricsRegisterer, - n.Config.HTTPConfig.HTTPConfig, - n.Config.HTTPAllowedHosts, - ) - return err - } - - a, err := auth.New(n.Log, "auth", n.Config.APIAuthPassword) + apiRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + apiNamespace, + ) if err != nil { return err } @@ -1031,23 +1045,11 @@ func (n *Node) initAPIServer() error { n.ID, n.Config.TraceConfig.Enabled, n.tracer, - "api", - n.MetricsRegisterer, + apiRegisterer, n.Config.HTTPConfig.HTTPConfig, n.Config.HTTPAllowedHosts, - a, ) - if err != nil { - return err - } - - // only create auth service if token authorization is required - n.Log.Info("API authorization is enabled. 
Auth tokens must be passed in the header of API requests, except requests to the auth service.") - handler, err := a.CreateHandler() - if err != nil { - return err - } - return n.APIServer.AddRoute(handler, "auth", "") + return err } // Add the default VM aliases @@ -1087,11 +1089,27 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { cChainID, ) + requestsReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + requestsNamespace, + ) + if err != nil { + return err + } + + responseReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + responsesNamespace, + ) + if err != nil { + return err + } + n.timeoutManager, err = timeout.NewManager( &n.Config.AdaptiveTimeoutConfig, n.benchlistManager, - "requests", - n.MetricsRegisterer, + requestsReg, + responseReg, ) if err != nil { return err @@ -1109,8 +1127,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { n.Config.TrackedSubnets, n.Shutdown, n.Config.RouterHealthConfig, - "requests", - n.MetricsRegisterer, + requestsReg, ) if err != nil { return fmt.Errorf("couldn't initialize chain router: %w", err) @@ -1120,10 +1137,12 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { if err != nil { return fmt.Errorf("failed to initialize subnets: %w", err) } - n.chainManager = chains.New( + + n.chainManager, err = chains.New( &chains.ManagerConfig{ SybilProtectionEnabled: n.Config.SybilProtectionEnabled, - StakingTLSCert: n.Config.StakingTLSCert, + StakingTLSSigner: n.StakingTLSSigner, + StakingTLSCert: n.StakingTLSCert, StakingBLSKey: n.Config.StakingSigningKey, Log: n.Log, LogFactory: n.LogFactory, @@ -1151,6 +1170,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { ShutdownNodeFunc: n.Shutdown, MeterVMEnabled: n.Config.MeterVMEnabled, Metrics: n.MetricsGatherer, + MeterDBMetrics: n.MeterDBMetricsGatherer, SubnetConfigs: n.Config.SubnetConfigs, ChainConfigs: n.Config.ChainConfigs, FrontierPollFrequency: n.Config.FrontierPollFrequency, @@ -1168,6 +1188,9 @@ func (n *Node) 
initChainManager(avaxAssetID ids.ID) error { Subnets: subnets, }, ) + if err != nil { + return err + } // Notify the API server when new chains are created n.chainManager.AddRegistrant(n.APIServer) @@ -1187,57 +1210,42 @@ func (n *Node) initVMs() error { vdrs = validators.NewManager() } - durangoTime := version.GetDurangoTime(n.Config.NetworkID) - if err := txs.InitCodec(durangoTime); err != nil { - return err - } - if err := block.InitCodec(durangoTime); err != nil { - return err - } - if err := coreth.InitCodec(durangoTime); err != nil { - return err - } - // Register the VMs that Avalanche supports + eUpgradeTime := version.GetEUpgradeTime(n.Config.NetworkID) err := utils.Err( n.VMManager.RegisterFactory(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ Config: platformconfig.Config{ - Chains: n.chainManager, - Validators: vdrs, - UptimeLockedCalculator: n.uptimeCalculator, - SybilProtectionEnabled: n.Config.SybilProtectionEnabled, - PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, - TrackedSubnets: n.Config.TrackedSubnets, - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - CreateSubnetTxFee: n.Config.CreateSubnetTxFee, - TransformSubnetTxFee: n.Config.TransformSubnetTxFee, - CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, - AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, - AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - UptimePercentage: n.Config.UptimeRequirement, - MinValidatorStake: n.Config.MinValidatorStake, - MaxValidatorStake: n.Config.MaxValidatorStake, - MinDelegatorStake: n.Config.MinDelegatorStake, - MinDelegationFee: n.Config.MinDelegationFee, - MinStakeDuration: n.Config.MinStakeDuration, - MaxStakeDuration: n.Config.MaxStakeDuration, - RewardConfig: n.Config.RewardConfig, - ApricotPhase3Time: 
version.GetApricotPhase3Time(n.Config.NetworkID), - ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), - BanffTime: version.GetBanffTime(n.Config.NetworkID), - CortinaTime: version.GetCortinaTime(n.Config.NetworkID), - DurangoTime: durangoTime, - UseCurrentHeight: n.Config.UseCurrentHeight, + Chains: n.chainManager, + Validators: vdrs, + UptimeLockedCalculator: n.uptimeCalculator, + SybilProtectionEnabled: n.Config.SybilProtectionEnabled, + PartialSyncPrimaryNetwork: n.Config.PartialSyncPrimaryNetwork, + TrackedSubnets: n.Config.TrackedSubnets, + StaticFeeConfig: n.Config.StaticConfig, + UptimePercentage: n.Config.UptimeRequirement, + MinValidatorStake: n.Config.MinValidatorStake, + MaxValidatorStake: n.Config.MaxValidatorStake, + MinDelegatorStake: n.Config.MinDelegatorStake, + MinDelegationFee: n.Config.MinDelegationFee, + MinStakeDuration: n.Config.MinStakeDuration, + MaxStakeDuration: n.Config.MaxStakeDuration, + RewardConfig: n.Config.RewardConfig, + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), + ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), + BanffTime: version.GetBanffTime(n.Config.NetworkID), + CortinaTime: version.GetCortinaTime(n.Config.NetworkID), + DurangoTime: version.GetDurangoTime(n.Config.NetworkID), + EUpgradeTime: eUpgradeTime, + }, + UseCurrentHeight: n.Config.UseCurrentHeight, }, }), n.VMManager.RegisterFactory(context.TODO(), constants.AVMID, &avm.Factory{ Config: avmconfig.Config{ TxFee: n.Config.TxFee, CreateAssetTxFee: n.Config.CreateAssetTxFee, - DurangoTime: durangoTime, + EUpgradeTime: eUpgradeTime, }, }), n.VMManager.RegisterFactory(context.TODO(), constants.EVMID, &coreth.Factory{}), @@ -1304,19 +1312,23 @@ func (n *Node) initMetricsAPI() error { return nil } - if err := n.MetricsGatherer.Register(constants.PlatformName, n.MetricsRegisterer); err != nil { + processReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + processNamespace, 
+ ) + if err != nil { return err } // Current state of process metrics. processCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) - if err := n.MetricsRegisterer.Register(processCollector); err != nil { + if err := processReg.Register(processCollector); err != nil { return err } // Go process metrics using debug.GCStats. goCollector := collectors.NewGoCollector() - if err := n.MetricsRegisterer.Register(goCollector); err != nil { + if err := processReg.Register(goCollector); err != nil { return err } @@ -1433,11 +1445,18 @@ func (n *Node) initInfoAPI() error { // initHealthAPI initializes the Health API service // Assumes n.Log, n.Net, n.APIServer, n.HTTPLog already initialized func (n *Node) initHealthAPI() error { - healthChecker, err := health.New(n.Log, n.MetricsRegisterer) + healthReg, err := metrics.MakeAndRegister( + n.MetricsGatherer, + healthNamespace, + ) + if err != nil { + return err + } + + n.health, err = health.New(n.Log, healthReg) if err != nil { return err } - n.health = healthChecker if !n.Config.HealthAPIEnabled { n.Log.Info("skipping health API initialization because it has been disabled") @@ -1445,18 +1464,18 @@ func (n *Node) initHealthAPI() error { } n.Log.Info("initializing Health API") - err = healthChecker.RegisterHealthCheck("network", n.Net, health.ApplicationTag) + err = n.health.RegisterHealthCheck("network", n.Net, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register network health check: %w", err) } - err = healthChecker.RegisterHealthCheck("router", n.chainRouter, health.ApplicationTag) + err = n.health.RegisterHealthCheck("router", n.chainRouter, health.ApplicationTag) if err != nil { return fmt.Errorf("couldn't register router health check: %w", err) } // TODO: add database health to liveness check - err = healthChecker.RegisterHealthCheck("database", n.DB, health.ApplicationTag) + err = n.health.RegisterHealthCheck("database", n.DB, health.ApplicationTag) if err != nil { return 
fmt.Errorf("couldn't register database health check: %w", err) } @@ -1488,7 +1507,7 @@ func (n *Node) initHealthAPI() error { return fmt.Errorf("couldn't register resource health check: %w", err) } - handler, err := health.NewGetAndPostHandler(n.Log, healthChecker) + handler, err := health.NewGetAndPostHandler(n.Log, n.health) if err != nil { return err } @@ -1503,7 +1522,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - health.NewGetHandler(healthChecker.Readiness), + health.NewGetHandler(n.health.Readiness), "health", "/readiness", ) @@ -1512,7 +1531,7 @@ func (n *Node) initHealthAPI() error { } err = n.APIServer.AddRoute( - health.NewGetHandler(healthChecker.Health), + health.NewGetHandler(n.health.Health), "health", "/health", ) @@ -1521,31 +1540,12 @@ func (n *Node) initHealthAPI() error { } return n.APIServer.AddRoute( - health.NewGetHandler(healthChecker.Liveness), + health.NewGetHandler(n.health.Liveness), "health", "/liveness", ) } -// initIPCAPI initializes the IPC API service -// Assumes n.log and n.chainManager already initialized -func (n *Node) initIPCAPI() error { - if !n.Config.IPCAPIEnabled { - n.Log.Info("skipping ipc API initialization because it has been disabled") - return nil - } - n.Log.Warn("initializing deprecated ipc API") - service, err := ipcsapi.NewService(n.Log, n.chainManager, n.IPCs) - if err != nil { - return err - } - return n.APIServer.AddRoute( - service, - "ipcs", - "", - ) -} - // Give chains aliases as specified by the genesis information func (n *Node) initChainAliases(genesisBytes []byte) error { n.Log.Info("initializing chain aliases") @@ -1590,14 +1590,21 @@ func (n *Node) initAPIAliases(genesisBytes []byte) error { } // Initialize [n.resourceManager]. 
-func (n *Node) initResourceManager(reg prometheus.Registerer) error { +func (n *Node) initResourceManager() error { + systemResourcesRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + systemResourcesNamespace, + ) + if err != nil { + return err + } resourceManager, err := resource.NewManager( n.Log, n.Config.DatabaseConfig.Path, n.Config.SystemTrackerFrequency, n.Config.SystemTrackerCPUHalflife, n.Config.SystemTrackerDiskHalflife, - reg, + systemResourcesRegisterer, ) if err != nil { return err @@ -1605,7 +1612,19 @@ func (n *Node) initResourceManager(reg prometheus.Registerer) error { n.resourceManager = resourceManager n.resourceManager.TrackProcess(os.Getpid()) - n.resourceTracker, err = tracker.NewResourceTracker(reg, n.resourceManager, &meter.ContinuousFactory{}, n.Config.SystemTrackerProcessingHalflife) + resourceTrackerRegisterer, err := metrics.MakeAndRegister( + n.MetricsGatherer, + resourceTrackerNamespace, + ) + if err != nil { + return err + } + n.resourceTracker, err = tracker.NewResourceTracker( + resourceTrackerRegisterer, + n.resourceManager, + &meter.ContinuousFactory{}, + n.Config.SystemTrackerProcessingHalflife, + ) return err } @@ -1671,13 +1690,6 @@ func (n *Node) shutdown() { if n.resourceManager != nil { n.resourceManager.Shutdown() } - if n.IPCs != nil { - if err := n.IPCs.Shutdown(); err != nil { - n.Log.Debug("error during IPC shutdown", - zap.Error(err), - ) - } - } n.timeoutManager.Stop() if n.chainManager != nil { n.chainManager.Shutdown() diff --git a/node/overridden_manager.go b/node/overridden_manager.go index 4dd49b65eab6..484fe05da758 100644 --- a/node/overridden_manager.go +++ b/node/overridden_manager.go @@ -72,8 +72,12 @@ func (o *overriddenManager) GetMap(ids.ID) map[ids.NodeID]*validators.GetValidat return o.manager.GetMap(o.subnetID) } -func (o *overriddenManager) RegisterCallbackListener(_ ids.ID, listener validators.SetCallbackListener) { - o.manager.RegisterCallbackListener(o.subnetID, listener) +func (o 
*overriddenManager) RegisterCallbackListener(listener validators.ManagerCallbackListener) { + o.manager.RegisterCallbackListener(listener) +} + +func (o *overriddenManager) RegisterSetCallbackListener(_ ids.ID, listener validators.SetCallbackListener) { + o.manager.RegisterSetCallbackListener(o.subnetID, listener) } func (o *overriddenManager) String() string { diff --git a/proto/Dockerfile.buf b/proto/Dockerfile.buf deleted file mode 100644 index 3007f58667b3..000000000000 --- a/proto/Dockerfile.buf +++ /dev/null @@ -1,22 +0,0 @@ -FROM bufbuild/buf:1.29.0 AS builder - -FROM ubuntu:20.04 - -RUN apt-get update && apt -y install bash curl unzip git -WORKDIR /opt - -RUN \ - curl -L https://go.dev/dl/go1.21.7.linux-amd64.tar.gz > golang.tar.gz && \ - mkdir golang && \ - tar -zxvf golang.tar.gz -C golang/ - -ENV PATH="${PATH}:/opt/golang/go/bin" - -COPY --from=builder /usr/local/bin/buf /usr/local/bin/ - -# any version changes here should also be bumped in scripts/protobuf_codegen.sh -RUN \ - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30.0 && \ - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.3.0 - -ENV PATH="${PATH}:/root/go/bin/" diff --git a/proto/README.md b/proto/README.md index 34d4228571eb..e1ef00f4e996 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,6 +1,6 @@ # Avalanche gRPC -Now Serving: **Protocol Version 30** +Now Serving: **Protocol Version 35** Protobuf files are hosted at [https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and @@ -10,8 +10,7 @@ Protobuf linting and generation for this project is managed by [buf](https://github.com/bufbuild/buf). Please find installation instructions on -[https://docs.buf.build/installation/](https://docs.buf.build/installation/) or -use `Dockerfile.buf` provided in the `proto/` directory of AvalancheGo. +[https://docs.buf.build/installation/](https://docs.buf.build/installation/). 
Any changes made to proto definition can be updated by running `protobuf_codegen.sh` located in the `scripts/` directory of AvalancheGo. @@ -34,4 +33,4 @@ subnet vm must use the same protocol version to be compatible. - Publish new tag to buf registry. `buf push -t v26` Note: Publishing requires auth to the ava-labs org in buf -https://buf.build/ava-labs/repositories \ No newline at end of file +https://buf.build/ava-labs/repositories diff --git a/proto/appsender/appsender.proto b/proto/appsender/appsender.proto index 1d7cdac8992e..5a03d449359e 100644 --- a/proto/appsender/appsender.proto +++ b/proto/appsender/appsender.proto @@ -11,7 +11,6 @@ service AppSender { rpc SendAppResponse(SendAppResponseMsg) returns (google.protobuf.Empty); rpc SendAppError(SendAppErrorMsg) returns (google.protobuf.Empty); rpc SendAppGossip(SendAppGossipMsg) returns (google.protobuf.Empty); - rpc SendAppGossipSpecific(SendAppGossipSpecificMsg) returns (google.protobuf.Empty); rpc SendCrossChainAppRequest(SendCrossChainAppRequestMsg) returns (google.protobuf.Empty); rpc SendCrossChainAppResponse(SendCrossChainAppResponseMsg) returns (google.protobuf.Empty); @@ -48,15 +47,13 @@ message SendAppErrorMsg { } message SendAppGossipMsg { - // The message body - bytes msg = 1; -} - -message SendAppGossipSpecificMsg { - // The nodes to send this request to + // Who to send this message to repeated bytes node_ids = 1; + uint64 validators = 2; + uint64 non_validators = 3; + uint64 peers = 4; // The message body - bytes msg = 2; + bytes msg = 5; } message SendCrossChainAppRequestMsg { diff --git a/proto/http/http.proto b/proto/http/http.proto index bb281cb6f647..614a231436bd 100644 --- a/proto/http/http.proto +++ b/proto/http/http.proto @@ -45,14 +45,14 @@ message Userinfo { string username = 1; // password is the password for the user string password = 2; - // password_set is a boolean which is true if the passord is set + // password_set is a boolean which is true if the password is set bool 
password_set = 3; } message Element { // key is a element key in a key value pair string key = 1; - // values are a list of strings coresponding to the key + // values are a list of strings corresponding to the key repeated string values = 2; } diff --git a/proto/message/tx.proto b/proto/message/tx.proto deleted file mode 100644 index d651e7c07278..000000000000 --- a/proto/message/tx.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package message; - -option go_package = "github.com/ava-labs/avalanchego/proto/pb/message"; - -message Message { - oneof message { - Tx tx = 1; - } -} - -message Tx { - // The byte representation of this transaction. - bytes tx = 1; -} diff --git a/proto/p2p/p2p.proto b/proto/p2p/p2p.proto index 71a7c4f8f847..53c0c84de303 100644 --- a/proto/p2p/p2p.proto +++ b/proto/p2p/p2p.proto @@ -8,17 +8,12 @@ option go_package = "github.com/ava-labs/avalanchego/proto/pb/p2p"; // Represents peer-to-peer messages. // Only one type can be non-null. message Message { - reserved 33; // Until after durango activation. + reserved 1; // Until E upgrade is activated. reserved 36; // Next unused field number. // NOTES // Use "oneof" for each message type and set rest to null if not used. // That is because when the compression is enabled, we don't want to include uncompressed fields. oneof message { - // Gzip-compressed bytes of a "p2p.Message" whose "oneof" "message" field is - // NOT compressed_* BUT one of the message types (e.g. ping, pong, etc.). - // This field is only set if the message type supports compression. - bytes compressed_gzip = 1; - // zstd-compressed bytes of a "p2p.Message" whose "oneof" "message" field is // NOT compressed_* BUT one of the message types (e.g. ping, pong, etc.). // This field is only set if the message type supports compression. @@ -81,15 +76,9 @@ message SubnetUptime { uint32 uptime = 2; } -// Pong is sent in response to a Ping with the perceived uptime of the -// peer. +// Pong is sent in response to a Ping. 
message Pong { - // Deprecated: uptime is now sent in Ping - // Uptime percentage on the primary network [0, 100] - uint32 uptime = 1; - // Deprecated: uptime is now sent in Ping - // Uptime percentage on subnets - repeated SubnetUptime subnet_uptimes = 2; + reserved 1, 2; // Until E upgrade is activated. } // Handshake is the first outbound message sent to a peer when a connection is @@ -100,6 +89,7 @@ message Pong { // // Peers should drop connections to peers with incompatible versions. message Handshake { + reserved 5; // Until E upgrade is activated. // Network the peer is running on (e.g local, testnet, mainnet) uint32 network_id = 1; // Unix timestamp when this Handshake message was created @@ -108,8 +98,6 @@ message Handshake { bytes ip_addr = 3; // IP port of the peer uint32 ip_port = 4; - // Avalanche client version - string my_version = 5; // Timestamp of the IP uint64 ip_signing_time = 6; // Signature of the peer IP port pair at a provided timestamp with the TLS @@ -237,21 +225,19 @@ enum EngineType { // // Peers should respond to GetAcceptedFrontier with AcceptedFrontier. message GetAcceptedFrontier { + reserved 4; // Chain being requested from bytes chain_id = 1; // Unique identifier for this request uint32 request_id = 2; // Timeout (ns) for this request uint64 deadline = 3; - // Consensus type the remote peer should use to handle this message - EngineType engine_type = 4; } // AcceptedFrontier contains the remote peer's last accepted frontier. // // AcceptedFrontier is sent in response to GetAcceptedFrontier. message AcceptedFrontier { - reserved 4; // Until Cortina upgrade is activated // Chain being responded from bytes chain_id = 1; // Request id of the original GetAcceptedFrontier request @@ -265,6 +251,7 @@ message AcceptedFrontier { // // Peers should respond to GetAccepted with an Accepted message. 
message GetAccepted { + reserved 5; // Chain being requested from bytes chain_id = 1; // Unique identifier for this message @@ -273,15 +260,12 @@ message GetAccepted { uint64 deadline = 3; // The sender's accepted frontier repeated bytes container_ids = 4; - // Consensus type to handle this message - EngineType engine_type = 5; } // Accepted is sent in response to GetAccepted. The sending peer responds with // a subset of container ids from the GetAccepted request that the sending peer // has accepted. message Accepted { - reserved 4; // Until Cortina upgrade is activated // Chain being responded from bytes chain_id = 1; // Request id of the original GetAccepted request @@ -312,7 +296,6 @@ message GetAncestors { // Ancestors contains a contiguous ancestry of containers for the requested // container in order of increasing block height. message Ancestors { - reserved 4; // Until Cortina upgrade is activated // Chain being responded from bytes chain_id = 1; // Request id of the original GetAncestors request @@ -325,6 +308,7 @@ message Ancestors { // // Remote peers should respond with a Put message if they have the container. message Get { + reserved 5; // Chain being requested from bytes chain_id = 1; // Unique identifier for this request @@ -333,8 +317,6 @@ message Get { uint64 deadline = 3; // Container being requested bytes container_id = 4; - // Consensus type to handle this message - EngineType engine_type = 5; } // Put is sent in response to Get with the requested block. @@ -345,14 +327,13 @@ message Put { uint32 request_id = 2; // Requested container bytes container = 3; - // Consensus type to handle this message - EngineType engine_type = 4; } // PushQuery requests the preferences of a remote peer given a container. 
// // Remote peers should respond to a PushQuery with a Chits message message PushQuery { + reserved 5; // Chain being requested from bytes chain_id = 1; // Unique identifier for this request @@ -361,8 +342,6 @@ message PushQuery { uint64 deadline = 3; // Container being gossiped bytes container = 4; - // Consensus type to handle this message - EngineType engine_type = 5; // Requesting peer's last accepted height uint64 requested_height = 6; } @@ -371,6 +350,7 @@ message PushQuery { // // Remote peers should respond to a PullQuery with a Chits message message PullQuery { + reserved 5; // Chain being requested from bytes chain_id = 1; // Unique identifier for this request @@ -379,8 +359,6 @@ message PullQuery { uint64 deadline = 3; // Container id being gossiped bytes container_id = 4; - // Consensus type to handle this message - EngineType engine_type = 5; // Requesting peer's last accepted height uint64 requested_height = 6; } diff --git a/proto/pb/aliasreader/aliasreader.pb.go b/proto/pb/aliasreader/aliasreader.pb.go index 20084292d030..4fbac30f6659 100644 --- a/proto/pb/aliasreader/aliasreader.pb.go +++ b/proto/pb/aliasreader/aliasreader.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: aliasreader/aliasreader.proto diff --git a/proto/pb/appsender/appsender.pb.go b/proto/pb/appsender/appsender.pb.go index 416faab9bce0..0513d08bb736 100644 --- a/proto/pb/appsender/appsender.pb.go +++ b/proto/pb/appsender/appsender.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: appsender/appsender.proto @@ -233,8 +233,13 @@ type SendAppGossipMsg struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Who to send this message to + NodeIds [][]byte `protobuf:"bytes,1,rep,name=node_ids,json=nodeIds,proto3" json:"node_ids,omitempty"` + Validators uint64 `protobuf:"varint,2,opt,name=validators,proto3" json:"validators,omitempty"` + NonValidators uint64 `protobuf:"varint,3,opt,name=non_validators,json=nonValidators,proto3" json:"non_validators,omitempty"` + Peers uint64 `protobuf:"varint,4,opt,name=peers,proto3" json:"peers,omitempty"` // The message body - Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` + Msg []byte `protobuf:"bytes,5,opt,name=msg,proto3" json:"msg,omitempty"` } func (x *SendAppGossipMsg) Reset() { @@ -269,64 +274,35 @@ func (*SendAppGossipMsg) Descriptor() ([]byte, []int) { return file_appsender_appsender_proto_rawDescGZIP(), []int{3} } -func (x *SendAppGossipMsg) GetMsg() []byte { +func (x *SendAppGossipMsg) GetNodeIds() [][]byte { if x != nil { - return x.Msg + return x.NodeIds } return nil } -type SendAppGossipSpecificMsg struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The nodes to send this request to - NodeIds [][]byte `protobuf:"bytes,1,rep,name=node_ids,json=nodeIds,proto3" json:"node_ids,omitempty"` - // The message body - Msg []byte `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` -} - -func (x *SendAppGossipSpecificMsg) Reset() { - *x = SendAppGossipSpecificMsg{} - if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SendAppGossipMsg) GetValidators() uint64 { + if x != nil { + return x.Validators } + return 0 } -func (x *SendAppGossipSpecificMsg) String() string { - 
return protoimpl.X.MessageStringOf(x) -} - -func (*SendAppGossipSpecificMsg) ProtoMessage() {} - -func (x *SendAppGossipSpecificMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SendAppGossipMsg) GetNonValidators() uint64 { + if x != nil { + return x.NonValidators } - return mi.MessageOf(x) -} - -// Deprecated: Use SendAppGossipSpecificMsg.ProtoReflect.Descriptor instead. -func (*SendAppGossipSpecificMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{4} + return 0 } -func (x *SendAppGossipSpecificMsg) GetNodeIds() [][]byte { +func (x *SendAppGossipMsg) GetPeers() uint64 { if x != nil { - return x.NodeIds + return x.Peers } - return nil + return 0 } -func (x *SendAppGossipSpecificMsg) GetMsg() []byte { +func (x *SendAppGossipMsg) GetMsg() []byte { if x != nil { return x.Msg } @@ -349,7 +325,7 @@ type SendCrossChainAppRequestMsg struct { func (x *SendCrossChainAppRequestMsg) Reset() { *x = SendCrossChainAppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[5] + mi := &file_appsender_appsender_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -362,7 +338,7 @@ func (x *SendCrossChainAppRequestMsg) String() string { func (*SendCrossChainAppRequestMsg) ProtoMessage() {} func (x *SendCrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[5] + mi := &file_appsender_appsender_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -375,7 +351,7 @@ func (x *SendCrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use 
SendCrossChainAppRequestMsg.ProtoReflect.Descriptor instead. func (*SendCrossChainAppRequestMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{5} + return file_appsender_appsender_proto_rawDescGZIP(), []int{4} } func (x *SendCrossChainAppRequestMsg) GetChainId() []byte { @@ -415,7 +391,7 @@ type SendCrossChainAppResponseMsg struct { func (x *SendCrossChainAppResponseMsg) Reset() { *x = SendCrossChainAppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[6] + mi := &file_appsender_appsender_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -428,7 +404,7 @@ func (x *SendCrossChainAppResponseMsg) String() string { func (*SendCrossChainAppResponseMsg) ProtoMessage() {} func (x *SendCrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[6] + mi := &file_appsender_appsender_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -441,7 +417,7 @@ func (x *SendCrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use SendCrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
func (*SendCrossChainAppResponseMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{6} + return file_appsender_appsender_proto_rawDescGZIP(), []int{5} } func (x *SendCrossChainAppResponseMsg) GetChainId() []byte { @@ -483,7 +459,7 @@ type SendCrossChainAppErrorMsg struct { func (x *SendCrossChainAppErrorMsg) Reset() { *x = SendCrossChainAppErrorMsg{} if protoimpl.UnsafeEnabled { - mi := &file_appsender_appsender_proto_msgTypes[7] + mi := &file_appsender_appsender_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -496,7 +472,7 @@ func (x *SendCrossChainAppErrorMsg) String() string { func (*SendCrossChainAppErrorMsg) ProtoMessage() {} func (x *SendCrossChainAppErrorMsg) ProtoReflect() protoreflect.Message { - mi := &file_appsender_appsender_proto_msgTypes[7] + mi := &file_appsender_appsender_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -509,7 +485,7 @@ func (x *SendCrossChainAppErrorMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use SendCrossChainAppErrorMsg.ProtoReflect.Descriptor instead. 
func (*SendCrossChainAppErrorMsg) Descriptor() ([]byte, []int) { - return file_appsender_appsender_proto_rawDescGZIP(), []int{7} + return file_appsender_appsender_proto_rawDescGZIP(), []int{6} } func (x *SendCrossChainAppErrorMsg) GetChainId() []byte { @@ -569,13 +545,16 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x24, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, - 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, - 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x47, 0x0a, 0x18, - 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x63, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, - 0x49, 0x64, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x9c, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x41, + 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x6e, + 0x6f, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x6f, 0x6e, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, + 0x6e, 
0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x70, 0x65, + 0x65, 0x72, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x71, 0x0a, 0x1b, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, @@ -600,7 +579,7 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x8f, 0x05, 0x0a, 0x09, 0x41, + 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xb9, 0x04, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, @@ -619,33 +598,28 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x1b, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x54, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, - 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x12, 0x23, 0x2e, - 0x61, 0x70, 0x70, 
0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, - 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x4d, - 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5a, 0x0a, 0x18, 0x53, 0x65, - 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5c, 0x0a, 0x19, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5a, 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, + 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x26, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, + 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x12, 0x5c, 0x0a, 0x19, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x2e, + 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x27, 0x2e, 0x61, 0x70, 0x70, 0x73, 
0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, - 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x56, 0x0a, 0x16, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, - 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x24, - 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x34, 0x5a, 0x32, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, - 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x56, + 0x0a, 0x16, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x24, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, + 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x2f, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -660,37 +634,34 @@ func file_appsender_appsender_proto_rawDescGZIP() []byte { return file_appsender_appsender_proto_rawDescData } -var file_appsender_appsender_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_appsender_appsender_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_appsender_appsender_proto_goTypes = []interface{}{ (*SendAppRequestMsg)(nil), // 0: appsender.SendAppRequestMsg (*SendAppResponseMsg)(nil), // 1: appsender.SendAppResponseMsg (*SendAppErrorMsg)(nil), // 2: appsender.SendAppErrorMsg (*SendAppGossipMsg)(nil), // 3: appsender.SendAppGossipMsg - (*SendAppGossipSpecificMsg)(nil), // 4: appsender.SendAppGossipSpecificMsg - (*SendCrossChainAppRequestMsg)(nil), // 5: appsender.SendCrossChainAppRequestMsg - (*SendCrossChainAppResponseMsg)(nil), // 6: appsender.SendCrossChainAppResponseMsg - (*SendCrossChainAppErrorMsg)(nil), // 7: appsender.SendCrossChainAppErrorMsg - (*emptypb.Empty)(nil), // 8: google.protobuf.Empty + (*SendCrossChainAppRequestMsg)(nil), // 4: appsender.SendCrossChainAppRequestMsg + (*SendCrossChainAppResponseMsg)(nil), // 5: appsender.SendCrossChainAppResponseMsg + (*SendCrossChainAppErrorMsg)(nil), // 6: appsender.SendCrossChainAppErrorMsg + (*emptypb.Empty)(nil), // 7: google.protobuf.Empty } var file_appsender_appsender_proto_depIdxs = []int32{ 0, // 0: appsender.AppSender.SendAppRequest:input_type -> appsender.SendAppRequestMsg 1, // 1: appsender.AppSender.SendAppResponse:input_type -> appsender.SendAppResponseMsg 2, // 2: appsender.AppSender.SendAppError:input_type -> appsender.SendAppErrorMsg 3, // 3: 
appsender.AppSender.SendAppGossip:input_type -> appsender.SendAppGossipMsg - 4, // 4: appsender.AppSender.SendAppGossipSpecific:input_type -> appsender.SendAppGossipSpecificMsg - 5, // 5: appsender.AppSender.SendCrossChainAppRequest:input_type -> appsender.SendCrossChainAppRequestMsg - 6, // 6: appsender.AppSender.SendCrossChainAppResponse:input_type -> appsender.SendCrossChainAppResponseMsg - 7, // 7: appsender.AppSender.SendCrossChainAppError:input_type -> appsender.SendCrossChainAppErrorMsg - 8, // 8: appsender.AppSender.SendAppRequest:output_type -> google.protobuf.Empty - 8, // 9: appsender.AppSender.SendAppResponse:output_type -> google.protobuf.Empty - 8, // 10: appsender.AppSender.SendAppError:output_type -> google.protobuf.Empty - 8, // 11: appsender.AppSender.SendAppGossip:output_type -> google.protobuf.Empty - 8, // 12: appsender.AppSender.SendAppGossipSpecific:output_type -> google.protobuf.Empty - 8, // 13: appsender.AppSender.SendCrossChainAppRequest:output_type -> google.protobuf.Empty - 8, // 14: appsender.AppSender.SendCrossChainAppResponse:output_type -> google.protobuf.Empty - 8, // 15: appsender.AppSender.SendCrossChainAppError:output_type -> google.protobuf.Empty - 8, // [8:16] is the sub-list for method output_type - 0, // [0:8] is the sub-list for method input_type + 4, // 4: appsender.AppSender.SendCrossChainAppRequest:input_type -> appsender.SendCrossChainAppRequestMsg + 5, // 5: appsender.AppSender.SendCrossChainAppResponse:input_type -> appsender.SendCrossChainAppResponseMsg + 6, // 6: appsender.AppSender.SendCrossChainAppError:input_type -> appsender.SendCrossChainAppErrorMsg + 7, // 7: appsender.AppSender.SendAppRequest:output_type -> google.protobuf.Empty + 7, // 8: appsender.AppSender.SendAppResponse:output_type -> google.protobuf.Empty + 7, // 9: appsender.AppSender.SendAppError:output_type -> google.protobuf.Empty + 7, // 10: appsender.AppSender.SendAppGossip:output_type -> google.protobuf.Empty + 7, // 11: 
appsender.AppSender.SendCrossChainAppRequest:output_type -> google.protobuf.Empty + 7, // 12: appsender.AppSender.SendCrossChainAppResponse:output_type -> google.protobuf.Empty + 7, // 13: appsender.AppSender.SendCrossChainAppError:output_type -> google.protobuf.Empty + 7, // [7:14] is the sub-list for method output_type + 0, // [0:7] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -751,18 +722,6 @@ func file_appsender_appsender_proto_init() { } } file_appsender_appsender_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SendAppGossipSpecificMsg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_appsender_appsender_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SendCrossChainAppRequestMsg); i { case 0: return &v.state @@ -774,7 +733,7 @@ func file_appsender_appsender_proto_init() { return nil } } - file_appsender_appsender_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_appsender_appsender_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SendCrossChainAppResponseMsg); i { case 0: return &v.state @@ -786,7 +745,7 @@ func file_appsender_appsender_proto_init() { return nil } } - file_appsender_appsender_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_appsender_appsender_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SendCrossChainAppErrorMsg); i { case 0: return &v.state @@ -805,7 +764,7 @@ func file_appsender_appsender_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_appsender_appsender_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, diff --git 
a/proto/pb/appsender/appsender_grpc.pb.go b/proto/pb/appsender/appsender_grpc.pb.go index 6873c7768fb1..2bd4c9cf664f 100644 --- a/proto/pb/appsender/appsender_grpc.pb.go +++ b/proto/pb/appsender/appsender_grpc.pb.go @@ -24,7 +24,6 @@ const ( AppSender_SendAppResponse_FullMethodName = "/appsender.AppSender/SendAppResponse" AppSender_SendAppError_FullMethodName = "/appsender.AppSender/SendAppError" AppSender_SendAppGossip_FullMethodName = "/appsender.AppSender/SendAppGossip" - AppSender_SendAppGossipSpecific_FullMethodName = "/appsender.AppSender/SendAppGossipSpecific" AppSender_SendCrossChainAppRequest_FullMethodName = "/appsender.AppSender/SendCrossChainAppRequest" AppSender_SendCrossChainAppResponse_FullMethodName = "/appsender.AppSender/SendCrossChainAppResponse" AppSender_SendCrossChainAppError_FullMethodName = "/appsender.AppSender/SendCrossChainAppError" @@ -38,7 +37,6 @@ type AppSenderClient interface { SendAppResponse(ctx context.Context, in *SendAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppError(ctx context.Context, in *SendAppErrorMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppGossip(ctx context.Context, in *SendAppGossipMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) - SendAppGossipSpecific(ctx context.Context, in *SendAppGossipSpecificMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendCrossChainAppRequest(ctx context.Context, in *SendCrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendCrossChainAppResponse(ctx context.Context, in *SendCrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendCrossChainAppError(ctx context.Context, in *SendCrossChainAppErrorMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) @@ -88,15 +86,6 @@ func (c *appSenderClient) SendAppGossip(ctx context.Context, in *SendAppGossipMs return out, nil } -func (c *appSenderClient) SendAppGossipSpecific(ctx context.Context, in *SendAppGossipSpecificMsg, opts ...grpc.CallOption) 
(*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, AppSender_SendAppGossipSpecific_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *appSenderClient) SendCrossChainAppRequest(ctx context.Context, in *SendCrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) err := c.cc.Invoke(ctx, AppSender_SendCrossChainAppRequest_FullMethodName, in, out, opts...) @@ -132,7 +121,6 @@ type AppSenderServer interface { SendAppResponse(context.Context, *SendAppResponseMsg) (*emptypb.Empty, error) SendAppError(context.Context, *SendAppErrorMsg) (*emptypb.Empty, error) SendAppGossip(context.Context, *SendAppGossipMsg) (*emptypb.Empty, error) - SendAppGossipSpecific(context.Context, *SendAppGossipSpecificMsg) (*emptypb.Empty, error) SendCrossChainAppRequest(context.Context, *SendCrossChainAppRequestMsg) (*emptypb.Empty, error) SendCrossChainAppResponse(context.Context, *SendCrossChainAppResponseMsg) (*emptypb.Empty, error) SendCrossChainAppError(context.Context, *SendCrossChainAppErrorMsg) (*emptypb.Empty, error) @@ -155,9 +143,6 @@ func (UnimplementedAppSenderServer) SendAppError(context.Context, *SendAppErrorM func (UnimplementedAppSenderServer) SendAppGossip(context.Context, *SendAppGossipMsg) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SendAppGossip not implemented") } -func (UnimplementedAppSenderServer) SendAppGossipSpecific(context.Context, *SendAppGossipSpecificMsg) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method SendAppGossipSpecific not implemented") -} func (UnimplementedAppSenderServer) SendCrossChainAppRequest(context.Context, *SendCrossChainAppRequestMsg) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SendCrossChainAppRequest not implemented") } @@ -252,24 +237,6 @@ func _AppSender_SendAppGossip_Handler(srv interface{}, ctx 
context.Context, dec return interceptor(ctx, in, info, handler) } -func _AppSender_SendAppGossipSpecific_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SendAppGossipSpecificMsg) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AppSenderServer).SendAppGossipSpecific(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: AppSender_SendAppGossipSpecific_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AppSenderServer).SendAppGossipSpecific(ctx, req.(*SendAppGossipSpecificMsg)) - } - return interceptor(ctx, in, info, handler) -} - func _AppSender_SendCrossChainAppRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SendCrossChainAppRequestMsg) if err := dec(in); err != nil { @@ -347,10 +314,6 @@ var AppSender_ServiceDesc = grpc.ServiceDesc{ MethodName: "SendAppGossip", Handler: _AppSender_SendAppGossip_Handler, }, - { - MethodName: "SendAppGossipSpecific", - Handler: _AppSender_SendAppGossipSpecific_Handler, - }, { MethodName: "SendCrossChainAppRequest", Handler: _AppSender_SendCrossChainAppRequest_Handler, diff --git a/proto/pb/http/http.pb.go b/proto/pb/http/http.pb.go index 76b6d916d9bb..528eae338fe6 100644 --- a/proto/pb/http/http.pb.go +++ b/proto/pb/http/http.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: http/http.proto @@ -152,7 +152,7 @@ type Userinfo struct { Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` // password is the password for the user Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - // password_set is a boolean which is true if the passord is set + // password_set is a boolean which is true if the password is set PasswordSet bool `protobuf:"varint,3,opt,name=password_set,json=passwordSet,proto3" json:"password_set,omitempty"` } @@ -216,7 +216,7 @@ type Element struct { // key is a element key in a key value pair Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // values are a list of strings coresponding to the key + // values are a list of strings corresponding to the key Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` } diff --git a/proto/pb/http/responsewriter/responsewriter.pb.go b/proto/pb/http/responsewriter/responsewriter.pb.go index a49113f3bb5a..008ef7ad006a 100644 --- a/proto/pb/http/responsewriter/responsewriter.pb.go +++ b/proto/pb/http/responsewriter/responsewriter.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: http/responsewriter/responsewriter.proto diff --git a/proto/pb/io/reader/reader.pb.go b/proto/pb/io/reader/reader.pb.go index 34977aa39422..110fc4719380 100644 --- a/proto/pb/io/reader/reader.pb.go +++ b/proto/pb/io/reader/reader.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: io/reader/reader.proto diff --git a/proto/pb/io/writer/writer.pb.go b/proto/pb/io/writer/writer.pb.go index 850afaa8c2cf..80984dbcfbd7 100644 --- a/proto/pb/io/writer/writer.pb.go +++ b/proto/pb/io/writer/writer.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: io/writer/writer.proto diff --git a/proto/pb/keystore/keystore.pb.go b/proto/pb/keystore/keystore.pb.go index ed9d3813f6c3..97620355c820 100644 --- a/proto/pb/keystore/keystore.pb.go +++ b/proto/pb/keystore/keystore.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: keystore/keystore.proto diff --git a/proto/pb/message/tx.pb.go b/proto/pb/message/tx.pb.go deleted file mode 100644 index 4320fdc0d12b..000000000000 --- a/proto/pb/message/tx.pb.go +++ /dev/null @@ -1,232 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc (unknown) -// source: message/tx.proto - -package message - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Message struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Message: - // - // *Message_Tx - Message isMessage_Message `protobuf_oneof:"message"` -} - -func (x *Message) Reset() { - *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_message_tx_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Message) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Message) ProtoMessage() {} - -func (x *Message) ProtoReflect() protoreflect.Message { - mi := &file_message_tx_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Message.ProtoReflect.Descriptor instead. -func (*Message) Descriptor() ([]byte, []int) { - return file_message_tx_proto_rawDescGZIP(), []int{0} -} - -func (m *Message) GetMessage() isMessage_Message { - if m != nil { - return m.Message - } - return nil -} - -func (x *Message) GetTx() *Tx { - if x, ok := x.GetMessage().(*Message_Tx); ok { - return x.Tx - } - return nil -} - -type isMessage_Message interface { - isMessage_Message() -} - -type Message_Tx struct { - Tx *Tx `protobuf:"bytes,1,opt,name=tx,proto3,oneof"` -} - -func (*Message_Tx) isMessage_Message() {} - -type Tx struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The byte representation of this transaction. 
- Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (x *Tx) Reset() { - *x = Tx{} - if protoimpl.UnsafeEnabled { - mi := &file_message_tx_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Tx) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tx) ProtoMessage() {} - -func (x *Tx) ProtoReflect() protoreflect.Message { - mi := &file_message_tx_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tx.ProtoReflect.Descriptor instead. -func (*Tx) Descriptor() ([]byte, []int) { - return file_message_tx_proto_rawDescGZIP(), []int{1} -} - -func (x *Tx) GetTx() []byte { - if x != nil { - return x.Tx - } - return nil -} - -var File_message_tx_proto protoreflect.FileDescriptor - -var file_message_tx_proto_rawDesc = []byte{ - 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2f, 0x74, 0x78, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x33, 0x0a, 0x07, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x78, 0x48, - 0x00, 0x52, 0x02, 0x74, 0x78, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x14, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x74, 0x78, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 
0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_message_tx_proto_rawDescOnce sync.Once - file_message_tx_proto_rawDescData = file_message_tx_proto_rawDesc -) - -func file_message_tx_proto_rawDescGZIP() []byte { - file_message_tx_proto_rawDescOnce.Do(func() { - file_message_tx_proto_rawDescData = protoimpl.X.CompressGZIP(file_message_tx_proto_rawDescData) - }) - return file_message_tx_proto_rawDescData -} - -var file_message_tx_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_message_tx_proto_goTypes = []interface{}{ - (*Message)(nil), // 0: message.Message - (*Tx)(nil), // 1: message.Tx -} -var file_message_tx_proto_depIdxs = []int32{ - 1, // 0: message.Message.tx:type_name -> message.Tx - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_message_tx_proto_init() } -func file_message_tx_proto_init() { - if File_message_tx_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_message_tx_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_message_tx_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tx); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_message_tx_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Message_Tx)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_message_tx_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: 
file_message_tx_proto_goTypes, - DependencyIndexes: file_message_tx_proto_depIdxs, - MessageInfos: file_message_tx_proto_msgTypes, - }.Build() - File_message_tx_proto = out.File - file_message_tx_proto_rawDesc = nil - file_message_tx_proto_goTypes = nil - file_message_tx_proto_depIdxs = nil -} diff --git a/proto/pb/messenger/messenger.pb.go b/proto/pb/messenger/messenger.pb.go index 830de252a04e..f1aabe97d788 100644 --- a/proto/pb/messenger/messenger.pb.go +++ b/proto/pb/messenger/messenger.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: messenger/messenger.proto diff --git a/proto/pb/net/conn/conn.pb.go b/proto/pb/net/conn/conn.pb.go index 8882e8de45ff..53ce91a4cfef 100644 --- a/proto/pb/net/conn/conn.pb.go +++ b/proto/pb/net/conn/conn.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: net/conn/conn.proto diff --git a/proto/pb/p2p/p2p.pb.go b/proto/pb/p2p/p2p.pb.go index 18ef744e2c9a..bd55ad703546 100644 --- a/proto/pb/p2p/p2p.pb.go +++ b/proto/pb/p2p/p2p.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: p2p/p2p.proto @@ -84,7 +84,6 @@ type Message struct { // // Types that are assignable to Message: // - // *Message_CompressedGzip // *Message_CompressedZstd // *Message_Ping // *Message_Pong @@ -152,13 +151,6 @@ func (m *Message) GetMessage() isMessage_Message { return nil } -func (x *Message) GetCompressedGzip() []byte { - if x, ok := x.GetMessage().(*Message_CompressedGzip); ok { - return x.CompressedGzip - } - return nil -} - func (x *Message) GetCompressedZstd() []byte { if x, ok := x.GetMessage().(*Message_CompressedZstd); ok { return x.CompressedZstd @@ -338,13 +330,6 @@ type isMessage_Message interface { isMessage_Message() } -type Message_CompressedGzip struct { - // Gzip-compressed bytes of a "p2p.Message" whose "oneof" "message" field is - // NOT compressed_* BUT one of the message types (e.g. ping, pong, etc.). - // This field is only set if the message type supports compression. - CompressedGzip []byte `protobuf:"bytes,1,opt,name=compressed_gzip,json=compressedGzip,proto3,oneof"` -} - type Message_CompressedZstd struct { // zstd-compressed bytes of a "p2p.Message" whose "oneof" "message" field is // NOT compressed_* BUT one of the message types (e.g. ping, pong, etc.). @@ -453,8 +438,6 @@ type Message_AppError struct { AppError *AppError `protobuf:"bytes,34,opt,name=app_error,json=appError,proto3,oneof"` } -func (*Message_CompressedGzip) isMessage_Message() {} - func (*Message_CompressedZstd) isMessage_Message() {} func (*Message_Ping) isMessage_Message() {} @@ -623,19 +606,11 @@ func (x *SubnetUptime) GetUptime() uint32 { return 0 } -// Pong is sent in response to a Ping with the perceived uptime of the -// peer. +// Pong is sent in response to a Ping. 
type Pong struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Deprecated: uptime is now sent in Ping - // Uptime percentage on the primary network [0, 100] - Uptime uint32 `protobuf:"varint,1,opt,name=uptime,proto3" json:"uptime,omitempty"` - // Deprecated: uptime is now sent in Ping - // Uptime percentage on subnets - SubnetUptimes []*SubnetUptime `protobuf:"bytes,2,rep,name=subnet_uptimes,json=subnetUptimes,proto3" json:"subnet_uptimes,omitempty"` } func (x *Pong) Reset() { @@ -670,20 +645,6 @@ func (*Pong) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{3} } -func (x *Pong) GetUptime() uint32 { - if x != nil { - return x.Uptime - } - return 0 -} - -func (x *Pong) GetSubnetUptimes() []*SubnetUptime { - if x != nil { - return x.SubnetUptimes - } - return nil -} - // Handshake is the first outbound message sent to a peer when a connection is // established to start the p2p handshake. // @@ -704,8 +665,6 @@ type Handshake struct { IpAddr []byte `protobuf:"bytes,3,opt,name=ip_addr,json=ipAddr,proto3" json:"ip_addr,omitempty"` // IP port of the peer IpPort uint32 `protobuf:"varint,4,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` - // Avalanche client version - MyVersion string `protobuf:"bytes,5,opt,name=my_version,json=myVersion,proto3" json:"my_version,omitempty"` // Timestamp of the IP IpSigningTime uint64 `protobuf:"varint,6,opt,name=ip_signing_time,json=ipSigningTime,proto3" json:"ip_signing_time,omitempty"` // Signature of the peer IP port pair at a provided timestamp with the TLS @@ -782,13 +741,6 @@ func (x *Handshake) GetIpPort() uint32 { return 0 } -func (x *Handshake) GetMyVersion() string { - if x != nil { - return x.MyVersion - } - return "" -} - func (x *Handshake) GetIpSigningTime() uint64 { if x != nil { return x.IpSigningTime @@ -1471,8 +1423,6 @@ type GetAcceptedFrontier struct { RequestId uint32 
`protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` // Timeout (ns) for this request Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - // Consensus type the remote peer should use to handle this message - EngineType EngineType `protobuf:"varint,4,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAcceptedFrontier) Reset() { @@ -1528,13 +1478,6 @@ func (x *GetAcceptedFrontier) GetDeadline() uint64 { return 0 } -func (x *GetAcceptedFrontier) GetEngineType() EngineType { - if x != nil { - return x.EngineType - } - return EngineType_ENGINE_TYPE_UNSPECIFIED -} - // AcceptedFrontier contains the remote peer's last accepted frontier. // // AcceptedFrontier is sent in response to GetAcceptedFrontier. @@ -1621,8 +1564,6 @@ type GetAccepted struct { Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` // The sender's accepted frontier ContainerIds [][]byte `protobuf:"bytes,4,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` - // Consensus type to handle this message - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAccepted) Reset() { @@ -1685,13 +1626,6 @@ func (x *GetAccepted) GetContainerIds() [][]byte { return nil } -func (x *GetAccepted) GetEngineType() EngineType { - if x != nil { - return x.EngineType - } - return EngineType_ENGINE_TYPE_UNSPECIFIED -} - // Accepted is sent in response to GetAccepted. The sending peer responds with // a subset of container ids from the GetAccepted request that the sending peer // has accepted. 
@@ -1935,8 +1869,6 @@ type Get struct { Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` // Container being requested ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // Consensus type to handle this message - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *Get) Reset() { @@ -1999,13 +1931,6 @@ func (x *Get) GetContainerId() []byte { return nil } -func (x *Get) GetEngineType() EngineType { - if x != nil { - return x.EngineType - } - return EngineType_ENGINE_TYPE_UNSPECIFIED -} - // Put is sent in response to Get with the requested block. type Put struct { state protoimpl.MessageState @@ -2018,8 +1943,6 @@ type Put struct { RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` // Requested container Container []byte `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"` - // Consensus type to handle this message - EngineType EngineType `protobuf:"varint,4,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *Put) Reset() { @@ -2075,13 +1998,6 @@ func (x *Put) GetContainer() []byte { return nil } -func (x *Put) GetEngineType() EngineType { - if x != nil { - return x.EngineType - } - return EngineType_ENGINE_TYPE_UNSPECIFIED -} - // PushQuery requests the preferences of a remote peer given a container. 
// // Remote peers should respond to a PushQuery with a Chits message @@ -2098,8 +2014,6 @@ type PushQuery struct { Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` // Container being gossiped Container []byte `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"` - // Consensus type to handle this message - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` // Requesting peer's last accepted height RequestedHeight uint64 `protobuf:"varint,6,opt,name=requested_height,json=requestedHeight,proto3" json:"requested_height,omitempty"` } @@ -2164,13 +2078,6 @@ func (x *PushQuery) GetContainer() []byte { return nil } -func (x *PushQuery) GetEngineType() EngineType { - if x != nil { - return x.EngineType - } - return EngineType_ENGINE_TYPE_UNSPECIFIED -} - func (x *PushQuery) GetRequestedHeight() uint64 { if x != nil { return x.RequestedHeight @@ -2194,8 +2101,6 @@ type PullQuery struct { Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` // Container id being gossiped ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - // Consensus type to handle this message - EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` // Requesting peer's last accepted height RequestedHeight uint64 `protobuf:"varint,6,opt,name=requested_height,json=requestedHeight,proto3" json:"requested_height,omitempty"` } @@ -2260,13 +2165,6 @@ func (x *PullQuery) GetContainerId() []byte { return nil } -func (x *PullQuery) GetEngineType() EngineType { - if x != nil { - return x.EngineType - } - return EngineType_ENGINE_TYPE_UNSPECIFIED -} - func (x *PullQuery) GetRequestedHeight() uint64 { if x != nil { return x.RequestedHeight @@ -2644,122 +2542,113 @@ var File_p2p_p2p_proto 
protoreflect.FileDescriptor var file_p2p_p2p_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x32, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x70, 0x32, 0x70, 0x22, 0x9e, 0x0b, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x29, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x67, - 0x7a, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x47, 0x7a, 0x69, 0x70, 0x12, 0x29, 0x0a, 0x0f, 0x63, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x7a, 0x73, 0x74, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x64, 0x5a, 0x73, 0x74, 0x64, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, - 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x6f, 0x6e, 0x67, - 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x09, 0x68, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x48, 0x00, 0x52, 0x09, 0x68, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x36, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, - 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, - 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 
0x65, 0x65, 0x72, 0x4c, 0x69, - 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, - 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x03, 0x70, 0x32, 0x70, 0x22, 0xf3, 0x0a, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x29, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x7a, + 0x73, 0x74, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5a, 0x73, 0x74, 0x64, 0x12, 0x1f, 0x0a, 0x04, 0x70, + 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x50, 0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x04, + 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x50, 0x6f, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x2e, 0x0a, + 0x09, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x48, 0x00, 0x52, 0x09, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x36, 0x0a, + 0x0d, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x23, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, + 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x50, 0x65, 0x65, + 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, + 0x73, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, + 0x65, 
0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, + 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x16, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, - 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, + 0x12, 0x51, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 
0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, + 0x69, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, + 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a, 0x16, 0x61, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, - 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4e, - 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x44, - 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 
0x74, - 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0b, - 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x08, 0x61, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x08, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x6e, 0x63, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, - 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x2f, - 0x0a, 0x0a, 0x70, 0x75, 0x73, 
0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, - 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, 0x48, 0x00, 0x52, 0x05, 0x63, - 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x70, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x20, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, - 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x12, 0x2c, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x22, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x48, 0x00, 0x52, 0x08, 0x61, 0x70, 0x70, 0x45, 0x72, 
0x72, 0x6f, 0x72, 0x42, 0x09, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4a, 0x04, 0x08, 0x21, 0x10, 0x22, 0x4a, - 0x04, 0x08, 0x24, 0x10, 0x25, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, - 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, - 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, - 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, - 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, - 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, - 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, - 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0xcc, - 0x03, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, - 0x79, 
0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, - 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, - 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x70, 0x5f, 0x73, 0x69, 0x67, 0x6e, + 0x12, 0x51, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x12, 0x4e, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, + 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, + 0x69, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, + 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 
0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, + 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x12, 0x2b, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x48, 0x00, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, + 0x0d, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, + 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, + 0x70, 0x75, 0x74, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, + 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 
0x73, 0x68, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, + 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, + 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, + 0x00, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, + 0x0c, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, + 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x2c, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, + 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x08, 0x61, 0x70, 0x70, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x24, 0x10, 0x25, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x69, + 0x6e, 0x67, 0x12, 
0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, + 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, + 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x12, 0x0a, 0x04, 0x50, 0x6f, 0x6e, + 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xb3, 0x03, + 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x69, 0x70, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x69, 0x70, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x54, 
0x69, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0e, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x18, @@ -2778,157 +2667,142 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x62, 0x6c, 0x73, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x69, 0x70, 0x42, 0x6c, 0x73, 0x53, 0x69, 0x67, 0x22, 0x5e, 0x0a, - 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, - 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, - 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x22, 0x39, 0x0a, - 0x0b, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, - 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, - 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 
0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, - 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x40, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, - 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, - 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, - 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, - 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, - 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, - 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, - 0x6f, 0x72, 0x74, 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x69, 0x70, 0x42, 0x6c, 0x73, 0x53, 0x69, 0x67, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x5e, 0x0a, 0x06, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x22, 0x39, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, + 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x22, 0xbd, + 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, + 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, + 0x41, 0x64, 
0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x22, 0x40, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x31, 0x0a, + 0x0b, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x50, 0x65, 0x65, 0x72, 0x73, + 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, + 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, + 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x63, 0x6c, 0x61, 0x69, + 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x6f, 0x0a, 0x17, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, 0x14, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, + 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x07, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x71, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x6f, 0x0a, 0x10, 0x41, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 
0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, - 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, - 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, - 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, - 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 
0x54, 0x79, 0x70, 0x65, - 0x22, 0x75, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, - 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8e, 0x01, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x69, 0x0a, 0x08, 0x41, 0x63, 0x63, 
0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x64, 0x73, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, - 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 
0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x22, 0x6b, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, - 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, - 0x01, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x22, 0x65, 0x0a, 0x09, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, + 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x03, 0x47, 0x65, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x22, 0xe1, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x22, 0x5d, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x22, + 0xb0, 0x01, 0x0a, 0x09, 
0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x22, 0xb5, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, @@ -2936,62 +2810,59 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, - 0x2e, 
0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, 0x73, - 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, - 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, - 0x0b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x33, - 0x0a, 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x5f, 0x61, - 0x74, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, - 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, 0x74, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x08, 0x41, - 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xba, 0x01, 0x0a, 0x05, 0x43, + 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x69, 
0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, + 0x69, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x13, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x49, 0x64, 0x41, + 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, + 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 
0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, - 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1b, 0x0a, - 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, 0x0a, 0x0a, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x4e, 0x47, 0x49, - 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x48, 0x45, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, - 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x88, + 0x01, 0x0a, 0x08, 0x41, 0x70, 0x70, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x11, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, + 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, + 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, + 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, + 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, + 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, + 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 
0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3066,23 +2937,16 @@ var file_p2p_p2p_proto_depIdxs = []int32{ 29, // 22: p2p.Message.app_gossip:type_name -> p2p.AppGossip 28, // 23: p2p.Message.app_error:type_name -> p2p.AppError 3, // 24: p2p.Ping.subnet_uptimes:type_name -> p2p.SubnetUptime - 3, // 25: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime - 6, // 26: p2p.Handshake.client:type_name -> p2p.Client - 7, // 27: p2p.Handshake.known_peers:type_name -> p2p.BloomFilter - 7, // 28: p2p.GetPeerList.known_peers:type_name -> p2p.BloomFilter - 8, // 29: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort - 0, // 30: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType - 0, // 31: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType - 0, // 32: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType - 0, // 33: p2p.Get.engine_type:type_name -> p2p.EngineType - 0, // 34: p2p.Put.engine_type:type_name -> p2p.EngineType - 0, // 35: p2p.PushQuery.engine_type:type_name -> p2p.EngineType - 0, // 36: p2p.PullQuery.engine_type:type_name -> p2p.EngineType - 37, // [37:37] is the sub-list for method output_type - 37, // [37:37] is the sub-list for method input_type - 37, // [37:37] is the sub-list for extension type_name - 37, // [37:37] is the sub-list for extension extendee - 0, // [0:37] is the sub-list for field type_name + 6, // 25: p2p.Handshake.client:type_name -> p2p.Client + 7, // 26: p2p.Handshake.known_peers:type_name -> p2p.BloomFilter + 7, // 27: p2p.GetPeerList.known_peers:type_name -> p2p.BloomFilter + 8, // 28: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort + 0, // 29: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { 
file_p2p_p2p_proto_init() } @@ -3441,7 +3305,6 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Message_CompressedGzip)(nil), (*Message_CompressedZstd)(nil), (*Message_Ping)(nil), (*Message_Pong)(nil), diff --git a/proto/pb/rpcdb/rpcdb.pb.go b/proto/pb/rpcdb/rpcdb.pb.go index 246732f1bba3..5849d778bd44 100644 --- a/proto/pb/rpcdb/rpcdb.pb.go +++ b/proto/pb/rpcdb/rpcdb.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: rpcdb/rpcdb.proto diff --git a/proto/pb/sdk/sdk.pb.go b/proto/pb/sdk/sdk.pb.go index b90c23450270..d6f458984a80 100644 --- a/proto/pb/sdk/sdk.pb.go +++ b/proto/pb/sdk/sdk.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: sdk/sdk.proto @@ -173,20 +173,20 @@ var File_sdk_sdk_proto protoreflect.FileDescriptor var file_sdk_sdk_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x73, 0x64, 0x6b, 0x2f, 0x73, 0x64, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x73, 0x64, 0x6b, 0x22, 0x45, 0x0a, 0x11, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, + 0x03, 0x73, 0x64, 0x6b, 0x22, 0x3f, 0x0a, 0x11, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x73, 0x61, 0x6c, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x2c, 0x0a, 0x12, 0x50, - 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x22, 
0x24, 0x0a, 0x0a, 0x50, 0x75, 0x73, - 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, - 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x42, - 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, - 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, - 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x64, 0x6b, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x50, 0x75, 0x6c, 0x6c, 0x47, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x67, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x22, 0x24, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x47, 0x6f, 0x73, 0x73, 0x69, + 0x70, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x06, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, + 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x64, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/proto/pb/sharedmemory/sharedmemory.pb.go b/proto/pb/sharedmemory/sharedmemory.pb.go index 5de7c788691c..3708c3fb924c 100644 --- a/proto/pb/sharedmemory/sharedmemory.pb.go +++ b/proto/pb/sharedmemory/sharedmemory.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: sharedmemory/sharedmemory.proto diff --git a/proto/pb/sync/sync.pb.go b/proto/pb/sync/sync.pb.go index eb72e145420a..3a80cc22830b 100644 --- a/proto/pb/sync/sync.pb.go +++ b/proto/pb/sync/sync.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: sync/sync.proto diff --git a/proto/pb/validatorstate/validator_state.pb.go b/proto/pb/validatorstate/validator_state.pb.go index 84ddc0ab3100..591087a08342 100644 --- a/proto/pb/validatorstate/validator_state.pb.go +++ b/proto/pb/validatorstate/validator_state.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: validatorstate/validator_state.proto diff --git a/proto/pb/vm/runtime/runtime.pb.go b/proto/pb/vm/runtime/runtime.pb.go index 2a5f15dbfed3..5db5a490a359 100644 --- a/proto/pb/vm/runtime/runtime.pb.go +++ b/proto/pb/vm/runtime/runtime.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: vm/runtime/runtime.proto diff --git a/proto/pb/vm/vm.pb.go b/proto/pb/vm/vm.pb.go index 7f38e5bbf4a7..356f34d061ac 100644 --- a/proto/pb/vm/vm.pb.go +++ b/proto/pb/vm/vm.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: vm/vm.proto @@ -134,8 +134,7 @@ const ( Error_ERROR_UNSPECIFIED Error = 0 Error_ERROR_CLOSED Error = 1 Error_ERROR_NOT_FOUND Error = 2 - Error_ERROR_HEIGHT_INDEX_INCOMPLETE Error = 3 - Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED Error = 4 + Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED Error = 3 ) // Enum value maps for Error. 
@@ -144,15 +143,13 @@ var ( 0: "ERROR_UNSPECIFIED", 1: "ERROR_CLOSED", 2: "ERROR_NOT_FOUND", - 3: "ERROR_HEIGHT_INDEX_INCOMPLETE", - 4: "ERROR_STATE_SYNC_NOT_IMPLEMENTED", + 3: "ERROR_STATE_SYNC_NOT_IMPLEMENTED", } Error_value = map[string]int32{ "ERROR_UNSPECIFIED": 0, "ERROR_CLOSED": 1, "ERROR_NOT_FOUND": 2, - "ERROR_HEIGHT_INDEX_INCOMPLETE": 3, - "ERROR_STATE_SYNC_NOT_IMPLEMENTED": 4, + "ERROR_STATE_SYNC_NOT_IMPLEMENTED": 3, } ) @@ -232,7 +229,7 @@ func (x StateSummaryAcceptResponse_Mode) Number() protoreflect.EnumNumber { // Deprecated: Use StateSummaryAcceptResponse_Mode.Descriptor instead. func (StateSummaryAcceptResponse_Mode) EnumDescriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{44, 0} + return file_vm_vm_proto_rawDescGZIP(), []int{43, 0} } type InitializeRequest struct { @@ -2280,53 +2277,6 @@ func (x *BatchedParseBlockResponse) GetResponse() []*ParseBlockResponse { return nil } -type VerifyHeightIndexResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` -} - -func (x *VerifyHeightIndexResponse) Reset() { - *x = VerifyHeightIndexResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VerifyHeightIndexResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VerifyHeightIndexResponse) ProtoMessage() {} - -func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VerifyHeightIndexResponse.ProtoReflect.Descriptor instead. 
-func (*VerifyHeightIndexResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{32} -} - -func (x *VerifyHeightIndexResponse) GetErr() Error { - if x != nil { - return x.Err - } - return Error_ERROR_UNSPECIFIED -} - type GetBlockIDAtHeightRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2338,7 +2288,7 @@ type GetBlockIDAtHeightRequest struct { func (x *GetBlockIDAtHeightRequest) Reset() { *x = GetBlockIDAtHeightRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2351,7 +2301,7 @@ func (x *GetBlockIDAtHeightRequest) String() string { func (*GetBlockIDAtHeightRequest) ProtoMessage() {} func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2364,7 +2314,7 @@ func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{33} + return file_vm_vm_proto_rawDescGZIP(), []int{32} } func (x *GetBlockIDAtHeightRequest) GetHeight() uint64 { @@ -2386,7 +2336,7 @@ type GetBlockIDAtHeightResponse struct { func (x *GetBlockIDAtHeightResponse) Reset() { *x = GetBlockIDAtHeightResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2399,7 +2349,7 @@ func (x *GetBlockIDAtHeightResponse) String() string { func (*GetBlockIDAtHeightResponse) ProtoMessage() {} func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2412,7 +2362,7 @@ func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{34} + return file_vm_vm_proto_rawDescGZIP(), []int{33} } func (x *GetBlockIDAtHeightResponse) GetBlkId() []byte { @@ -2440,7 +2390,7 @@ type GatherResponse struct { func (x *GatherResponse) Reset() { *x = GatherResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2453,7 +2403,7 @@ func (x *GatherResponse) String() string { func (*GatherResponse) ProtoMessage() {} func (x *GatherResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2466,7 +2416,7 @@ func (x *GatherResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GatherResponse.ProtoReflect.Descriptor instead. 
func (*GatherResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{35} + return file_vm_vm_proto_rawDescGZIP(), []int{34} } func (x *GatherResponse) GetMetricFamilies() []*_go.MetricFamily { @@ -2488,7 +2438,7 @@ type StateSyncEnabledResponse struct { func (x *StateSyncEnabledResponse) Reset() { *x = StateSyncEnabledResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2501,7 +2451,7 @@ func (x *StateSyncEnabledResponse) String() string { func (*StateSyncEnabledResponse) ProtoMessage() {} func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2514,7 +2464,7 @@ func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSyncEnabledResponse.ProtoReflect.Descriptor instead. 
func (*StateSyncEnabledResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{36} + return file_vm_vm_proto_rawDescGZIP(), []int{35} } func (x *StateSyncEnabledResponse) GetEnabled() bool { @@ -2545,7 +2495,7 @@ type GetOngoingSyncStateSummaryResponse struct { func (x *GetOngoingSyncStateSummaryResponse) Reset() { *x = GetOngoingSyncStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2558,7 +2508,7 @@ func (x *GetOngoingSyncStateSummaryResponse) String() string { func (*GetOngoingSyncStateSummaryResponse) ProtoMessage() {} func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2571,7 +2521,7 @@ func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message // Deprecated: Use GetOngoingSyncStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetOngoingSyncStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{37} + return file_vm_vm_proto_rawDescGZIP(), []int{36} } func (x *GetOngoingSyncStateSummaryResponse) GetId() []byte { @@ -2616,7 +2566,7 @@ type GetLastStateSummaryResponse struct { func (x *GetLastStateSummaryResponse) Reset() { *x = GetLastStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2629,7 +2579,7 @@ func (x *GetLastStateSummaryResponse) String() string { func (*GetLastStateSummaryResponse) ProtoMessage() {} func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2642,7 +2592,7 @@ func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLastStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetLastStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{38} + return file_vm_vm_proto_rawDescGZIP(), []int{37} } func (x *GetLastStateSummaryResponse) GetId() []byte { @@ -2684,7 +2634,7 @@ type ParseStateSummaryRequest struct { func (x *ParseStateSummaryRequest) Reset() { *x = ParseStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2697,7 +2647,7 @@ func (x *ParseStateSummaryRequest) String() string { func (*ParseStateSummaryRequest) ProtoMessage() {} func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2710,7 +2660,7 @@ func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{39} + return file_vm_vm_proto_rawDescGZIP(), []int{38} } func (x *ParseStateSummaryRequest) GetBytes() []byte { @@ -2733,7 +2683,7 @@ type ParseStateSummaryResponse struct { func (x *ParseStateSummaryResponse) Reset() { *x = ParseStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2746,7 +2696,7 @@ func (x *ParseStateSummaryResponse) String() string { func (*ParseStateSummaryResponse) ProtoMessage() {} func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2759,7 +2709,7 @@ func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{40} + return file_vm_vm_proto_rawDescGZIP(), []int{39} } func (x *ParseStateSummaryResponse) GetId() []byte { @@ -2794,7 +2744,7 @@ type GetStateSummaryRequest struct { func (x *GetStateSummaryRequest) Reset() { *x = GetStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2807,7 +2757,7 @@ func (x *GetStateSummaryRequest) String() string { func (*GetStateSummaryRequest) ProtoMessage() {} func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2820,7 +2770,7 @@ func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{41} + return file_vm_vm_proto_rawDescGZIP(), []int{40} } func (x *GetStateSummaryRequest) GetHeight() uint64 { @@ -2843,7 +2793,7 @@ type GetStateSummaryResponse struct { func (x *GetStateSummaryResponse) Reset() { *x = GetStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2856,7 +2806,7 @@ func (x *GetStateSummaryResponse) String() string { func (*GetStateSummaryResponse) ProtoMessage() {} func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2869,7 +2819,7 @@ func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{42} + return file_vm_vm_proto_rawDescGZIP(), []int{41} } func (x *GetStateSummaryResponse) GetId() []byte { @@ -2904,7 +2854,7 @@ type StateSummaryAcceptRequest struct { func (x *StateSummaryAcceptRequest) Reset() { *x = StateSummaryAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2917,7 +2867,7 @@ func (x *StateSummaryAcceptRequest) String() string { func (*StateSummaryAcceptRequest) ProtoMessage() {} func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[43] + mi := &file_vm_vm_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2930,7 +2880,7 @@ func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptRequest.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{43} + return file_vm_vm_proto_rawDescGZIP(), []int{42} } func (x *StateSummaryAcceptRequest) GetBytes() []byte { @@ -2952,7 +2902,7 @@ type StateSummaryAcceptResponse struct { func (x *StateSummaryAcceptResponse) Reset() { *x = StateSummaryAcceptResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2965,7 +2915,7 @@ func (x *StateSummaryAcceptResponse) String() string { func (*StateSummaryAcceptResponse) ProtoMessage() {} func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[44] + mi := &file_vm_vm_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2978,7 +2928,7 @@ func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptResponse.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{44} + return file_vm_vm_proto_rawDescGZIP(), []int{43} } func (x *StateSummaryAcceptResponse) GetMode() StateSummaryAcceptResponse_Mode { @@ -3245,246 +3195,236 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, + 0x1a, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, + 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, + 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x5d, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, + 0x6c, 0x69, 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, + 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, + 0x0a, 0x18, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, + 0x72, 0x22, 0x7f, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, + 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, - 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 
0x1a, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x03, - 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x5d, 0x0a, 0x0e, 0x47, 0x61, 0x74, - 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, - 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, - 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, - 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x7f, 0x0a, 0x22, 0x47, - 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, + 0x72, 0x72, 0x22, 0x78, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 
0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, - 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x78, 0x0a, 0x1b, - 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, + 0x0a, 0x19, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 
0x60, 0x0a, 0x19, 0x50, 0x61, 0x72, 0x73, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1b, 0x0a, - 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x5c, 0x0a, 0x17, - 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, - 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x31, 0x0a, 0x19, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xc5, 0x01, - 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 
0x04, - 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x76, 0x6d, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, - 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x49, - 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x44, 0x59, 0x4e, 0x41, - 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, - 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, - 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, - 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, 0x03, 0x2a, 0x61, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, - 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, - 0x49, 0x4e, 0x47, 0x10, 0x01, 
0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, - 0x8e, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, - 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, - 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, - 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x04, - 0x32, 0xd2, 0x11, 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, - 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, - 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x67, 0x68, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, + 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, + 0x22, 0x31, 0x0a, 0x19, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 
0x65, 0x12, 0x37, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x23, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, + 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, + 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, + 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, + 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, + 0x0f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, + 0x10, 0x03, 0x2a, 0x61, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, + 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, + 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x49, 
0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, + 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, + 0x54, 0x45, 0x44, 0x10, 0x03, 0x2a, 0x6b, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, + 0x0a, 0x11, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, + 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, + 0x10, 0x03, 0x32, 0x86, 0x11, 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, + 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 
0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, - 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, - 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, - 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, - 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, - 0x74, 0x50, 0x72, 0x65, 
0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, - 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x39, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, + 0x6d, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, + 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, + 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 
0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, + 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, + 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x34, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, + 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, + 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, + 0x73, 0x69, 0x70, 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, - 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x06, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, + 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x12, 0x57, 0x0a, 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, + 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, - 0x70, 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, - 0x61, 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, - 0x76, 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, - 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, - 0x0a, 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, - 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, + 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, + 0x1a, 0x16, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, + 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, + 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, + 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, + 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, + 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, + 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, + 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, + 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, + 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, - 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, - 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, - 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, - 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, - 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, - 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 
0x4f, 0x6e, 0x67, - 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, - 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, - 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, - 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 
0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, - 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x33, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, + 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, + 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -3500,7 +3440,7 @@ func file_vm_vm_proto_rawDescGZIP() []byte { } var file_vm_vm_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 45) +var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 44) var file_vm_vm_proto_goTypes = []interface{}{ (State)(0), // 0: vm.State (Status)(0), // 1: vm.Status @@ -3538,119 +3478,115 @@ var file_vm_vm_proto_goTypes = []interface{}{ (*GetAncestorsResponse)(nil), // 33: vm.GetAncestorsResponse (*BatchedParseBlockRequest)(nil), // 34: vm.BatchedParseBlockRequest (*BatchedParseBlockResponse)(nil), // 35: vm.BatchedParseBlockResponse - (*VerifyHeightIndexResponse)(nil), // 36: vm.VerifyHeightIndexResponse - (*GetBlockIDAtHeightRequest)(nil), // 37: vm.GetBlockIDAtHeightRequest - (*GetBlockIDAtHeightResponse)(nil), // 38: vm.GetBlockIDAtHeightResponse - (*GatherResponse)(nil), // 39: vm.GatherResponse - (*StateSyncEnabledResponse)(nil), // 40: vm.StateSyncEnabledResponse - (*GetOngoingSyncStateSummaryResponse)(nil), // 41: vm.GetOngoingSyncStateSummaryResponse - 
(*GetLastStateSummaryResponse)(nil), // 42: vm.GetLastStateSummaryResponse - (*ParseStateSummaryRequest)(nil), // 43: vm.ParseStateSummaryRequest - (*ParseStateSummaryResponse)(nil), // 44: vm.ParseStateSummaryResponse - (*GetStateSummaryRequest)(nil), // 45: vm.GetStateSummaryRequest - (*GetStateSummaryResponse)(nil), // 46: vm.GetStateSummaryResponse - (*StateSummaryAcceptRequest)(nil), // 47: vm.StateSummaryAcceptRequest - (*StateSummaryAcceptResponse)(nil), // 48: vm.StateSummaryAcceptResponse - (*timestamppb.Timestamp)(nil), // 49: google.protobuf.Timestamp - (*_go.MetricFamily)(nil), // 50: io.prometheus.client.MetricFamily - (*emptypb.Empty)(nil), // 51: google.protobuf.Empty + (*GetBlockIDAtHeightRequest)(nil), // 36: vm.GetBlockIDAtHeightRequest + (*GetBlockIDAtHeightResponse)(nil), // 37: vm.GetBlockIDAtHeightResponse + (*GatherResponse)(nil), // 38: vm.GatherResponse + (*StateSyncEnabledResponse)(nil), // 39: vm.StateSyncEnabledResponse + (*GetOngoingSyncStateSummaryResponse)(nil), // 40: vm.GetOngoingSyncStateSummaryResponse + (*GetLastStateSummaryResponse)(nil), // 41: vm.GetLastStateSummaryResponse + (*ParseStateSummaryRequest)(nil), // 42: vm.ParseStateSummaryRequest + (*ParseStateSummaryResponse)(nil), // 43: vm.ParseStateSummaryResponse + (*GetStateSummaryRequest)(nil), // 44: vm.GetStateSummaryRequest + (*GetStateSummaryResponse)(nil), // 45: vm.GetStateSummaryResponse + (*StateSummaryAcceptRequest)(nil), // 46: vm.StateSummaryAcceptRequest + (*StateSummaryAcceptResponse)(nil), // 47: vm.StateSummaryAcceptResponse + (*timestamppb.Timestamp)(nil), // 48: google.protobuf.Timestamp + (*_go.MetricFamily)(nil), // 49: io.prometheus.client.MetricFamily + (*emptypb.Empty)(nil), // 50: google.protobuf.Empty } var file_vm_vm_proto_depIdxs = []int32{ - 49, // 0: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp + 48, // 0: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp 0, // 1: vm.SetStateRequest.state:type_name 
-> vm.State - 49, // 2: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp + 48, // 2: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp 9, // 3: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler - 49, // 4: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 48, // 4: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp 1, // 5: vm.ParseBlockResponse.status:type_name -> vm.Status - 49, // 6: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 48, // 6: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp 1, // 7: vm.GetBlockResponse.status:type_name -> vm.Status - 49, // 8: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 48, // 8: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp 2, // 9: vm.GetBlockResponse.err:type_name -> vm.Error - 49, // 10: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp - 49, // 11: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 49, // 12: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 48, // 10: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp + 48, // 11: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 48, // 12: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp 13, // 13: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse - 2, // 14: vm.VerifyHeightIndexResponse.err:type_name -> vm.Error - 2, // 15: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error - 50, // 16: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily - 2, // 17: vm.StateSyncEnabledResponse.err:type_name -> vm.Error - 2, // 18: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error - 2, // 19: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error - 2, // 20: vm.ParseStateSummaryResponse.err:type_name -> 
vm.Error - 2, // 21: vm.GetStateSummaryResponse.err:type_name -> vm.Error - 3, // 22: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode - 2, // 23: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error - 4, // 24: vm.VM.Initialize:input_type -> vm.InitializeRequest - 6, // 25: vm.VM.SetState:input_type -> vm.SetStateRequest - 51, // 26: vm.VM.Shutdown:input_type -> google.protobuf.Empty - 51, // 27: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty - 30, // 28: vm.VM.Connected:input_type -> vm.ConnectedRequest - 31, // 29: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest - 10, // 30: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest - 12, // 31: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest - 14, // 32: vm.VM.GetBlock:input_type -> vm.GetBlockRequest - 16, // 33: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest - 51, // 34: vm.VM.Health:input_type -> google.protobuf.Empty - 51, // 35: vm.VM.Version:input_type -> google.protobuf.Empty - 23, // 36: vm.VM.AppRequest:input_type -> vm.AppRequestMsg - 24, // 37: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg - 25, // 38: vm.VM.AppResponse:input_type -> vm.AppResponseMsg - 26, // 39: vm.VM.AppGossip:input_type -> vm.AppGossipMsg - 51, // 40: vm.VM.Gather:input_type -> google.protobuf.Empty - 27, // 41: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg - 28, // 42: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg - 29, // 43: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg - 32, // 44: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest - 34, // 45: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest - 51, // 46: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty - 37, // 47: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest - 51, // 48: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty - 51, // 49: 
vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty - 51, // 50: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty - 43, // 51: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest - 45, // 52: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest - 17, // 53: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest - 19, // 54: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest - 20, // 55: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest - 47, // 56: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest - 5, // 57: vm.VM.Initialize:output_type -> vm.InitializeResponse - 7, // 58: vm.VM.SetState:output_type -> vm.SetStateResponse - 51, // 59: vm.VM.Shutdown:output_type -> google.protobuf.Empty - 8, // 60: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse - 51, // 61: vm.VM.Connected:output_type -> google.protobuf.Empty - 51, // 62: vm.VM.Disconnected:output_type -> google.protobuf.Empty - 11, // 63: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse - 13, // 64: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse - 15, // 65: vm.VM.GetBlock:output_type -> vm.GetBlockResponse - 51, // 66: vm.VM.SetPreference:output_type -> google.protobuf.Empty - 21, // 67: vm.VM.Health:output_type -> vm.HealthResponse - 22, // 68: vm.VM.Version:output_type -> vm.VersionResponse - 51, // 69: vm.VM.AppRequest:output_type -> google.protobuf.Empty - 51, // 70: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty - 51, // 71: vm.VM.AppResponse:output_type -> google.protobuf.Empty - 51, // 72: vm.VM.AppGossip:output_type -> google.protobuf.Empty - 39, // 73: vm.VM.Gather:output_type -> vm.GatherResponse - 51, // 74: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty - 51, // 75: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty - 51, // 76: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty - 33, // 77: vm.VM.GetAncestors:output_type -> 
vm.GetAncestorsResponse - 35, // 78: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse - 36, // 79: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse - 38, // 80: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse - 40, // 81: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse - 41, // 82: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse - 42, // 83: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse - 44, // 84: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse - 46, // 85: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse - 18, // 86: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse - 51, // 87: vm.VM.BlockAccept:output_type -> google.protobuf.Empty - 51, // 88: vm.VM.BlockReject:output_type -> google.protobuf.Empty - 48, // 89: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse - 57, // [57:90] is the sub-list for method output_type - 24, // [24:57] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 2, // 14: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error + 49, // 15: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily + 2, // 16: vm.StateSyncEnabledResponse.err:type_name -> vm.Error + 2, // 17: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error + 2, // 18: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error + 2, // 19: vm.ParseStateSummaryResponse.err:type_name -> vm.Error + 2, // 20: vm.GetStateSummaryResponse.err:type_name -> vm.Error + 3, // 21: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode + 2, // 22: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error + 4, // 23: vm.VM.Initialize:input_type -> vm.InitializeRequest + 6, // 24: 
vm.VM.SetState:input_type -> vm.SetStateRequest + 50, // 25: vm.VM.Shutdown:input_type -> google.protobuf.Empty + 50, // 26: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty + 30, // 27: vm.VM.Connected:input_type -> vm.ConnectedRequest + 31, // 28: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest + 10, // 29: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest + 12, // 30: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest + 14, // 31: vm.VM.GetBlock:input_type -> vm.GetBlockRequest + 16, // 32: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest + 50, // 33: vm.VM.Health:input_type -> google.protobuf.Empty + 50, // 34: vm.VM.Version:input_type -> google.protobuf.Empty + 23, // 35: vm.VM.AppRequest:input_type -> vm.AppRequestMsg + 24, // 36: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg + 25, // 37: vm.VM.AppResponse:input_type -> vm.AppResponseMsg + 26, // 38: vm.VM.AppGossip:input_type -> vm.AppGossipMsg + 50, // 39: vm.VM.Gather:input_type -> google.protobuf.Empty + 27, // 40: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg + 28, // 41: vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg + 29, // 42: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg + 32, // 43: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest + 34, // 44: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest + 36, // 45: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest + 50, // 46: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty + 50, // 47: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty + 50, // 48: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty + 42, // 49: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest + 44, // 50: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest + 17, // 51: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest + 19, // 52: vm.VM.BlockAccept:input_type -> 
vm.BlockAcceptRequest + 20, // 53: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest + 46, // 54: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest + 5, // 55: vm.VM.Initialize:output_type -> vm.InitializeResponse + 7, // 56: vm.VM.SetState:output_type -> vm.SetStateResponse + 50, // 57: vm.VM.Shutdown:output_type -> google.protobuf.Empty + 8, // 58: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse + 50, // 59: vm.VM.Connected:output_type -> google.protobuf.Empty + 50, // 60: vm.VM.Disconnected:output_type -> google.protobuf.Empty + 11, // 61: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse + 13, // 62: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse + 15, // 63: vm.VM.GetBlock:output_type -> vm.GetBlockResponse + 50, // 64: vm.VM.SetPreference:output_type -> google.protobuf.Empty + 21, // 65: vm.VM.Health:output_type -> vm.HealthResponse + 22, // 66: vm.VM.Version:output_type -> vm.VersionResponse + 50, // 67: vm.VM.AppRequest:output_type -> google.protobuf.Empty + 50, // 68: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty + 50, // 69: vm.VM.AppResponse:output_type -> google.protobuf.Empty + 50, // 70: vm.VM.AppGossip:output_type -> google.protobuf.Empty + 38, // 71: vm.VM.Gather:output_type -> vm.GatherResponse + 50, // 72: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty + 50, // 73: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty + 50, // 74: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty + 33, // 75: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse + 35, // 76: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse + 37, // 77: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse + 39, // 78: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse + 40, // 79: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse + 41, // 80: vm.VM.GetLastStateSummary:output_type -> 
vm.GetLastStateSummaryResponse + 43, // 81: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse + 45, // 82: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse + 18, // 83: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse + 50, // 84: vm.VM.BlockAccept:output_type -> google.protobuf.Empty + 50, // 85: vm.VM.BlockReject:output_type -> google.protobuf.Empty + 47, // 86: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse + 55, // [55:87] is the sub-list for method output_type + 23, // [23:55] is the sub-list for method input_type + 23, // [23:23] is the sub-list for extension type_name + 23, // [23:23] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for field type_name } func init() { file_vm_vm_proto_init() } @@ -4044,18 +3980,6 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyHeightIndexResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightRequest); i { case 0: return &v.state @@ -4067,7 +3991,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBlockIDAtHeightResponse); i { case 0: return &v.state @@ -4079,7 +4003,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GatherResponse); i { case 0: return &v.state @@ -4091,7 +4015,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[36].Exporter = func(v 
interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSyncEnabledResponse); i { case 0: return &v.state @@ -4103,7 +4027,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetOngoingSyncStateSummaryResponse); i { case 0: return &v.state @@ -4115,7 +4039,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetLastStateSummaryResponse); i { case 0: return &v.state @@ -4127,7 +4051,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryRequest); i { case 0: return &v.state @@ -4139,7 +4063,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ParseStateSummaryResponse); i { case 0: return &v.state @@ -4151,7 +4075,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetStateSummaryRequest); i { case 0: return &v.state @@ -4163,7 +4087,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*GetStateSummaryResponse); i { case 0: return &v.state @@ -4175,7 +4099,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptRequest); i { case 0: return &v.state @@ -4187,7 +4111,7 @@ func file_vm_vm_proto_init() { return nil } } - file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StateSummaryAcceptResponse); i { case 0: return &v.state @@ -4208,7 +4132,7 @@ func file_vm_vm_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vm_vm_proto_rawDesc, NumEnums: 4, - NumMessages: 45, + NumMessages: 44, NumExtensions: 0, NumServices: 1, }, diff --git a/proto/pb/vm/vm_grpc.pb.go b/proto/pb/vm/vm_grpc.pb.go index 6d7bb17f6c33..4a67f13408ce 100644 --- a/proto/pb/vm/vm_grpc.pb.go +++ b/proto/pb/vm/vm_grpc.pb.go @@ -42,7 +42,6 @@ const ( VM_CrossChainAppResponse_FullMethodName = "/vm.VM/CrossChainAppResponse" VM_GetAncestors_FullMethodName = "/vm.VM/GetAncestors" VM_BatchedParseBlock_FullMethodName = "/vm.VM/BatchedParseBlock" - VM_VerifyHeightIndex_FullMethodName = "/vm.VM/VerifyHeightIndex" VM_GetBlockIDAtHeight_FullMethodName = "/vm.VM/GetBlockIDAtHeight" VM_StateSyncEnabled_FullMethodName = "/vm.VM/StateSyncEnabled" VM_GetOngoingSyncStateSummary_FullMethodName = "/vm.VM/GetOngoingSyncStateSummary" @@ -102,7 +101,6 @@ type VMClient interface { GetAncestors(ctx context.Context, in *GetAncestorsRequest, opts ...grpc.CallOption) (*GetAncestorsResponse, error) BatchedParseBlock(ctx context.Context, in *BatchedParseBlockRequest, opts ...grpc.CallOption) (*BatchedParseBlockResponse, error) // HeightIndexedChainVM - VerifyHeightIndex(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) 
(*VerifyHeightIndexResponse, error) GetBlockIDAtHeight(ctx context.Context, in *GetBlockIDAtHeightRequest, opts ...grpc.CallOption) (*GetBlockIDAtHeightResponse, error) // StateSyncableVM // @@ -331,15 +329,6 @@ func (c *vMClient) BatchedParseBlock(ctx context.Context, in *BatchedParseBlockR return out, nil } -func (c *vMClient) VerifyHeightIndex(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VerifyHeightIndexResponse, error) { - out := new(VerifyHeightIndexResponse) - err := c.cc.Invoke(ctx, VM_VerifyHeightIndex_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *vMClient) GetBlockIDAtHeight(ctx context.Context, in *GetBlockIDAtHeightRequest, opts ...grpc.CallOption) (*GetBlockIDAtHeightResponse, error) { out := new(GetBlockIDAtHeightResponse) err := c.cc.Invoke(ctx, VM_GetBlockIDAtHeight_FullMethodName, in, out, opts...) @@ -477,7 +466,6 @@ type VMServer interface { GetAncestors(context.Context, *GetAncestorsRequest) (*GetAncestorsResponse, error) BatchedParseBlock(context.Context, *BatchedParseBlockRequest) (*BatchedParseBlockResponse, error) // HeightIndexedChainVM - VerifyHeightIndex(context.Context, *emptypb.Empty) (*VerifyHeightIndexResponse, error) GetBlockIDAtHeight(context.Context, *GetBlockIDAtHeightRequest) (*GetBlockIDAtHeightResponse, error) // StateSyncableVM // @@ -571,9 +559,6 @@ func (UnimplementedVMServer) GetAncestors(context.Context, *GetAncestorsRequest) func (UnimplementedVMServer) BatchedParseBlock(context.Context, *BatchedParseBlockRequest) (*BatchedParseBlockResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BatchedParseBlock not implemented") } -func (UnimplementedVMServer) VerifyHeightIndex(context.Context, *emptypb.Empty) (*VerifyHeightIndexResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyHeightIndex not implemented") -} func (UnimplementedVMServer) GetBlockIDAtHeight(context.Context, 
*GetBlockIDAtHeightRequest) (*GetBlockIDAtHeightResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetBlockIDAtHeight not implemented") } @@ -1013,24 +998,6 @@ func _VM_BatchedParseBlock_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _VM_VerifyHeightIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VMServer).VerifyHeightIndex(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: VM_VerifyHeightIndex_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VMServer).VerifyHeightIndex(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - func _VM_GetBlockIDAtHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetBlockIDAtHeightRequest) if err := dec(in); err != nil { @@ -1306,10 +1273,6 @@ var VM_ServiceDesc = grpc.ServiceDesc{ MethodName: "BatchedParseBlock", Handler: _VM_BatchedParseBlock_Handler, }, - { - MethodName: "VerifyHeightIndex", - Handler: _VM_VerifyHeightIndex_Handler, - }, { MethodName: "GetBlockIDAtHeight", Handler: _VM_GetBlockIDAtHeight_Handler, diff --git a/proto/pb/warp/message.pb.go b/proto/pb/warp/message.pb.go index d6cb37360817..14827b49b549 100644 --- a/proto/pb/warp/message.pb.go +++ b/proto/pb/warp/message.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.33.0 // protoc (unknown) // source: warp/message.proto diff --git a/proto/sdk/sdk.proto b/proto/sdk/sdk.proto index f42912391fe7..3f91f23efdfe 100644 --- a/proto/sdk/sdk.proto +++ b/proto/sdk/sdk.proto @@ -5,8 +5,6 @@ package sdk; option go_package = "github.com/ava-labs/avalanchego/proto/pb/sdk"; message PullGossipRequest { - // TODO: Remove reservation after v1.11.x activates. - reserved 1; bytes salt = 2; bytes filter = 3; } diff --git a/proto/vm/vm.proto b/proto/vm/vm.proto index 4a0557ba4e67..038744d4e2d5 100644 --- a/proto/vm/vm.proto +++ b/proto/vm/vm.proto @@ -56,7 +56,6 @@ service VM { rpc BatchedParseBlock(BatchedParseBlockRequest) returns (BatchedParseBlockResponse); // HeightIndexedChainVM - rpc VerifyHeightIndex(google.protobuf.Empty) returns (VerifyHeightIndexResponse); rpc GetBlockIDAtHeight(GetBlockIDAtHeightRequest) returns (GetBlockIDAtHeightResponse); // StateSyncableVM @@ -101,8 +100,7 @@ enum Error { ERROR_UNSPECIFIED = 0; ERROR_CLOSED = 1; ERROR_NOT_FOUND = 2; - ERROR_HEIGHT_INDEX_INCOMPLETE = 3; - ERROR_STATE_SYNC_NOT_IMPLEMENTED = 4; + ERROR_STATE_SYNC_NOT_IMPLEMENTED = 3; } message InitializeRequest { @@ -334,10 +332,6 @@ message BatchedParseBlockResponse { repeated ParseBlockResponse response = 1; } -message VerifyHeightIndexResponse { - Error err = 1; -} - message GetBlockIDAtHeightRequest { uint64 height = 1; } diff --git a/scripts/build_antithesis_avalanchego_workload.sh b/scripts/build_antithesis_avalanchego_workload.sh new file mode 100755 index 000000000000..fce00e263d0b --- /dev/null +++ b/scripts/build_antithesis_avalanchego_workload.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Directory above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) +# Load the constants +source "$AVALANCHE_PATH"/scripts/constants.sh + +echo "Building Workload..." 
+go build -o "$AVALANCHE_PATH/build/antithesis-avalanchego-workload" "$AVALANCHE_PATH/tests/antithesis/avalanchego/"*.go diff --git a/scripts/build_antithesis_images.sh b/scripts/build_antithesis_images.sh new file mode 100755 index 000000000000..958f80a250b9 --- /dev/null +++ b/scripts/build_antithesis_images.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Builds docker images for antithesis testing. + +# e.g., +# TEST_SETUP=avalanchego ./scripts/build_antithesis_images.sh # Build local images for avalanchego +# TEST_SETUP=avalanchego NODE_ONLY=1 ./scripts/build_antithesis_images.sh # Build only a local node image for avalanchego +# TEST_SETUP=xsvm ./scripts/build_antithesis_images.sh # Build local images for xsvm +# TEST_SETUP=xsvm IMAGE_PREFIX=/ TAG=latest ./scripts/build_antithesis_images.sh # Specify a prefix to enable image push and use a specific tag + +# Directory above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) + +# Specifying an image prefix will ensure the image is pushed after build +IMAGE_PREFIX="${IMAGE_PREFIX:-}" + +TAG="${TAG:-}" +if [[ -z "${TAG}" ]]; then + # Default to tagging with the commit hash + source "${AVALANCHE_PATH}"/scripts/constants.sh + TAG="${commit_hash}" +fi + +# The dockerfiles don't specify the golang version to minimize the changes required to bump +# the version. Instead, the golang version is provided as an argument. 
+GO_VERSION="$(go list -m -f '{{.GoVersion}}')" + +function build_images { + local test_setup=$1 + local uninstrumented_node_dockerfile=$2 + local image_prefix=$3 + local node_only=${4:-} + + # Define image names + local base_image_name="antithesis-${test_setup}" + if [[ -n "${image_prefix}" ]]; then + base_image_name="${image_prefix}/${base_image_name}" + fi + local node_image_name="${base_image_name}-node:${TAG}" + local workload_image_name="${base_image_name}-workload:${TAG}" + local config_image_name="${base_image_name}-config:${TAG}" + # The same builder image is used to build node and workload images for all test + # setups. It is not intended to be pushed. + local builder_image_name="antithesis-avalanchego-builder:${TAG}" + + # Define dockerfiles + local base_dockerfile="${AVALANCHE_PATH}/tests/antithesis/${test_setup}/Dockerfile" + local builder_dockerfile="${base_dockerfile}.builder-instrumented" + local node_dockerfile="${base_dockerfile}.node" + # Working directory for instrumented builds + local builder_workdir="/avalanchego_instrumented/customer" + if [[ "$(go env GOARCH)" == "arm64" ]]; then + # Antithesis instrumentation is only supported on amd64. On apple silicon (arm64), + # uninstrumented Dockerfiles will be used to enable local test development. + builder_dockerfile="${base_dockerfile}.builder-uninstrumented" + node_dockerfile="${uninstrumented_node_dockerfile}" + # Working directory for uninstrumented builds + builder_workdir="/build" + fi + + # Define default build command + local docker_cmd="docker buildx build\ + --build-arg GO_VERSION=${GO_VERSION}\ + --build-arg NODE_IMAGE=${node_image_name}\ + --build-arg BUILDER_IMAGE=${builder_image_name}\ + --build-arg BUILDER_WORKDIR=${builder_workdir}\ + --build-arg TAG=${TAG}" + + if [[ "${test_setup}" == "xsvm" ]]; then + # The xsvm node image is built on the avalanchego node image, which is assumed to have already been + # built. 
The image name doesn't include the image prefix because it is not intended to be pushed. + docker_cmd="${docker_cmd} --build-arg AVALANCHEGO_NODE_IMAGE=antithesis-avalanchego-node:${TAG}" + fi + + if [[ "${test_setup}" == "avalanchego" ]]; then + # Build the image that enables compiling golang binaries for the node and workload + # image builds. The builder image is intended to enable building instrumented binaries + # if built on amd64 and non-instrumented binaries if built on arm64. + # + # The builder image is not intended to be pushed so it needs to be built in advance of + # adding `--push` to docker_cmd. Since it is never prefixed with `[registry]/[repo]`, + # attempting to push will result in an error like `unauthorized: access token has + # insufficient scopes`. + ${docker_cmd} -t "${builder_image_name}" -f "${builder_dockerfile}" "${AVALANCHE_PATH}" + fi + + if [[ -n "${image_prefix}" && -z "${node_only}" ]]; then + # Push images with an image prefix since the prefix defines a + # registry location, and only if building all images. When + # building just the node image the image is only intended to be + # used locally. + docker_cmd="${docker_cmd} --push" + fi + + # Build node image first to allow the workload image to use it. + ${docker_cmd} -t "${node_image_name}" -f "${node_dockerfile}" "${AVALANCHE_PATH}" + + if [[ -n "${node_only}" ]]; then + # Skip building the config and workload images. Supports building the avalanchego + # node image as the base image for the xsvm node image. 
+ return + fi + + TARGET_PATH="${AVALANCHE_PATH}/build/antithesis/${test_setup}" + if [[ -d "${TARGET_PATH}" ]]; then + # Ensure the target path is empty before generating the compose config + rm -r "${TARGET_PATH:?}" + fi + + # Define the env vars for the compose config generation + COMPOSE_ENV="TARGET_PATH=${TARGET_PATH} IMAGE_TAG=${TAG}" + + if [[ "${test_setup}" == "xsvm" ]]; then + # Ensure avalanchego and xsvm binaries are available to create an initial db state that includes subnets. + "${AVALANCHE_PATH}"/scripts/build.sh + "${AVALANCHE_PATH}"/scripts/build_xsvm.sh + COMPOSE_ENV="${COMPOSE_ENV} AVALANCHEGO_PATH=${AVALANCHE_PATH}/build/avalanchego AVALANCHEGO_PLUGIN_DIR=${HOME}/.avalanchego/plugins" + fi + + # Generate compose config for copying into the config image + # shellcheck disable=SC2086 + env ${COMPOSE_ENV} go run "${AVALANCHE_PATH}/tests/antithesis/${test_setup}/gencomposeconfig" + + # Build the config image + ${docker_cmd} -t "${config_image_name}" -f "${base_dockerfile}.config" "${AVALANCHE_PATH}" + + # Build the workload image + ${docker_cmd} -t "${workload_image_name}" -f "${base_dockerfile}.workload" "${AVALANCHE_PATH}" +} + +TEST_SETUP="${TEST_SETUP:-}" +if [[ "${TEST_SETUP}" == "avalanchego" ]]; then + build_images avalanchego "${AVALANCHE_PATH}/Dockerfile" "${IMAGE_PREFIX}" "${NODE_ONLY:-}" +elif [[ "${TEST_SETUP}" == "xsvm" ]]; then + # Only build the node image to use as the base for the xsvm image. Provide an empty + # image prefix (the 3rd argument) to prevent the image from being pushed + NODE_ONLY=1 + build_images avalanchego "${AVALANCHE_PATH}/Dockerfile" "" "${NODE_ONLY}" + + build_images xsvm "${AVALANCHE_PATH}/vms/example/xsvm/Dockerfile" "${IMAGE_PREFIX}" +else + echo "TEST_SETUP must be set. 
Valid values are 'avalanchego' or 'xsvm'" + exit 255 +fi diff --git a/scripts/build_antithesis_xsvm_workload.sh b/scripts/build_antithesis_xsvm_workload.sh new file mode 100755 index 000000000000..153965eeb63b --- /dev/null +++ b/scripts/build_antithesis_xsvm_workload.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Directory above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) +# Load the constants +source "$AVALANCHE_PATH"/scripts/constants.sh + +echo "Building Workload..." +go build -o "$AVALANCHE_PATH/build/antithesis-xsvm-workload" "$AVALANCHE_PATH/tests/antithesis/xsvm/"*.go diff --git a/scripts/build_avalanche.sh b/scripts/build_avalanche.sh index eda63a5b959b..74ec675e2363 100755 --- a/scripts/build_avalanche.sh +++ b/scripts/build_avalanche.sh @@ -22,34 +22,6 @@ while getopts 'r' flag; do esac done -# Changes to the minimum golang version must also be replicated in -# scripts/build_avalanche.sh (here) -# Dockerfile -# README.md -# go.mod -go_version_minimum="1.21.7" - -go_version() { - go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' -} - -version_lt() { - # Return true if $1 is a lower version than than $2, - local ver1=$1 - local ver2=$2 - # Reverse sort the versions, if the 1st item != ver1 then ver1 < ver2 - if [[ $(echo -e -n "$ver1\n$ver2\n" | sort -rV | head -n1) != "$ver1" ]]; then - return 0 - else - return 1 - fi -} - -if version_lt "$(go_version)" "$go_version_minimum"; then - echo "AvalancheGo requires Go >= $go_version_minimum, Go $(go_version) found." >&2 - exit 1 -fi - # Avalanchego root folder AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) # Load the constants diff --git a/scripts/build_image.sh b/scripts/build_image.sh index 38403a185a3a..3a7b7e588d05 100755 --- a/scripts/build_image.sh +++ b/scripts/build_image.sh @@ -2,25 +2,94 @@ set -euo pipefail +# e.g., +# ./scripts/build_image.sh # Build local single-arch image +# DOCKER_IMAGE=myavalanchego ./scripts/build_image.sh # Build local single arch image with a custom image name +# DOCKER_IMAGE=avaplatform/avalanchego ./scripts/build_image.sh # Build and push multi-arch image to docker hub +# DOCKER_IMAGE=localhost:5001/avalanchego ./scripts/build_image.sh # Build and push multi-arch image to private registry +# DOCKER_IMAGE=localhost:5001/myavalanchego ./scripts/build_image.sh # Build and push multi-arch image to private registry with a custom image name + +# Multi-arch builds require Docker Buildx and QEMU. buildx should be enabled by +# default in the version of docker included with Ubuntu 22.04, and qemu can be +# installed as follows: +# +# sudo apt-get install qemu qemu-user-static +# +# After installing qemu, it will also be necessary to start a new builder that can +# support multiplatform builds: +# +# docker buildx create --use +# +# Reference: https://docs.docker.com/buildx/working-with-buildx/ + # Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh -if [[ $current_branch == *"-race" ]]; then +if [[ $image_tag == *"-race" ]]; then echo "Branch name must not end in '-race'" exit 1 fi -# WARNING: this will use the most recent commit even if there are un-committed changes present -full_commit_hash="$(git --git-dir="$AVALANCHE_PATH/.git" rev-parse HEAD)" -commit_hash="${full_commit_hash::8}" +# The published name should be 'avaplatform/avalanchego', but to avoid unintentional
+DOCKER_IMAGE=${DOCKER_IMAGE:-"avalanchego"} + +# buildx (BuildKit) improves the speed and UI of builds over the legacy builder and +# simplifies creation of multi-arch images. +# +# Reference: https://docs.docker.com/build/buildkit/ +DOCKER_CMD="docker buildx build" -echo "Building Docker Image with tags: $avalanchego_dockerhub_repo:$commit_hash , $avalanchego_dockerhub_repo:$current_branch" -docker build -t "$avalanchego_dockerhub_repo:$commit_hash" \ - -t "$avalanchego_dockerhub_repo:$current_branch" "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" +# The dockerfile doesn't specify the golang version to minimize the +# changes required to bump the version. Instead, the golang version is +# provided as an argument. +GO_VERSION="$(go list -m -f '{{.GoVersion}}')" +DOCKER_CMD="${DOCKER_CMD} --build-arg GO_VERSION=${GO_VERSION}" -echo "Building Docker Image with tags: $avalanchego_dockerhub_repo:$commit_hash-race , $avalanchego_dockerhub_repo:$current_branch-race" -docker build --build-arg="RACE_FLAG=-r" -t "$avalanchego_dockerhub_repo:$commit_hash-race" \ - -t "$avalanchego_dockerhub_repo:$current_branch-race" "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" +if [[ "${DOCKER_IMAGE}" == *"/"* ]]; then + # Build a multi-arch image since the image name includes a slash which indicates + # the use of a registry e.g. + # + # - dockerhub: [repo]/[image name]:[tag] + # - private registry: [private registry hostname]/[image name]:[tag] + # + # A registry is required to build a multi-arch image since a multi-arch image is + # not really an image at all. A multi-arch image (also called a manifest) is + # basically a list of arch-specific images available from the same registry that + # hosts the manifest. Manifests are not supported for local images. 
+ # + # Reference: https://docs.docker.com/build/building/multi-platform/ + PLATFORMS="${PLATFORMS:-linux/amd64,linux/arm64}" + DOCKER_CMD="${DOCKER_CMD} --push --platform=${PLATFORMS}" + + # A populated DOCKER_USERNAME env var triggers login + if [[ -n "${DOCKER_USERNAME:-}" ]]; then + echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin + fi +else + # Build a single-arch image since the image name does not include a slash which + # indicates that a registry is not available. + # + # Building a single-arch image with buildx and having the resulting image show up + # in the local store of docker images (ala 'docker build') requires explicitly + # loading it from the buildx store with '--load'. + DOCKER_CMD="${DOCKER_CMD} --load" +fi + +echo "Building Docker Image with tags: $DOCKER_IMAGE:$commit_hash , $DOCKER_IMAGE:$image_tag" +${DOCKER_CMD} -t "$DOCKER_IMAGE:$commit_hash" -t "$DOCKER_IMAGE:$image_tag" \ + "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" + +echo "Building Docker Image with tags: $DOCKER_IMAGE:$commit_hash-race , $DOCKER_IMAGE:$image_tag-race" +${DOCKER_CMD} --build-arg="RACE_FLAG=-r" -t "$DOCKER_IMAGE:$commit_hash-race" -t "$DOCKER_IMAGE:$image_tag-race" \ + "$AVALANCHE_PATH" -f "$AVALANCHE_PATH/Dockerfile" + +# Only tag the latest image for the master branch when images are pushed to a registry +if [[ "${DOCKER_IMAGE}" == *"/"* && $image_tag == "master" ]]; then + echo "Tagging current avalanchego images as $DOCKER_IMAGE:latest" + docker buildx imagetools create -t "$DOCKER_IMAGE:latest" "$DOCKER_IMAGE:$commit_hash" +fi diff --git a/scripts/build_test.sh b/scripts/build_test.sh index 2251cc64f6ba..c0c9b72e3230 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -7,6 +7,15 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh -# Ensure execution of fixture unit tests under tests/ but exclude ginkgo tests in tests/e2e and tests/upgrade -# shellcheck disable=SC2046 -go test -shuffle=on -race -timeout="${TIMEOUT:-120s}" -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) +EXCLUDED_TARGETS="| grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade" + +if [[ "$(go env GOOS)" == "windows" ]]; then + # Test discovery for the antithesis test setups is broken due to + # their dependence on the linux-only Antithesis SDK. + EXCLUDED_TARGETS="${EXCLUDED_TARGETS} | grep -v tests/antithesis" +fi + +TEST_TARGETS="$(eval "go list ./... ${EXCLUDED_TARGETS}")" + +# shellcheck disable=SC2086 +go test -shuffle=on -race -timeout="${TIMEOUT:-120s}" -coverprofile="coverage.out" -covermode="atomic" ${TEST_TARGETS} diff --git a/scripts/constants.sh b/scripts/constants.sh index 85ea5226952d..eeaeb08b5317 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -13,14 +13,21 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) # Direct # Where AvalancheGo binary goes avalanchego_path="$AVALANCHE_PATH/build/avalanchego" -# Avalabs docker hub -# avaplatform/avalanchego - defaults to local as to avoid unintentional pushes -# You should probably set it - export DOCKER_REPO='avaplatform/avalanchego' -avalanchego_dockerhub_repo=${DOCKER_REPO:-"avalanchego"} - -# Current branch +# Image tag based on current branch (shared between image build and its test script) # TODO: fix "fatal: No names found, cannot describe anything" in github CI -current_branch=$(git symbolic-ref -q --short HEAD || git describe --tags --exact-match || true) +image_tag=$(git symbolic-ref -q --short HEAD || git describe --tags --exact-match || true) +if [[ -z $image_tag ]]; then + # Supply a default tag when one is not discovered + image_tag=ci_dummy +elif [[ "$image_tag" == */* ]]; then + # Slashes are not legal for docker image tags - replace with dashes + image_tag=$(echo "$image_tag" | tr '/' '-') +fi + +# Current commit (shared between image build and its test script) +# WARNING: this will use the most recent commit even if there are un-committed changes present +full_commit_hash="$(git --git-dir="$AVALANCHE_PATH/.git" rev-parse HEAD)" +commit_hash="${full_commit_hash::8}" git_commit=${AVALANCHEGO_COMMIT:-$( git rev-list -1 HEAD )} diff --git a/scripts/lint.sh b/scripts/lint.sh index b2cbaa50fcff..9fb23ae325be 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -32,7 +32,7 @@ fi TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_no_error_inline_func"} function test_golangci_lint { - go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.56.1 + go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.58.1 golangci-lint run --config .golangci.yml } diff --git a/scripts/mocks.mockgen.source.txt b/scripts/mocks.mockgen.source.txt index 02782a7b7d9c..61efc2240902 100644 --- a/scripts/mocks.mockgen.source.txt 
+++ b/scripts/mocks.mockgen.source.txt @@ -1,7 +1,6 @@ snow/engine/common/sender.go=StateSummarySender,AcceptedStateSummarySender,FrontierSender,AcceptedSender,FetchSender,AppSender,QuerySender,CrossChainAppSender,NetworkAppSender,Gossiper=snow/engine/common/mock_sender.go snow/networking/router/router.go=InternalHandler=snow/networking/router/mock_router.go snow/networking/sender/external_sender.go==snow/networking/sender/mock_external_sender.go -snow/validators/manager.go=SetCallbackListener=snow/validators/mock_manager.go vms/avm/block/executor/manager.go==vms/avm/block/executor/mock_manager.go vms/avm/txs/tx.go==vms/avm/txs/mock_unsigned_tx.go vms/platformvm/block/executor/manager.go==vms/platformvm/block/executor/mock_manager.go diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index ba2be886b0b6..139252666ab0 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -5,7 +5,7 @@ github.com/ava-labs/avalanchego/database=Batch=database/mock_batch.go github.com/ava-labs/avalanchego/database=Iterator=database/mock_iterator.go github.com/ava-labs/avalanchego/message=OutboundMessage=message/mock_message.go github.com/ava-labs/avalanchego/message=OutboundMsgBuilder=message/mock_outbound_message_builder.go -github.com/ava-labs/avalanchego/snow/consensus/snowman=Block=snow/consensus/snowman/mock_block.go +github.com/ava-labs/avalanchego/snow/consensus/snowman=Block=snow/consensus/snowman/snowmantest/mock_block.go github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex=LinearizableVM=snow/engine/avalanche/vertex/mock_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block=BuildBlockWithContextChainVM=snow/engine/snowman/block/mock_build_block_with_context_vm.go github.com/ava-labs/avalanchego/snow/engine/snowman/block=ChainVM=snow/engine/snowman/block/mock_chain_vm.go diff --git a/scripts/protobuf_codegen.sh b/scripts/protobuf_codegen.sh index 5230ca7b83a2..d57868924906 100755 --- a/scripts/protobuf_codegen.sh +++ 
b/scripts/protobuf_codegen.sh @@ -7,20 +7,15 @@ if ! [[ "$0" =~ scripts/protobuf_codegen.sh ]]; then exit 255 fi -## install "buf" -# any version changes here should also be bumped in Dockerfile.buf -# ref. https://docs.buf.build/installation -# ref. https://github.com/bufbuild/buf/releases -BUF_VERSION='1.29.0' +## ensure the correct version of "buf" is installed +BUF_VERSION='1.31.0' if [[ $(buf --version | cut -f2 -d' ') != "${BUF_VERSION}" ]]; then echo "could not find buf ${BUF_VERSION}, is it installed + in PATH?" exit 255 fi ## install "protoc-gen-go" -# any version changes here should also be bumped in Dockerfile.buf -# ref. https://github.com/protocolbuffers/protobuf-go/releases -PROTOC_GEN_GO_VERSION='v1.30.0' +PROTOC_GEN_GO_VERSION='v1.33.0' go install -v google.golang.org/protobuf/cmd/protoc-gen-go@${PROTOC_GEN_GO_VERSION} if [[ $(protoc-gen-go --version | cut -f2 -d' ') != "${PROTOC_GEN_GO_VERSION}" ]]; then # e.g., protoc-gen-go v1.28.1 @@ -29,9 +24,6 @@ if [[ $(protoc-gen-go --version | cut -f2 -d' ') != "${PROTOC_GEN_GO_VERSION}" ] fi ### install "protoc-gen-go-grpc" -# any version changes here should also be bumped in Dockerfile.buf -# ref. https://pkg.go.dev/google.golang.org/grpc/cmd/protoc-gen-go-grpc -# ref. https://github.com/grpc/grpc-go/blob/master/cmd/protoc-gen-go-grpc/main.go PROTOC_GEN_GO_GRPC_VERSION='1.3.0' go install -v google.golang.org/grpc/cmd/protoc-gen-go-grpc@v${PROTOC_GEN_GO_GRPC_VERSION} if [[ $(protoc-gen-go-grpc --version | cut -f2 -d' ') != "${PROTOC_GEN_GO_GRPC_VERSION}" ]]; then diff --git a/scripts/run_prometheus.sh b/scripts/run_prometheus.sh new file mode 100755 index 000000000000..195222723107 --- /dev/null +++ b/scripts/run_prometheus.sh @@ -0,0 +1,120 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Starts a prometheus instance in agent-mode, forwarding to a central +# instance. Intended to enable metrics collection from temporary networks running +# locally and in CI. 
+# +# The prometheus instance will remain running in the background and will forward +# metrics to the central instance for all tmpnet networks. +# +# To stop it: +# +# $ kill -9 `cat ~/.tmpnet/prometheus/run.pid` && rm ~/.tmpnet/prometheus/run.pid +# + +# e.g., +# PROMETHEUS_ID= PROMETHEUS_PASSWORD= ./scripts/run_prometheus.sh +if ! [[ "$0" =~ scripts/run_prometheus.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +PROMETHEUS_WORKING_DIR="${HOME}/.tmpnet/prometheus" +PIDFILE="${PROMETHEUS_WORKING_DIR}"/run.pid + +# First check if an agent-mode prometheus is already running. A single instance can collect +# metrics from all local temporary networks. +if pgrep --pidfile="${PIDFILE}" -f 'prometheus.*enable-feature=agent' &> /dev/null; then + echo "prometheus is already running locally with --enable-feature=agent" + exit 0 +fi + +PROMETHEUS_URL="${PROMETHEUS_URL:-https://prometheus-experimental.avax-dev.network}" +if [[ -z "${PROMETHEUS_URL}" ]]; then + echo "Please provide a value for PROMETHEUS_URL" + exit 1 +fi + +PROMETHEUS_ID="${PROMETHEUS_ID:-}" +if [[ -z "${PROMETHEUS_ID}" ]]; then + echo "Please provide a value for PROMETHEUS_ID" + exit 1 +fi + +PROMETHEUS_PASSWORD="${PROMETHEUS_PASSWORD:-}" +if [[ -z "${PROMETHEUS_PASSWORD}" ]]; then + echo "Please provide a value for PROMETHEUS_PASSWORD" + exit 1 +fi + +# This was the LTS version when this script was written. Probably not +# much reason to update it unless something breaks since the usage +# here is only to collect metrics from temporary networks. +VERSION="2.45.3" + +# Ensure the prometheus command is locally available +CMD=prometheus +if ! command -v "${CMD}" &> /dev/null; then + # Try to use a local version + CMD="${PWD}/bin/prometheus" + if ! command -v "${CMD}" &> /dev/null; then + echo "prometheus not found, attempting to install..."
+ + # Determine the arch + if which sw_vers &> /dev/null; then + echo "on macos, only amd64 binaries are available so rosetta is required on apple silicon machines." + echo "to avoid using rosetta, install via homebrew: brew install prometheus" + DIST=darwin + else + ARCH="$(uname -i)" + if [[ "${ARCH}" != "x86_64" ]]; then + echo "on linux, only amd64 binaries are available. manual installation of prometheus is required." + exit 1 + else + DIST="linux" + fi + fi + + # Install the specified release + PROMETHEUS_FILE="prometheus-${VERSION}.${DIST}-amd64" + URL="https://github.com/prometheus/prometheus/releases/download/v${VERSION}/${PROMETHEUS_FILE}.tar.gz" + curl -s -L "${URL}" | tar zxv -C /tmp > /dev/null + mkdir -p "$(dirname "${CMD}")" + cp /tmp/"${PROMETHEUS_FILE}/prometheus" "${CMD}" + fi +fi + +# Configure prometheus +FILE_SD_PATH="${PROMETHEUS_WORKING_DIR}/file_sd_configs" +mkdir -p "${FILE_SD_PATH}" + +echo "writing configuration..." +cat >"${PROMETHEUS_WORKING_DIR}"/prometheus.yaml < prometheus.log 2>&1 & +echo $! > "${PIDFILE}" +echo "running with pid $(cat "${PIDFILE}")" diff --git a/scripts/run_promtail.sh b/scripts/run_promtail.sh new file mode 100755 index 000000000000..9b386d3d55f2 --- /dev/null +++ b/scripts/run_promtail.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Starts a promtail instance to collect logs from temporary networks +# running locally and in CI. +# +# The promtail instance will remain running in the background and will forward +# logs to the central instance for all tmpnet networks. +# +# To stop it: +# +# $ kill -9 `cat ~/.tmpnet/promtail/run.pid` && rm ~/.tmpnet/promtail/run.pid +# + +# e.g., +# LOKI_ID= LOKI_PASSWORD= ./scripts/run_promtail.sh +if ! [[ "$0" =~ scripts/run_promtail.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +PROMTAIL_WORKING_DIR="${HOME}/.tmpnet/promtail" +PIDFILE="${PROMTAIL_WORKING_DIR}"/run.pid + +# First check if promtail is already running. 
A single instance can +# collect logs from all local temporary networks. +if pgrep --pidfile="${PIDFILE}" &> /dev/null; then + echo "promtail is already running" + exit 0 +fi + +LOKI_URL="${LOKI_URL:-https://loki-experimental.avax-dev.network}" +if [[ -z "${LOKI_URL}" ]]; then + echo "Please provide a value for LOKI_URL" + exit 1 +fi + +LOKI_ID="${LOKI_ID:-}" +if [[ -z "${LOKI_ID}" ]]; then + echo "Please provide a value for LOKI_ID" + exit 1 +fi + +LOKI_PASSWORD="${LOKI_PASSWORD:-}" +if [[ -z "${LOKI_PASSWORD}" ]]; then + echo "Please provide a value for LOKI_PASSWORD" + exit 1 +fi + +# Version as of this writing +VERSION="v2.9.5" + +# Ensure the promtail command is locally available +CMD=promtail +if ! command -v "${CMD}" &> /dev/null; then + # Try to use a local version + CMD="${PWD}/bin/promtail" + if ! command -v "${CMD}" &> /dev/null; then + echo "promtail not found, attempting to install..." + # Determine the arch + if which sw_vers &> /dev/null; then + DIST="darwin-$(uname -m)" + else + ARCH="$(uname -i)" + if [[ "${ARCH}" == "aarch64" ]]; then + ARCH="arm64" + elif [[ "${ARCH}" == "x86_64" ]]; then + ARCH="amd64" + fi + DIST="linux-${ARCH}" + fi + + # Install the specified release + PROMTAIL_FILE="promtail-${DIST}" + ZIP_PATH="/tmp/${PROMTAIL_FILE}.zip" + BIN_DIR="$(dirname "${CMD}")" + URL="https://github.com/grafana/loki/releases/download/${VERSION}/promtail-${DIST}.zip" + curl -L -o "${ZIP_PATH}" "${URL}" + unzip "${ZIP_PATH}" -d "${BIN_DIR}" + mv "${BIN_DIR}/${PROMTAIL_FILE}" "${CMD}" + fi +fi + +# Configure promtail +FILE_SD_PATH="${PROMTAIL_WORKING_DIR}/file_sd_configs" +mkdir -p "${FILE_SD_PATH}" + +echo "writing configuration..." +cat >"${PROMTAIL_WORKING_DIR}"/promtail.yaml < promtail.log 2>&1 & +echo $!
> "${PIDFILE}" +echo "running with pid $(cat "${PIDFILE}")" diff --git a/scripts/tests.build_antithesis_images.sh b/scripts/tests.build_antithesis_images.sh new file mode 100755 index 000000000000..8fdce84ab641 --- /dev/null +++ b/scripts/tests.build_antithesis_images.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Validates the construction of the antithesis images for a test setup specified by TEST_SETUP. +# +# 1. Building the antithesis test image +# 2. Extracting the docker compose configuration from the image +# 3. Running the workload and its target network without error for 30 seconds +# 4. Stopping the workload and its target network +# + +# e.g., +# TEST_SETUP=avalanchego ./scripts/tests.build_antithesis_images.sh # Test build of images for avalanchego test setup +# DEBUG=1 TEST_SETUP=avalanchego ./scripts/tests.build_antithesis_images.sh # Retain the temporary compose path for troubleshooting + +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) + +# Discover the default tag that will be used for the image +source "${AVALANCHE_PATH}"/scripts/constants.sh +export TAG="${commit_hash}" + +# Build the images for the specified test setup +export TEST_SETUP="${TEST_SETUP:-}" +bash -x "${AVALANCHE_PATH}"/scripts/build_antithesis_images.sh + +# Create a container from the config image to extract compose configuration from +IMAGE_NAME="antithesis-${TEST_SETUP}-config" +CONTAINER_NAME="tmp-${IMAGE_NAME}" +docker create --name "${CONTAINER_NAME}" "${IMAGE_NAME}:${TAG}" /bin/true + +# Create a temporary directory to write the compose configuration to +TMPDIR="$(mktemp -d)" +echo "using temporary directory ${TMPDIR} as the docker-compose path" + +COMPOSE_FILE="${TMPDIR}/docker-compose.yml" +COMPOSE_CMD="docker-compose -f ${COMPOSE_FILE}" + +# Ensure cleanup +function cleanup { + echo "removing temporary container" + docker rm "${CONTAINER_NAME}" + echo "stopping and removing the docker compose project" + ${COMPOSE_CMD} down
--volumes + if [[ -z "${DEBUG:-}" ]]; then + echo "removing temporary dir" + rm -rf "${TMPDIR}" + fi +} +trap cleanup EXIT + +# Copy the docker-compose.yml file out of the container +docker cp "${CONTAINER_NAME}":/docker-compose.yml "${COMPOSE_FILE}" + +# Copy the volume paths out of the container +docker cp "${CONTAINER_NAME}":/volumes "${TMPDIR}/" + +# Run the docker compose project for 30 seconds without error. Local +# network bootstrap is ~6s, but github workers can be much slower. +${COMPOSE_CMD} up -d +sleep 30 +if ${COMPOSE_CMD} ps -q | xargs docker inspect -f '{{ .State.Status }}' | grep -v 'running'; then + echo "An error occurred." + exit 255 +fi + +# Success! diff --git a/scripts/tests.build_image.sh b/scripts/tests.build_image.sh new file mode 100755 index 000000000000..af3cea84f1da --- /dev/null +++ b/scripts/tests.build_image.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# This test script is intended to execute successfully on a ubuntu 22.04 host with either the +# amd64 or arm64 arches. Recent docker (with buildx support) and qemu are required. See +# build_image.sh for more details. + +# TODO(marun) Perform more extensive validation (e.g. e2e testing) against one or more images + +# Directory above this script +AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) + +source "$AVALANCHE_PATH"/scripts/constants.sh + +build_and_test() { + local image_name=$1 + + DOCKER_IMAGE="$image_name" ./scripts/build_image.sh + + echo "listing images" + docker images + + local host_arch + host_arch="$(go env GOARCH)" + + if [[ "$image_name" == *"/"* ]]; then + # Test all arches if testing a multi-arch image + local arches=("amd64" "arm64") + else + # Test only the host platform for single arch builds + local arches=("$host_arch") + fi + + # Check all of the images expected to have been built + local target_images=( + "$image_name:$commit_hash" + "$image_name:$image_tag" + "$image_name:$commit_hash-race" + "$image_name:$image_tag-race" + ) + + for arch in "${arches[@]}"; do + for target_image in "${target_images[@]}"; do + if [[ "$host_arch" == "amd64" && "$arch" == "arm64" && "$target_image" =~ "-race" ]]; then + # Error reported when trying to sanity check this configuration in github ci: + # + # FATAL: ThreadSanitizer: unsupported VMA range + # FATAL: Found 39 - Supported 48 + # + echo "skipping sanity check for $target_image" + echo "image is for arm64 and binary is compiled with race detection" + echo "amd64 github workers are known to run kernels incompatible with these images" + else + echo "checking sanity of image $target_image for $arch by running 'avalanchego --version'" + docker run -t --rm --platform "linux/$arch" "$target_image" /avalanchego/build/avalanchego --version + fi + done + done +} + +echo "checking build of single-arch images" +build_and_test avalanchego + +echo "starting local docker registry to allow verification of multi-arch image builds" +REGISTRY_CONTAINER_ID="$(docker run --rm -d -P registry:2)" +REGISTRY_PORT="$(docker port "$REGISTRY_CONTAINER_ID" 5000/tcp | grep -v "::" | awk -F: '{print $NF}')" + +echo "starting docker builder that supports multiplatform builds" +# - creating a new builder enables multiplatform builds +# - '--driver-opt network=host' enables the builder to use the local registry 
+docker buildx create --use --name ci-builder --driver-opt network=host + +# Ensure registry and builder cleanup on teardown +function cleanup { + echo "stopping local docker registry" + docker stop "${REGISTRY_CONTAINER_ID}" + echo "removing multiplatform builder" + docker buildx rm ci-builder +} +trap cleanup EXIT + +echo "checking build of multi-arch images" +build_and_test "localhost:${REGISTRY_PORT}/avalanchego" diff --git a/scripts/tests.e2e.existing.sh b/scripts/tests.e2e.existing.sh index d369a777d388..4b28fc1ad271 100755 --- a/scripts/tests.e2e.existing.sh +++ b/scripts/tests.e2e.existing.sh @@ -2,29 +2,17 @@ set -euo pipefail -################################################################ -# This script deploys a temporary network and configures -# tests.e2e.sh to execute the e2e suite against it. This -# validates that tmpnetctl is capable of starting a network and -# that the e2e suite is capable of executing against a network -# that it did not create. -################################################################ +# This script verifies that a network can be reused across test runs. # e.g., # ./scripts/build.sh -# ./scripts/tests.e2e.existing.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo -# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially +# ./scripts/tests.e2e.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo # AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.existing.sh # Customization of avalanchego path if ! [[ "$0" =~ scripts/tests.e2e.existing.sh ]]; then echo "must be run from repository root" exit 255 fi -# Ensure an absolute path to avoid dependency on the working directory -# of script execution. 
-AVALANCHEGO_PATH="$(realpath "${AVALANCHEGO_PATH:-./build/avalanchego}")" -export AVALANCHEGO_PATH - # Provide visual separation between testing and setup/teardown function print_separator { printf '%*s\n' "${COLUMNS:-80}" '' | tr ' ' ─ @@ -33,33 +21,28 @@ function print_separator { # Ensure network cleanup on teardown function cleanup { print_separator - echo "cleaning up temporary network" - if [[ -n "${TMPNET_NETWORK_DIR:-}" ]]; then - ./build/tmpnetctl stop-network - fi + echo "cleaning up reusable network" + ginkgo -v ./tests/e2e/e2e.test -- --stop-network } trap cleanup EXIT -# Start a temporary network -./scripts/build_tmpnetctl.sh print_separator -./build/tmpnetctl start-network +echo "starting initial test run that should create the reusable network" +./scripts/tests.e2e.sh --reuse-network --ginkgo.focus-file=xsvm.go "${@}" -# Determine the network configuration path from the latest symlink -LATEST_SYMLINK_PATH="${HOME}/.tmpnet/networks/latest" -if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then - TMPNET_NETWORK_DIR="$(realpath "${LATEST_SYMLINK_PATH}")" - export TMPNET_NETWORK_DIR -else - echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" - exit 255 -fi +print_separator +echo "determining the network path of the reusable network created by the first test run" +SYMLINK_PATH="${HOME}/.tmpnet/networks/latest_avalanchego-e2e" +INITIAL_NETWORK_DIR="$(realpath "${SYMLINK_PATH}")" print_separator -# - Setting E2E_USE_EXISTING_NETWORK configures tests.e2e.sh to use -# the temporary network identified by TMPNET_NETWORK_DIR. -# - Only a single test (selected with --ginkgo.focus-file) is required -# to validate that an existing network can be used by an e2e test -# suite run. Executing more tests would be duplicative of the testing -# performed against a network created by the test suite. 
-E2E_USE_EXISTING_NETWORK=1 ./scripts/tests.e2e.sh --ginkgo.focus-file=permissionless_subnets.go +echo "starting second test run that should reuse the network created by the first run" +./scripts/tests.e2e.sh --reuse-network --ginkgo.focus-file=xsvm.go "${@}" + +SUBSEQUENT_NETWORK_DIR="$(realpath "${SYMLINK_PATH}")" +echo "checking that the symlink path remains the same, indicating that the network was reused" +if [[ "${INITIAL_NETWORK_DIR}" != "${SUBSEQUENT_NETWORK_DIR}" ]]; then + print_separator + echo "network was not reused across test runs" + exit 1 +fi diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh index 9aaf63554005..564116b40fda 100755 --- a/scripts/tests.e2e.sh +++ b/scripts/tests.e2e.sh @@ -7,7 +7,6 @@ set -euo pipefail # ./scripts/tests.e2e.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo # E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially # AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.sh # Customization of avalanchego path -# E2E_USE_EXISTING_NETWORK=1 TMPNET_NETWORK_DIR=/path/to ./scripts/tests.e2e.sh # Execute against an existing network if ! [[ "$0" =~ scripts/tests.e2e.sh ]]; then echo "must be run from repository root" exit 255 @@ -27,16 +26,14 @@ go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 ACK_GINKGO_RC=true ginkgo build ./tests/e2e ./tests/e2e/e2e.test --help -################################# -# Since TMPNET_NETWORK_DIR may be set in the environment (e.g. to configure ginkgo -# or tmpnetctl), configuring the use of an existing network with this script -# requires the extra step of setting E2E_USE_EXISTING_NETWORK=1. 
-if [[ -n "${E2E_USE_EXISTING_NETWORK:-}" && -n "${TMPNET_NETWORK_DIR:-}" ]]; then - E2E_ARGS="--use-existing-network" -else - AVALANCHEGO_PATH="$(realpath "${AVALANCHEGO_PATH:-./build/avalanchego}")" - E2E_ARGS="--avalanchego-path=${AVALANCHEGO_PATH}" -fi +# Enable subnet testing by building xsvm +./scripts/build_xsvm.sh +echo "" + +# Ensure an absolute path to avoid dependency on the working directory +# of script execution. +AVALANCHEGO_PATH="$(realpath "${AVALANCHEGO_PATH:-./build/avalanchego}")" +E2E_ARGS="--avalanchego-path=${AVALANCHEGO_PATH}" ################################# # Determine ginkgo args diff --git a/scripts/tests.upgrade.sh b/scripts/tests.upgrade.sh index 9c9cf13dc40c..8cc158d87d49 100755 --- a/scripts/tests.upgrade.sh +++ b/scripts/tests.upgrade.sh @@ -3,9 +3,9 @@ set -euo pipefail # e.g., -# ./scripts/tests.upgrade.sh # Use default version -# ./scripts/tests.upgrade.sh 1.10.18 # Specify a version -# AVALANCHEGO_PATH=./path/to/avalanchego ./scripts/tests.upgrade.sh 1.10.18 # Customization of avalanchego path +# ./scripts/tests.upgrade.sh # Use default version +# ./scripts/tests.upgrade.sh 1.11.0 # Specify a version +# AVALANCHEGO_PATH=./path/to/avalanchego ./scripts/tests.upgrade.sh 1.11.0 # Customization of avalanchego path if ! [[ "$0" =~ scripts/tests.upgrade.sh ]]; then echo "must be run from repository root" exit 255 @@ -16,9 +16,8 @@ fi # local network, this flag must be updated to the last compatible # version with the latest code. # -# v1.10.18 includes restrictions on ports sent over the p2p network along with -# proposervm and P-chain rule changes on the local network. -DEFAULT_VERSION="1.10.18" +# v1.11.0 activates Durango. 
+DEFAULT_VERSION="1.11.0" VERSION="${1:-${DEFAULT_VERSION}}" if [[ -z "${VERSION}" ]]; then diff --git a/snow/README.md b/snow/README.md index b5b16bc938ba..99cfc5351356 100644 --- a/snow/README.md +++ b/snow/README.md @@ -47,7 +47,7 @@ Currently, Avalanchego implements its own message serialization to communicate. ### [Network](https://github.com/ava-labs/avalanchego/blob/master/network/network.go) -The networking interface is shared across all chains. It implements functions from the `ExternalSender` interface. The two functions it implements are `Send` and `Gossip`. `Send` sends a message of type `OutboundMessage` to a specific set of nodes (specified by an array of `NodeIDs`). `Gossip` sends a message of type `OutboundMessage` to a random group of nodes in a subnet (can be a validator or a non-validator). Gossipping is used to push transactions across the network. The networking protocol uses TLS to pass messages between peers. +The networking interface is shared across all chains. It implements functions from the `ExternalSender` interface. The two functions it implements are `Send` and `Gossip`. `Send` sends a message of type `OutboundMessage` to a specific set of nodes (specified by an array of `NodeIDs`). `Gossip` sends a message of type `OutboundMessage` to a random group of nodes in a subnet (can be a validator or a non-validator). Gossiping is used to push transactions across the network. The networking protocol uses TLS to pass messages between peers. Along with sending and gossiping, the networking library is also responsible for making connections and maintaining connections. Any node whether they are a validator or non-validator will attempt to connect to the primary network. 
diff --git a/snow/choices/test_decidable.go b/snow/choices/test_decidable.go index 39e8ed67b7c1..e750c2465f91 100644 --- a/snow/choices/test_decidable.go +++ b/snow/choices/test_decidable.go @@ -48,3 +48,7 @@ func (d *TestDecidable) Reject(context.Context) error { func (d *TestDecidable) Status() Status { return d.StatusV } + +func (d *TestDecidable) SetStatus(status Status) { + d.StatusV = status +} diff --git a/snow/consensus/snowball/binary_snowball.go b/snow/consensus/snowball/binary_snowball.go index 2e17bc93501a..16649c3252a9 100644 --- a/snow/consensus/snowball/binary_snowball.go +++ b/snow/consensus/snowball/binary_snowball.go @@ -7,9 +7,9 @@ import "fmt" var _ Binary = (*binarySnowball)(nil) -func newBinarySnowball(beta, choice int) binarySnowball { +func newBinarySnowball(alphaPreference int, terminationConditions []terminationCondition, choice int) binarySnowball { return binarySnowball{ - binarySnowflake: newBinarySnowflake(beta, choice), + binarySnowflake: newBinarySnowflake(alphaPreference, terminationConditions, choice), preference: choice, } } @@ -39,14 +39,14 @@ func (sb *binarySnowball) Preference() int { return sb.preference } -func (sb *binarySnowball) RecordSuccessfulPoll(choice int) { - sb.increasePreferenceStrength(choice) - sb.binarySnowflake.RecordSuccessfulPoll(choice) -} - -func (sb *binarySnowball) RecordPollPreference(choice int) { - sb.increasePreferenceStrength(choice) - sb.binarySnowflake.RecordPollPreference(choice) +func (sb *binarySnowball) RecordPoll(count, choice int) { + if count >= sb.alphaPreference { + sb.preferenceStrength[choice]++ + if sb.preferenceStrength[choice] > sb.preferenceStrength[1-choice] { + sb.preference = choice + } + } + sb.binarySnowflake.RecordPoll(count, choice) } func (sb *binarySnowball) String() string { @@ -57,10 +57,3 @@ func (sb *binarySnowball) String() string { sb.preferenceStrength[1], &sb.binarySnowflake) } - -func (sb *binarySnowball) increasePreferenceStrength(choice int) { - 
sb.preferenceStrength[choice]++ - if sb.preferenceStrength[choice] > sb.preferenceStrength[1-choice] { - sb.preference = choice - } -} diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go index 2c2a8421e043..968743ef36a8 100644 --- a/snow/consensus/snowball/binary_snowball_test.go +++ b/snow/consensus/snowball/binary_snowball_test.go @@ -15,25 +15,27 @@ func TestBinarySnowball(t *testing.T) { red := 0 blue := 1 + alphaPreference, alphaConfidence := 2, 3 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) require.Equal(blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.True(sb.Finalized()) } @@ -44,33 +46,35 @@ func TestBinarySnowballRecordPollPreference(t *testing.T) { red := 0 blue := 1 + alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) require.Equal(blue, sb.Preference()) 
require.False(sb.Finalized()) - sb.RecordPollPreference(red) + sb.RecordPoll(alphaPreference, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) require.Equal(red, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 1, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" + expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 1, SF(Confidence = [2], Finalized = true, SL(Preference = 0)))" require.Equal(expected, sb.String()) } @@ -80,27 +84,29 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { red := 0 blue := 1 + alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.False(sb.Finalized()) sb.RecordUnsuccessfulPoll() - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" + expected := "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 3, SF(Confidence = [2], Finalized = true, SL(Preference = 1)))" require.Equal(expected, sb.String()) } @@ -110,37 +116,39 @@ func 
TestBinarySnowballAcceptWeirdColor(t *testing.T) { blue := 0 red := 1 + alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) sb.RecordUnsuccessfulPoll() require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) sb.RecordUnsuccessfulPoll() require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(blue, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 1, PreferenceStrength[0] = 2, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" + expected := "SB(Preference = 1, PreferenceStrength[0] = 2, PreferenceStrength[1] = 2, SF(Confidence = [2], Finalized = true, SL(Preference = 0)))" require.Equal(expected, sb.String()) } @@ -150,26 +158,28 @@ func TestBinarySnowballLockColor(t *testing.T) { red := 0 blue := 1 + alphaPreference, alphaConfidence := 1, 2 beta := 1 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newBinarySnowball(beta, red) + sb := newBinarySnowball(alphaPreference, terminationConditions, red) - sb.RecordSuccessfulPoll(red) + sb.RecordPoll(alphaConfidence, red) require.Equal(red, sb.Preference()) require.True(sb.Finalized()) - sb.RecordSuccessfulPoll(blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(red, sb.Preference()) require.True(sb.Finalized()) - sb.RecordPollPreference(blue) - sb.RecordSuccessfulPoll(blue) + 
sb.RecordPoll(alphaPreference, blue) + sb.RecordPoll(alphaConfidence, blue) require.Equal(red, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = 1, PreferenceStrength[0] = 1, PreferenceStrength[1] = 3, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" + expected := "SB(Preference = 1, PreferenceStrength[0] = 1, PreferenceStrength[1] = 3, SF(Confidence = [1], Finalized = true, SL(Preference = 0)))" require.Equal(expected, sb.String()) } diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go index 5f897af88430..6dc856a7bc7c 100644 --- a/snow/consensus/snowball/binary_snowflake.go +++ b/snow/consensus/snowball/binary_snowflake.go @@ -7,59 +7,81 @@ import "fmt" var _ Binary = (*binarySnowflake)(nil) -func newBinarySnowflake(beta, choice int) binarySnowflake { +func newBinarySnowflake(alphaPreference int, terminationConditions []terminationCondition, choice int) binarySnowflake { return binarySnowflake{ - binarySlush: newBinarySlush(choice), - beta: beta, + binarySlush: newBinarySlush(choice), + alphaPreference: alphaPreference, + terminationConditions: terminationConditions, + confidence: make([]int, len(terminationConditions)), } } // binarySnowflake is the implementation of a binary snowflake instance +// Invariant: +// len(terminationConditions) == len(confidence) +// terminationConditions[i].alphaConfidence < terminationConditions[i+1].alphaConfidence +// terminationConditions[i].beta <= terminationConditions[i+1].beta +// confidence[i] >= confidence[i+1] (except after finalizing due to early termination) type binarySnowflake struct { // wrap the binary slush logic binarySlush - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int + // alphaPreference is the threshold required to update the preference + alphaPreference int - // beta is the number of consecutive successful queries required for - // finalization. 
- beta int + // terminationConditions gives the ascending ordered list of alphaConfidence values + // required to increment the corresponding confidence counter. + // The corresponding beta values give the threshold required to finalize this instance. + terminationConditions []terminationCondition + + // confidence is the number of consecutive succcessful polls for a given + // alphaConfidence threshold. + // This instance finalizes when confidence[i] >= terminationConditions[i].beta for any i + confidence []int // finalized prevents the state from changing after the required number of // consecutive polls has been reached finalized bool } -func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { +func (sf *binarySnowflake) RecordPoll(count, choice int) { if sf.finalized { return // This instance is already decided. } - if preference := sf.Preference(); preference == choice { - sf.confidence++ - } else { - // confidence is set to 1 because there has already been 1 successful - // poll, namely this poll. - sf.confidence = 1 + if count < sf.alphaPreference { + sf.RecordUnsuccessfulPoll() + return } - sf.finalized = sf.confidence >= sf.beta + // If I am changing my preference, reset confidence counters + // before recording a successful poll on the slush instance. + if choice != sf.Preference() { + clear(sf.confidence) + } sf.binarySlush.RecordSuccessfulPoll(choice) -} -func (sf *binarySnowflake) RecordPollPreference(choice int) { - if sf.finalized { - return // This instance is already decided. + for i, terminationCondition := range sf.terminationConditions { + // If I did not reach this alpha threshold, I did not + // reach any more alpha thresholds. + // Clear the remaining confidence counters. + if count < terminationCondition.alphaConfidence { + clear(sf.confidence[i:]) + return + } + + // I reached this alpha threshold, increment the confidence counter + // and check if I can finalize. 
+ sf.confidence[i]++ + if sf.confidence[i] >= terminationCondition.beta { + sf.finalized = true + return + } } - - sf.confidence = 0 - sf.binarySlush.RecordSuccessfulPoll(choice) } func (sf *binarySnowflake) RecordUnsuccessfulPoll() { - sf.confidence = 0 + clear(sf.confidence) } func (sf *binarySnowflake) Finalized() bool { @@ -67,7 +89,7 @@ func (sf *binarySnowflake) Finalized() bool { } func (sf *binarySnowflake) String() string { - return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", + return fmt.Sprintf("SF(Confidence = %v, Finalized = %v, %s)", sf.confidence, sf.finalized, &sf.binarySlush) diff --git a/snow/consensus/snowball/binary_snowflake_test.go b/snow/consensus/snowball/binary_snowflake_test.go index 085b94c5f450..ca2347aa086d 100644 --- a/snow/consensus/snowball/binary_snowflake_test.go +++ b/snow/consensus/snowball/binary_snowflake_test.go @@ -15,37 +15,80 @@ func TestBinarySnowflake(t *testing.T) { blue := 0 red := 1 + alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newBinarySnowflake(beta, red) + sf := newBinarySnowflake(alphaPreference, terminationConditions, red) require.Equal(red, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(blue) + sf.RecordPoll(alphaConfidence, blue) require.Equal(blue, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(red) + sf.RecordPoll(alphaConfidence, red) require.Equal(red, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(blue) + sf.RecordPoll(alphaConfidence, blue) require.Equal(blue, sf.Preference()) require.False(sf.Finalized()) - sf.RecordPollPreference(red) + sf.RecordPoll(alphaPreference, red) require.Equal(red, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(blue) + sf.RecordPoll(alphaConfidence, blue) require.Equal(blue, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(blue) + 
sf.RecordPoll(alphaConfidence, blue) require.Equal(blue, sf.Preference()) require.True(sf.Finalized()) } + +type binarySnowflakeTest struct { + require *require.Assertions + + binarySnowflake +} + +func newBinarySnowflakeTest(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[int] { + require := require.New(t) + + return &binarySnowflakeTest{ + require: require, + binarySnowflake: newBinarySnowflake(alphaPreference, terminationConditions, 0), + } +} + +func (sf *binarySnowflakeTest) RecordPoll(count int, choice int) { + sf.binarySnowflake.RecordPoll(count, choice) +} + +func (sf *binarySnowflakeTest) AssertEqual(expectedConfidences []int, expectedFinalized bool, expectedPreference int) { + sf.require.Equal(expectedPreference, sf.Preference()) + sf.require.Equal(expectedConfidences, sf.binarySnowflake.confidence) + sf.require.Equal(expectedFinalized, sf.Finalized()) +} + +func TestBinarySnowflakeErrorDrivenSingleChoice(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeSingleChoiceSuite[int]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newBinarySnowflakeTest, 0) + }) + } +} + +func TestBinarySnowflakeErrorDrivenMultiChoice(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeMultiChoiceSuite[int]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newBinarySnowflakeTest, 0, 1) + }) + } +} diff --git a/snow/consensus/snowball/consensus.go b/snow/consensus/snowball/consensus.go index 82d57b749f26..19df9e75fa9a 100644 --- a/snow/consensus/snowball/consensus.go +++ b/snow/consensus/snowball/consensus.go @@ -47,10 +47,9 @@ type Factory interface { } // Nnary is a snow instance deciding between an unbounded number of values. -// The caller samples k nodes and then calls -// 1. RecordSuccessfulPoll if choice collects >= alphaConfidence votes -// 2. RecordPollPreference if choice collects >= alphaPreference votes -// 3. 
RecordUnsuccessfulPoll otherwise +// The caller samples k nodes and calls RecordPoll with the result. +// RecordUnsuccessfulPoll resets the confidence counters when one or +// more consecutive polls fail to reach alphaPreference votes. type Nnary interface { fmt.Stringer @@ -60,14 +59,8 @@ type Nnary interface { // Returns the currently preferred choice to be finalized Preference() ids.ID - // RecordSuccessfulPoll records a successful poll towards finalizing the - // specified choice. Assumes the choice was previously added. - RecordSuccessfulPoll(choice ids.ID) - - // RecordPollPreference records a poll that preferred the specified choice - // but did not contribute towards finalizing the specified choice. Assumes - // the choice was previously added. - RecordPollPreference(choice ids.ID) + // RecordPoll records the results of a network poll + RecordPoll(count int, choice ids.ID) // RecordUnsuccessfulPoll resets the snowflake counter of this instance RecordUnsuccessfulPoll() @@ -77,23 +70,17 @@ type Nnary interface { } // Binary is a snow instance deciding between two values. -// The caller samples k nodes and then calls -// 1. RecordSuccessfulPoll if choice collects >= alphaConfidence votes -// 2. RecordPollPreference if choice collects >= alphaPreference votes -// 3. RecordUnsuccessfulPoll otherwise +// The caller samples k nodes and calls RecordPoll with the result. +// RecordUnsuccessfulPoll resets the confidence counters when one or +// more consecutive polls fail to reach alphaPreference votes. 
type Binary interface { fmt.Stringer // Returns the currently preferred choice to be finalized Preference() int - // RecordSuccessfulPoll records a successful poll towards finalizing the - // specified choice - RecordSuccessfulPoll(choice int) - - // RecordPollPreference records a poll that preferred the specified choice - // but did not contribute towards finalizing the specified choice - RecordPollPreference(choice int) + // RecordPoll records the results of a network poll + RecordPoll(count, choice int) // RecordUnsuccessfulPoll resets the snowflake counter of this instance RecordUnsuccessfulPoll() @@ -103,20 +90,14 @@ type Binary interface { } // Unary is a snow instance deciding on one value. -// The caller samples k nodes and then calls -// 1. RecordSuccessfulPoll if choice collects >= alphaConfidence votes -// 2. RecordPollPreference if choice collects >= alphaPreference votes -// 3. RecordUnsuccessfulPoll otherwise +// The caller samples k nodes and calls RecordPoll with the result. +// RecordUnsuccessfulPoll resets the confidence counters when one or +// more consecutive polls fail to reach alphaPreference votes. type Unary interface { fmt.Stringer - // RecordSuccessfulPoll records a successful poll that reaches an alpha - // confidence threshold. - RecordSuccessfulPoll() - - // RecordPollPreference records a poll that receives an alpha preference - // threshold, but not an alpha confidence threshold. - RecordPollPreference() + // RecordPoll records the results of a network poll + RecordPoll(count int) // RecordUnsuccessfulPoll resets the snowflake counter of this instance RecordUnsuccessfulPoll() @@ -124,9 +105,8 @@ type Unary interface { // Return whether a choice has been finalized Finalized() bool - // Returns a new binary snowball instance with the agreement parameters - // transferred. Takes in the new beta value and the original choice - Extend(beta, originalPreference int) Binary + // Returns a new binary snowball instance with the original choice. 
+ Extend(originalPreference int) Binary // Returns a new unary snowflake instance with the same state Clone() Unary diff --git a/snow/consensus/snowball/consensus_performance_test.go b/snow/consensus/snowball/consensus_performance_test.go index 776e11fa7172..dbd3d95e6730 100644 --- a/snow/consensus/snowball/consensus_performance_test.go +++ b/snow/consensus/snowball/consensus_performance_test.go @@ -22,8 +22,7 @@ func TestDualAlphaOptimization(t *testing.T) { K: 20, AlphaPreference: 15, AlphaConfidence: 15, - BetaVirtuous: 15, - BetaRogue: 20, + Beta: 20, } seed uint64 = 0 source = prng.NewMT19937() diff --git a/snow/consensus/snowball/factory.go b/snow/consensus/snowball/factory.go index 1de693b46d1f..e9ae98180e6e 100644 --- a/snow/consensus/snowball/factory.go +++ b/snow/consensus/snowball/factory.go @@ -13,23 +13,23 @@ var ( type snowballFactory struct{} func (snowballFactory) NewNnary(params Parameters, choice ids.ID) Nnary { - sb := newNnarySnowball(params.BetaVirtuous, params.BetaRogue, choice) + sb := newNnarySnowball(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta), choice) return &sb } func (snowballFactory) NewUnary(params Parameters) Unary { - sb := newUnarySnowball(params.BetaVirtuous) + sb := newUnarySnowball(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta)) return &sb } type snowflakeFactory struct{} func (snowflakeFactory) NewNnary(params Parameters, choice ids.ID) Nnary { - sf := newNnarySnowflake(params.BetaVirtuous, params.BetaRogue, choice) + sf := newNnarySnowflake(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta), choice) return &sf } func (snowflakeFactory) NewUnary(params Parameters) Unary { - sf := newUnarySnowflake(params.BetaVirtuous) + sf := newUnarySnowflake(params.AlphaPreference, newSingleTerminationCondition(params.AlphaConfidence, params.Beta)) return &sf } diff --git a/snow/consensus/snowball/flat.go 
b/snow/consensus/snowball/flat.go index 01b5975cba0a..3d159ca7a352 100644 --- a/snow/consensus/snowball/flat.go +++ b/snow/consensus/snowball/flat.go @@ -28,17 +28,6 @@ type Flat struct { func (f *Flat) RecordPoll(votes bag.Bag[ids.ID]) bool { pollMode, numVotes := votes.Mode() - switch { - // AlphaConfidence is guaranteed to be >= AlphaPreference, so we must check - // if the poll had enough votes to increase the confidence first. - case numVotes >= f.params.AlphaConfidence: - f.RecordSuccessfulPoll(pollMode) - return true - case numVotes >= f.params.AlphaPreference: - f.RecordPollPreference(pollMode) - return true - default: - f.RecordUnsuccessfulPoll() - return false - } + f.Nnary.RecordPoll(numVotes, pollMode) + return numVotes >= f.params.AlphaPreference } diff --git a/snow/consensus/snowball/flat_test.go b/snow/consensus/snowball/flat_test.go index 3f6eab8d5fdf..51b7390bb88d 100644 --- a/snow/consensus/snowball/flat_test.go +++ b/snow/consensus/snowball/flat_test.go @@ -18,8 +18,7 @@ func TestFlat(t *testing.T) { K: 3, AlphaPreference: 2, AlphaConfidence: 3, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } f := NewFlat(SnowballFactory, params, Red) f.Add(Green) @@ -51,12 +50,12 @@ func TestFlat(t *testing.T) { require.True(f.RecordPoll(threeGreen)) require.Equal(Green, f.Preference()) - require.False(f.Finalized()) // Not finalized before BetaRogue rounds + require.False(f.Finalized()) // Not finalized before Beta rounds require.True(f.RecordPoll(threeGreen)) require.Equal(Green, f.Preference()) require.True(f.Finalized()) - expected := "SB(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w, PreferenceStrength = 4, SF(Confidence = 2, Finalized = true, SL(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w)))" + expected := "SB(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w, PreferenceStrength = 4, SF(Confidence = [2], Finalized = true, SL(Preference = 2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w)))" 
require.Equal(expected, f.String()) } diff --git a/snow/consensus/snowball/nnary_snowball.go b/snow/consensus/snowball/nnary_snowball.go index 77ee48440cbc..dfecaf3450fe 100644 --- a/snow/consensus/snowball/nnary_snowball.go +++ b/snow/consensus/snowball/nnary_snowball.go @@ -11,9 +11,9 @@ import ( var _ Nnary = (*nnarySnowball)(nil) -func newNnarySnowball(betaVirtuous, betaRogue int, choice ids.ID) nnarySnowball { +func newNnarySnowball(alphaPreference int, terminationConditions []terminationCondition, choice ids.ID) nnarySnowball { return nnarySnowball{ - nnarySnowflake: newNnarySnowflake(betaVirtuous, betaRogue, choice), + nnarySnowflake: newNnarySnowflake(alphaPreference, terminationConditions, choice), preference: choice, preferenceStrength: make(map[ids.ID]int), } @@ -47,27 +47,20 @@ func (sb *nnarySnowball) Preference() ids.ID { return sb.preference } -func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { - sb.increasePreferenceStrength(choice) - sb.nnarySnowflake.RecordSuccessfulPoll(choice) -} +func (sb *nnarySnowball) RecordPoll(count int, choice ids.ID) { + if count >= sb.alphaPreference { + preferenceStrength := sb.preferenceStrength[choice] + 1 + sb.preferenceStrength[choice] = preferenceStrength -func (sb *nnarySnowball) RecordPollPreference(choice ids.ID) { - sb.increasePreferenceStrength(choice) - sb.nnarySnowflake.RecordPollPreference(choice) + if preferenceStrength > sb.maxPreferenceStrength { + sb.preference = choice + sb.maxPreferenceStrength = preferenceStrength + } + } + sb.nnarySnowflake.RecordPoll(count, choice) } func (sb *nnarySnowball) String() string { return fmt.Sprintf("SB(Preference = %s, PreferenceStrength = %d, %s)", sb.preference, sb.maxPreferenceStrength, &sb.nnarySnowflake) } - -func (sb *nnarySnowball) increasePreferenceStrength(choice ids.ID) { - preferenceStrength := sb.preferenceStrength[choice] + 1 - sb.preferenceStrength[choice] = preferenceStrength - - if preferenceStrength > sb.maxPreferenceStrength { - 
sb.preference = choice - sb.maxPreferenceStrength = preferenceStrength - } -} diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go index 18bea5eef65e..8a5e66143db2 100644 --- a/snow/consensus/snowball/nnary_snowball_test.go +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -12,41 +12,42 @@ import ( func TestNnarySnowball(t *testing.T) { require := require.New(t) - betaVirtuous := 2 - betaRogue := 2 + alphaPreference, alphaConfidence := 1, 2 + beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) sb.Add(Blue) sb.Add(Green) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordPollPreference(Red) + sb.RecordPoll(alphaPreference, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordPollPreference(Blue) + sb.RecordPoll(alphaPreference, Blue) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sb.Preference()) require.True(sb.Finalized()) } @@ -54,15 +55,16 @@ func TestNnarySnowball(t *testing.T) { func TestVirtuousNnarySnowball(t *testing.T) { require := require.New(t) - betaVirtuous := 1 - betaRogue := 2 + alphaPreference, alphaConfidence := 1, 2 + beta := 1 + 
terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Red, sb.Preference()) require.True(sb.Finalized()) } @@ -70,36 +72,37 @@ func TestVirtuousNnarySnowball(t *testing.T) { func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { require := require.New(t) - betaVirtuous := 2 - betaRogue := 2 + alphaPreference, alphaConfidence := 1, 2 + beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) sb.Add(Blue) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sb.Preference()) require.False(sb.Finalized()) sb.RecordUnsuccessfulPoll() - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sb.Preference()) require.True(sb.Finalized()) - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, PreferenceStrength = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, PreferenceStrength = 3, SF(Confidence = [2], Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" require.Equal(expected, sb.String()) for i := 0; i < 4; i++ { - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Blue, sb.Preference()) require.True(sb.Finalized()) 
@@ -109,20 +112,21 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { require := require.New(t) - betaVirtuous := 2 - betaRogue := 2 + alphaPreference, alphaConfidence := 1, 2 + beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowball(betaVirtuous, betaRogue, Red) + sb := newNnarySnowball(alphaPreference, terminationConditions, Red) sb.Add(Blue) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Blue) + sb.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sb.nnarySnowflake.Preference()) - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Blue, sb.Preference()) require.Equal(Red, sb.nnarySnowflake.Preference()) diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index 897b2bb1e3a1..3fe9f517c0e7 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -11,73 +11,84 @@ import ( var _ Nnary = (*nnarySnowflake)(nil) -func newNnarySnowflake(betaVirtuous, betaRogue int, choice ids.ID) nnarySnowflake { +func newNnarySnowflake(alphaPreference int, terminationConditions []terminationCondition, choice ids.ID) nnarySnowflake { return nnarySnowflake{ - nnarySlush: newNnarySlush(choice), - betaVirtuous: betaVirtuous, - betaRogue: betaRogue, + nnarySlush: newNnarySlush(choice), + alphaPreference: alphaPreference, + terminationConditions: terminationConditions, + confidence: make([]int, len(terminationConditions)), } } // nnarySnowflake is the implementation of a snowflake instance with an // unbounded number of choices +// Invariant: +// len(terminationConditions) == len(confidence) +// terminationConditions[i].alphaConfidence < terminationConditions[i+1].alphaConfidence +// terminationConditions[i].beta <= terminationConditions[i+1].beta +// confidence[i] >= confidence[i+1] 
(except after finalizing due to early termination) type nnarySnowflake struct { // wrap the n-nary slush logic nnarySlush - // betaVirtuous is the number of consecutive successful queries required for - // finalization on a virtuous instance. - betaVirtuous int + // alphaPreference is the threshold required to update the preference + alphaPreference int - // betaRogue is the number of consecutive successful queries required for - // finalization on a rogue instance. - betaRogue int + // terminationConditions gives the ascending ordered list of alphaConfidence values + // required to increment the corresponding confidence counter. + // The corresponding beta values give the threshold required to finalize this instance. + terminationConditions []terminationCondition - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int - - // rogue tracks if this instance has multiple choices or only one - rogue bool + // confidence is the number of consecutive succcessful polls for a given + // alphaConfidence threshold. + // This instance finalizes when confidence[i] >= terminationConditions[i].beta for any i + confidence []int // finalized prevents the state from changing after the required number of // consecutive polls has been reached finalized bool } -func (sf *nnarySnowflake) Add(choice ids.ID) { - sf.rogue = sf.rogue || choice != sf.preference -} +func (*nnarySnowflake) Add(_ ids.ID) {} -func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { +func (sf *nnarySnowflake) RecordPoll(count int, choice ids.ID) { if sf.finalized { return // This instance is already decided. } - if preference := sf.Preference(); preference == choice { - sf.confidence++ - } else { - // confidence is set to 1 because there has already been 1 successful - // poll, namely this poll. 
- sf.confidence = 1 + if count < sf.alphaPreference { + sf.RecordUnsuccessfulPoll() + return } - sf.finalized = (!sf.rogue && sf.confidence >= sf.betaVirtuous) || - sf.confidence >= sf.betaRogue + // If I am changing my preference, reset confidence counters + // before recording a successful poll on the slush instance. + if choice != sf.Preference() { + clear(sf.confidence) + } sf.nnarySlush.RecordSuccessfulPoll(choice) -} -func (sf *nnarySnowflake) RecordPollPreference(choice ids.ID) { - if sf.finalized { - return // This instance is already decided. + for i, terminationCondition := range sf.terminationConditions { + // If I did not reach this alpha threshold, I did not + // reach any more alpha thresholds. + // Clear the remaining confidence counters. + if count < terminationCondition.alphaConfidence { + clear(sf.confidence[i:]) + return + } + + // I reached this alpha threshold, increment the confidence counter + // and check if I can finalize. + sf.confidence[i]++ + if sf.confidence[i] >= terminationCondition.beta { + sf.finalized = true + return + } } - - sf.confidence = 0 - sf.nnarySlush.RecordSuccessfulPoll(choice) } func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { - sf.confidence = 0 + clear(sf.confidence) } func (sf *nnarySnowflake) Finalized() bool { @@ -85,7 +96,7 @@ func (sf *nnarySnowflake) Finalized() bool { } func (sf *nnarySnowflake) String() string { - return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", + return fmt.Sprintf("SF(Confidence = %v, Finalized = %v, %s)", sf.confidence, sf.finalized, &sf.nnarySlush) diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go index 5df8c2966335..ad090ee0f0df 100644 --- a/snow/consensus/snowball/nnary_snowflake_test.go +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -7,42 +7,45 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" ) func TestNnarySnowflake(t *testing.T) { require := 
require.New(t) - betaVirtuous := 2 - betaRogue := 2 + alphaPreference, alphaConfidence := 1, 2 + beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newNnarySnowflake(betaVirtuous, betaRogue, Red) + sf := newNnarySnowflake(alphaPreference, terminationConditions, Red) sf.Add(Blue) sf.Add(Green) require.Equal(Red, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(Blue) + sf.RecordPoll(alphaConfidence, Blue) require.Equal(Blue, sf.Preference()) require.False(sf.Finalized()) - sf.RecordPollPreference(Red) + sf.RecordPoll(alphaPreference, Red) require.Equal(Red, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(Red) + sf.RecordPoll(alphaConfidence, Red) require.Equal(Red, sf.Preference()) require.False(sf.Finalized()) - sf.RecordSuccessfulPoll(Red) + sf.RecordPoll(alphaConfidence, Red) require.Equal(Red, sf.Preference()) require.True(sf.Finalized()) - sf.RecordPollPreference(Blue) + sf.RecordPoll(alphaPreference, Blue) require.Equal(Red, sf.Preference()) require.True(sf.Finalized()) - sf.RecordSuccessfulPoll(Blue) + sf.RecordPoll(alphaConfidence, Blue) require.Equal(Red, sf.Preference()) require.True(sf.Finalized()) } @@ -50,10 +53,11 @@ func TestNnarySnowflake(t *testing.T) { func TestNnarySnowflakeConfidenceReset(t *testing.T) { require := require.New(t) - betaVirtuous := 4 - betaRogue := 4 + alphaPreference, alphaConfidence := 1, 2 + beta := 4 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newNnarySnowflake(betaVirtuous, betaRogue, Red) + sf := newNnarySnowflake(alphaPreference, terminationConditions, Red) sf.Add(Blue) sf.Add(Green) @@ -61,21 +65,21 @@ func TestNnarySnowflakeConfidenceReset(t *testing.T) { require.False(sf.Finalized()) // Increase Blue's confidence without finalizing - for i := 0; i < betaRogue-1; i++ { - sf.RecordSuccessfulPoll(Blue) + for i := 0; i < beta-1; i++ { + sf.RecordPoll(alphaConfidence, Blue) 
require.Equal(Blue, sf.Preference()) require.False(sf.Finalized()) } // Increase Red's confidence without finalizing - for i := 0; i < betaRogue-1; i++ { - sf.RecordSuccessfulPoll(Red) + for i := 0; i < beta-1; i++ { + sf.RecordPoll(alphaConfidence, Red) require.Equal(Red, sf.Preference()) require.False(sf.Finalized()) } // One more round of voting for Red should accept Red - sf.RecordSuccessfulPoll(Red) + sf.RecordPoll(alphaConfidence, Red) require.Equal(Red, sf.Preference()) require.True(sf.Finalized()) } @@ -83,48 +87,60 @@ func TestNnarySnowflakeConfidenceReset(t *testing.T) { func TestVirtuousNnarySnowflake(t *testing.T) { require := require.New(t) - betaVirtuous := 2 - betaRogue := 3 + alphaPreference, alphaConfidence := 1, 2 + beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newNnarySnowflake(betaVirtuous, betaRogue, Red) + sb := newNnarySnowflake(alphaPreference, terminationConditions, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Red, sb.Preference()) require.False(sb.Finalized()) - sb.RecordSuccessfulPoll(Red) + sb.RecordPoll(alphaConfidence, Red) require.Equal(Red, sb.Preference()) require.True(sb.Finalized()) } -func TestRogueNnarySnowflake(t *testing.T) { - require := require.New(t) - - betaVirtuous := 1 - betaRogue := 2 +type nnarySnowflakeTest struct { + require *require.Assertions - sb := newNnarySnowflake(betaVirtuous, betaRogue, Red) - require.False(sb.rogue) + nnarySnowflake +} - sb.Add(Red) - require.False(sb.rogue) +func newNnarySnowflakeTest(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[ids.ID] { + require := require.New(t) - sb.Add(Blue) - require.True(sb.rogue) + return &nnarySnowflakeTest{ + require: require, + nnarySnowflake: newNnarySnowflake(alphaPreference, terminationConditions, Red), + } +} - sb.Add(Red) - require.True(sb.rogue) 
+func (sf *nnarySnowflakeTest) RecordPoll(count int, choice ids.ID) { + sf.nnarySnowflake.RecordPoll(count, choice) +} - require.Equal(Red, sb.Preference()) - require.False(sb.Finalized()) +func (sf *nnarySnowflakeTest) AssertEqual(expectedConfidences []int, expectedFinalized bool, expectedPreference ids.ID) { + sf.require.Equal(expectedPreference, sf.Preference()) + sf.require.Equal(expectedConfidences, sf.nnarySnowflake.confidence) + sf.require.Equal(expectedFinalized, sf.Finalized()) +} - sb.RecordSuccessfulPoll(Red) - require.Equal(Red, sb.Preference()) - require.False(sb.Finalized()) +func TestNnarySnowflakeErrorDrivenSingleChoice(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeSingleChoiceSuite[ids.ID]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newNnarySnowflakeTest, Red) + }) + } +} - sb.RecordSuccessfulPoll(Red) - require.Equal(Red, sb.Preference()) - require.True(sb.Finalized()) +func TestNnarySnowflakeErrorDrivenMultiChoice(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeMultiChoiceSuite[ids.ID]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newNnarySnowflakeTest, Red, Green) + }) + } } diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go index bf458fbf9f40..3e2ba03f2081 100644 --- a/snow/consensus/snowball/parameters.go +++ b/snow/consensus/snowball/parameters.go @@ -39,8 +39,7 @@ var ( K: 20, AlphaPreference: 15, AlphaConfidence: 15, - BetaVirtuous: 20, - BetaRogue: 20, + Beta: 20, ConcurrentRepolls: 4, OptimalProcessing: 10, MaxOutstandingItems: 256, @@ -61,12 +60,9 @@ type Parameters struct { AlphaPreference int `json:"alphaPreference" yaml:"alphaPreference"` // AlphaConfidence is the vote threshold to increase your confidence. AlphaConfidence int `json:"alphaConfidence" yaml:"alphaConfidence"` - // BetaVirtuous is the number of consecutive successful queries required for - // finalization on a virtuous instance. 
- BetaVirtuous int `json:"betaVirtuous" yaml:"betaVirtuous"` - // BetaRogue is the number of consecutive successful queries required for - // finalization on a rogue instance. - BetaRogue int `json:"betaRogue" yaml:"betaRogue"` + // Beta is the number of consecutive successful queries required for + // finalization. + Beta int `json:"beta" yaml:"beta"` // ConcurrentRepolls is the number of outstanding polls the engine will // target to have while there is something processing. ConcurrentRepolls int `json:"concurrentRepolls" yaml:"concurrentRepolls"` @@ -87,8 +83,7 @@ type Parameters struct { // An initialization is valid if the following conditions are met: // // - K/2 < AlphaPreference <= AlphaConfidence <= K -// - 0 < BetaVirtuous <= BetaRogue -// - 0 < ConcurrentRepolls <= BetaRogue +// - 0 < ConcurrentRepolls <= Beta // - 0 < OptimalProcessing // - 0 < MaxOutstandingItems // - 0 < MaxItemProcessingTime @@ -103,16 +98,12 @@ func (p Parameters) Verify() error { return fmt.Errorf("%w: alphaPreference = %d, alphaConfidence = %d: fails the condition that: alphaPreference <= alphaConfidence", ErrParametersInvalid, p.AlphaPreference, p.AlphaConfidence) case p.K < p.AlphaConfidence: return fmt.Errorf("%w: k = %d, alphaConfidence = %d: fails the condition that: alphaConfidence <= k", ErrParametersInvalid, p.K, p.AlphaConfidence) - case p.BetaVirtuous <= 0: - return fmt.Errorf("%w: betaVirtuous = %d: fails the condition that: 0 < betaVirtuous", ErrParametersInvalid, p.BetaVirtuous) - case p.BetaRogue == 3 && p.BetaVirtuous == 28: - return fmt.Errorf("%w: betaVirtuous = %d, betaRogue = %d: fails the condition that: betaVirtuous <= betaRogue\n%s", ErrParametersInvalid, p.BetaVirtuous, p.BetaRogue, errMsg) - case p.BetaRogue < p.BetaVirtuous: - return fmt.Errorf("%w: betaVirtuous = %d, betaRogue = %d: fails the condition that: betaVirtuous <= betaRogue", ErrParametersInvalid, p.BetaVirtuous, p.BetaRogue) + case p.AlphaConfidence == 3 && p.AlphaPreference == 28: + return 
fmt.Errorf("%w: alphaConfidence = %d, alphaPreference = %d: fails the condition that: alphaPreference <= alphaConfidence\n%s", ErrParametersInvalid, p.AlphaConfidence, p.AlphaPreference, errMsg) case p.ConcurrentRepolls <= 0: return fmt.Errorf("%w: concurrentRepolls = %d: fails the condition that: 0 < concurrentRepolls", ErrParametersInvalid, p.ConcurrentRepolls) - case p.ConcurrentRepolls > p.BetaRogue: - return fmt.Errorf("%w: concurrentRepolls = %d, betaRogue = %d: fails the condition that: concurrentRepolls <= betaRogue", ErrParametersInvalid, p.ConcurrentRepolls, p.BetaRogue) + case p.ConcurrentRepolls > p.Beta: + return fmt.Errorf("%w: concurrentRepolls = %d, beta = %d: fails the condition that: concurrentRepolls <= beta", ErrParametersInvalid, p.ConcurrentRepolls, p.Beta) case p.OptimalProcessing <= 0: return fmt.Errorf("%w: optimalProcessing = %d: fails the condition that: 0 < optimalProcessing", ErrParametersInvalid, p.OptimalProcessing) case p.MaxOutstandingItems <= 0: @@ -131,3 +122,17 @@ func (p Parameters) MinPercentConnectedHealthy() float64 { alphaRatio := float64(p.AlphaConfidence) / float64(p.K) return alphaRatio*(1-MinPercentConnectedBuffer) + MinPercentConnectedBuffer } + +type terminationCondition struct { + alphaConfidence int + beta int +} + +func newSingleTerminationCondition(alphaConfidence int, beta int) []terminationCondition { + return []terminationCondition{ + { + alphaConfidence: alphaConfidence, + beta: beta, + }, + } +} diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index 525001fd535f..60a9612ef6ee 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -21,8 +21,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -36,8 +35,7 @@ func TestParametersVerify(t *testing.T) { K: 0, 
AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -51,8 +49,7 @@ func TestParametersVerify(t *testing.T) { K: 2, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -66,8 +63,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 0, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -81,8 +77,7 @@ func TestParametersVerify(t *testing.T) { K: 3, AlphaPreference: 3, AlphaConfidence: 2, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -91,13 +86,12 @@ func TestParametersVerify(t *testing.T) { expectedError: ErrParametersInvalid, }, { - name: "invalid BetaVirtuous", + name: "invalid beta", params: Parameters{ K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 0, - BetaRogue: 1, + Beta: 0, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -106,13 +100,12 @@ func TestParametersVerify(t *testing.T) { expectedError: ErrParametersInvalid, }, { - name: "first half fun BetaRogue", + name: "first half fun alphaConfidence", params: Parameters{ - K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 28, - BetaRogue: 30, + K: 30, + AlphaPreference: 28, + AlphaConfidence: 30, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -121,13 +114,12 @@ func TestParametersVerify(t *testing.T) { expectedError: nil, }, { - name: "second half fun BetaRogue", + name: "second half fun alphaConfidence", params: Parameters{ - K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 3, + K: 3, + AlphaPreference: 2, + AlphaConfidence: 3, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -136,28 
+128,12 @@ func TestParametersVerify(t *testing.T) { expectedError: nil, }, { - name: "fun invalid BetaRogue", + name: "fun invalid alphaConfidence", params: Parameters{ K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 28, - BetaRogue: 3, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - expectedError: ErrParametersInvalid, - }, - { - name: "invalid BetaRogue", - params: Parameters{ - K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 1, + AlphaPreference: 28, + AlphaConfidence: 3, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -171,8 +147,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 0, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -186,8 +161,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 2, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -201,8 +175,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 0, MaxOutstandingItems: 1, @@ -216,8 +189,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 0, @@ -231,8 +203,7 @@ func TestParametersVerify(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, diff --git a/snow/consensus/snowball/test_snowflake.go b/snow/consensus/snowball/test_snowflake.go new file mode 100644 index 000000000000..78ce95b27e3d --- /dev/null +++ b/snow/consensus/snowball/test_snowflake.go 
@@ -0,0 +1,145 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import "testing" + +const alphaPreference = 3 + +var terminationConditions = []terminationCondition{ + { + alphaConfidence: 3, + beta: 4, + }, + { + alphaConfidence: 4, + beta: 3, + }, + { + alphaConfidence: 5, + beta: 2, + }, +} + +type snowflakeTestConstructor[T comparable] func(t *testing.T, alphaPreference int, terminationConditions []terminationCondition) snowflakeTest[T] + +type snowflakeTest[T comparable] interface { + RecordPoll(count int, optionalMode T) + RecordUnsuccessfulPoll() + AssertEqual(expectedConfidences []int, expectedFinalized bool, expectedPreference T) +} + +func executeErrorDrivenTerminatesInBetaPolls[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice T) { + for i, terminationCondition := range terminationConditions { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + for poll := 0; poll < terminationCondition.beta; poll++ { + sfTest.RecordPoll(terminationCondition.alphaConfidence, choice) + + expectedConfidences := make([]int, len(terminationConditions)) + for j := 0; j < i+1; j++ { + expectedConfidences[j] = poll + 1 + } + sfTest.AssertEqual(expectedConfidences, poll+1 >= terminationCondition.beta, choice) + } + } +} + +func executeErrorDrivenReset[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice T) { + for i, terminationCondition := range terminationConditions { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + // Accumulate confidence up to 1 less than beta, reset, and confirm + // expected behavior from fresh state. 
+ for poll := 0; poll < terminationCondition.beta-1; poll++ { + sfTest.RecordPoll(terminationCondition.alphaConfidence, choice) + } + sfTest.RecordUnsuccessfulPoll() + zeroConfidence := make([]int, len(terminationConditions)) + sfTest.AssertEqual(zeroConfidence, false, choice) + + for poll := 0; poll < terminationCondition.beta; poll++ { + sfTest.RecordPoll(terminationCondition.alphaConfidence, choice) + + expectedConfidences := make([]int, len(terminationConditions)) + for j := 0; j < i+1; j++ { + expectedConfidences[j] = poll + 1 + } + sfTest.AssertEqual(expectedConfidences, poll+1 >= terminationCondition.beta, choice) + } + } +} + +func executeErrorDrivenResetHighestAlphaConfidence[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice T) { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + sfTest.RecordPoll(5, choice) + sfTest.AssertEqual([]int{1, 1, 1}, false, choice) + sfTest.RecordPoll(4, choice) + sfTest.AssertEqual([]int{2, 2, 0}, false, choice) + sfTest.RecordPoll(3, choice) + sfTest.AssertEqual([]int{3, 0, 0}, false, choice) + sfTest.RecordPoll(5, choice) + sfTest.AssertEqual([]int{4, 0, 0}, true, choice) +} + +type snowflakeTestSingleChoice[T comparable] struct { + name string + f func(*testing.T, snowflakeTestConstructor[T], T) +} + +func getErrorDrivenSnowflakeSingleChoiceSuite[T comparable]() []snowflakeTestSingleChoice[T] { + return []snowflakeTestSingleChoice[T]{ + { + name: "TerminateInBetaPolls", + f: executeErrorDrivenTerminatesInBetaPolls[T], + }, + { + name: "Reset", + f: executeErrorDrivenReset[T], + }, + { + name: "ResetHighestAlphaConfidence", + f: executeErrorDrivenResetHighestAlphaConfidence[T], + }, + } +} + +func executeErrorDrivenSwitchChoices[T comparable](t *testing.T, newSnowflakeTest snowflakeTestConstructor[T], choice0, choice1 T) { + sfTest := newSnowflakeTest(t, alphaPreference, terminationConditions) + + sfTest.RecordPoll(3, choice0) + sfTest.AssertEqual([]int{1, 0, 0}, false, 
choice0) + + sfTest.RecordPoll(2, choice1) + sfTest.AssertEqual([]int{0, 0, 0}, false, choice0) + + sfTest.RecordPoll(3, choice0) + sfTest.AssertEqual([]int{1, 0, 0}, false, choice0) + + sfTest.RecordPoll(0, choice0) + sfTest.AssertEqual([]int{0, 0, 0}, false, choice0) + + sfTest.RecordPoll(3, choice1) + sfTest.AssertEqual([]int{1, 0, 0}, false, choice1) + + sfTest.RecordPoll(5, choice1) + sfTest.AssertEqual([]int{2, 1, 1}, false, choice1) + sfTest.RecordPoll(5, choice1) + sfTest.AssertEqual([]int{3, 2, 2}, true, choice1) +} + +type snowflakeTestMultiChoice[T comparable] struct { + name string + f func(*testing.T, snowflakeTestConstructor[T], T, T) +} + +func getErrorDrivenSnowflakeMultiChoiceSuite[T comparable]() []snowflakeTestMultiChoice[T] { + return []snowflakeTestMultiChoice[T]{ + { + name: "SwitchChoices", + f: executeErrorDrivenSwitchChoices[T], + }, + } +} diff --git a/snow/consensus/snowball/tree.go b/snow/consensus/snowball/tree.go index 4b8d8cfe8dcb..c6773e30c54e 100644 --- a/snow/consensus/snowball/tree.go +++ b/snow/consensus/snowball/tree.go @@ -139,7 +139,7 @@ type node interface { } // unary is a node with either no children, or a single child. It handles the -// voting on a range of identical, virtuous, snow instances. +// voting on a range of identical, unary, snow instances. 
type unaryNode struct { // tree references the tree that contains this node tree *Tree @@ -350,7 +350,7 @@ func (u *unaryNode) Add(newChoice ids.ID) node { b := &binaryNode{ tree: u.tree, bit: index, - snow: u.snow.Extend(u.tree.params.BetaRogue, bit), + snow: u.snow.Extend(bit), shouldReset: [2]bool{u.shouldReset, u.shouldReset}, } b.preferences[bit] = u.preference @@ -416,22 +416,15 @@ func (u *unaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { u.shouldReset = true // Make sure my child is also reset correctly } - switch numVotes := votes.Len(); { - case numVotes >= u.tree.params.AlphaConfidence: - // I got enough votes to increase my confidence - u.snow.RecordSuccessfulPoll() - case numVotes >= u.tree.params.AlphaPreference: - // I got enough votes to update my preference, but not increase my - // confidence. - u.snow.RecordPollPreference() - default: - // I didn't get enough votes, I must reset and my child must reset as - // well + numVotes := votes.Len() + if numVotes < u.tree.params.AlphaPreference { u.snow.RecordUnsuccessfulPoll() u.shouldReset = true return u, false } + u.snow.RecordPoll(numVotes) + if u.child != nil { // We are guaranteed that u.commonPrefix will equal // u.child.DecidedPrefix(). Otherwise, there must have been a @@ -445,7 +438,6 @@ func (u *unaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { // If I'm now decided, return my child return newChild, true } - u.child = newChild // The child's preference may have changed u.preference = u.child.Preference() @@ -470,7 +462,7 @@ func (u *unaryNode) Printable() (string, []node) { } // binaryNode is a node with either no children, or two children. It handles the -// voting of a single, rogue, snow instance. +// voting of a single, binary, snow instance. 
type binaryNode struct { // tree references the tree that contains this node tree *Tree @@ -508,9 +500,7 @@ func (b *binaryNode) Add(id ids.ID) node { // If child is nil, then we are running an instance on the last bit. Finding // two hashes that are equal up to the last bit would be really cool though. // Regardless, the case is handled - if child != nil && - // + 1 is used because we already explicitly check the p.bit bit - ids.EqualSubset(b.bit+1, child.DecidedPrefix(), b.preferences[bit], id) { + if child != nil { b.children[bit] = child.Add(id) } // If child is nil, then the id has already been added to the tree, so @@ -541,36 +531,23 @@ func (b *binaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) b.shouldReset[1-bit] = true // They didn't get the threshold of votes prunedVotes := splitVotes[bit] - switch numVotes := prunedVotes.Len(); { - case numVotes >= b.tree.params.AlphaConfidence: - // I got enough votes to increase my confidence. - b.snow.RecordSuccessfulPoll(bit) - case numVotes >= b.tree.params.AlphaPreference: - // I got enough votes to update my preference, but not increase my - // confidence. - b.snow.RecordPollPreference(bit) - default: + numVotes := prunedVotes.Len() + if numVotes < b.tree.params.AlphaPreference { b.snow.RecordUnsuccessfulPoll() // The winning child didn't get enough votes either b.shouldReset[bit] = true return b, false } + b.snow.RecordPoll(numVotes, bit) + if child := b.children[bit]; child != nil { - // The votes are filtered to ensure that they are votes that should - // count for the child - decidedPrefix := child.DecidedPrefix() - filteredVotes := prunedVotes.Filter(func(id ids.ID) bool { - return ids.EqualSubset(b.bit+1, decidedPrefix, b.preferences[bit], id) - }) - - newChild, _ := child.RecordPoll(filteredVotes, b.shouldReset[bit]) + newChild, _ := child.RecordPoll(prunedVotes, b.shouldReset[bit]) if b.snow.Finalized() { // If we are decided here, that means we must have decided due // to this poll. 
Therefore, we must have decided on bit. return newChild, true } - b.children[bit] = newChild b.preferences[bit] = newChild.Preference() } b.shouldReset[bit] = false // We passed the reset down diff --git a/snow/consensus/snowball/tree_test.go b/snow/consensus/snowball/tree_test.go index 1d337afdda6e..fc9c8944b768 100644 --- a/snow/consensus/snowball/tree_test.go +++ b/snow/consensus/snowball/tree_test.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/utils/bag" ) -const initialUnaryDescription = "SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" +const initialUnaryDescription = "SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 256)" func TestSnowballSingleton(t *testing.T) { require := require.New(t) @@ -24,8 +24,7 @@ func TestSnowballSingleton(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 5, + Beta: 2, } tree := NewTree(SnowballFactory, params, Red) @@ -65,8 +64,7 @@ func TestSnowballRecordUnsuccessfulPoll(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, } tree := NewTree(SnowballFactory, params, Red) @@ -95,8 +93,7 @@ func TestSnowballBinary(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, Red) tree.Add(Blue) @@ -138,8 +135,7 @@ func TestSnowballLastBinary(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, zero) tree.Add(one) @@ -147,8 +143,8 @@ func TestSnowballLastBinary(t *testing.T) { // Should do nothing tree.Add(one) - expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255` + expected := 
`SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 255) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 255` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -158,15 +154,109 @@ func TestSnowballLastBinary(t *testing.T) { require.Equal(one, tree.Preference()) require.False(tree.Finalized()) - expected = `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 255) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 255` + expected = `SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [0, 255) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 255` require.Equal(expected, tree.String()) require.True(tree.RecordPoll(oneBag)) require.Equal(one, tree.Preference()) require.True(tree.Finalized()) - expected = "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 1))) Bit = 255" + expected = "SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = [2], Finalized = true, SL(Preference = 1))) Bit = 255" + require.Equal(expected, tree.String()) +} + +func TestSnowballFirstBinary(t *testing.T) { + require := require.New(t) + + zero := ids.Empty + one := ids.ID{0x01} + + params := Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + Beta: 2, + } + tree := NewTree(SnowballFactory, params, zero) + tree.Add(one) + + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + 
SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` + require.Equal(expected, tree.String()) + require.Equal(zero, tree.Preference()) + require.False(tree.Finalized()) + + oneBag := bag.Of(one) + require.True(tree.RecordPoll(oneBag)) + require.Equal(one, tree.Preference()) + require.False(tree.Finalized()) + + expected = `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` + require.Equal(expected, tree.String()) + + require.True(tree.RecordPoll(oneBag)) + require.Equal(one, tree.Preference()) + require.True(tree.Finalized()) + + expected = `SB(PreferenceStrength = 2, SF(Confidence = [2], Finalized = true)) Bits = [1, 256)` + require.Equal(expected, tree.String()) +} + +func TestSnowballAddDecidedFirstBit(t *testing.T) { + require := require.New(t) + + zero := ids.Empty + c1000 := ids.ID{0x01} + c1100 := ids.ID{0x03} + c0110 := ids.ID{0x06} + + params := Parameters{ + K: 1, + AlphaPreference: 1, + AlphaConfidence: 1, + Beta: 2, + } + tree := NewTree(SnowballFactory, params, zero) + tree.Add(c1000) + tree.Add(c1100) + + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` + require.Equal(expected, tree.String()) + require.Equal(zero, tree.Preference()) + 
require.False(tree.Finalized()) + + oneBag := bag.Of(c1000) + require.True(tree.RecordPoll(oneBag)) + require.Equal(c1000, tree.Preference()) + require.False(tree.Finalized()) + + expected = `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` + require.Equal(expected, tree.String()) + + threeBag := bag.Of(c1100) + require.True(tree.RecordPoll(threeBag)) + require.Equal(c1000, tree.Preference()) + require.False(tree.Finalized()) + + expected = `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` + require.Equal(expected, tree.String()) + + // Adding six should have no effect because the first bit is already decided + tree.Add(c0110) require.Equal(expected, tree.String()) } @@ -176,26 +266,21 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { zero := ids.ID{0b00000000} one := ids.ID{0b00000001} two := ids.ID{0b00000010} - four := ids.ID{0b00000100} params := Parameters{ K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, zero) - tree.Add(one) - tree.Add(four) + tree.Add(two) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - 
SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -205,24 +290,33 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + 
SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) } - tree.Add(two) + twoBag := bag.Of(two) + require.True(tree.RecordPoll(twoBag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` + require.Equal(expected, tree.String()) + require.Equal(zero, tree.Preference()) + require.False(tree.Finalized()) + } + + tree.Add(one) + + { + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -239,16 +333,15 @@ func TestSnowballNewUnary(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 3, + Beta: 3, } tree := NewTree(SnowballFactory, params, zero) tree.Add(one) { - expected := `SB(Preference = 0, 
PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -258,9 +351,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -269,9 +362,9 @@ func TestSnowballNewUnary(t *testing.T) { require.True(tree.RecordPoll(oneBag)) { - expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 2, SF(Confidence = 2, Finalized = 
true)) Bits = [1, 256)` + expected := `SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = [2], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 2, SF(Confidence = [2], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(one, tree.Preference()) require.False(tree.Finalized()) @@ -289,21 +382,20 @@ func TestSnowballTransitiveReset(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, zero) tree.Add(two) tree.Add(eight) { - expected := `SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength 
= 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -313,13 +405,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := `SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -329,13 +421,13 @@ func TestSnowballTransitiveReset(t *testing.T) { 
require.False(tree.RecordPoll(emptyBag)) { - expected := `SB(PreferenceStrength = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 1, SF(Confidence = [0], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -344,13 +436,13 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := `SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1) - SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 2, SF(Confidence = 1, 
Finalized = false)) Bits = [2, 3) - SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3 - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [0, 1) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [2, 3) + SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 3 + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [4, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) @@ -359,7 +451,7 @@ func TestSnowballTransitiveReset(t *testing.T) { require.True(tree.RecordPoll(zeroBag)) { - expected := "SB(PreferenceStrength = 3, SF(Confidence = 2, Finalized = true)) Bits = [4, 256)" + expected := "SB(PreferenceStrength = 3, SF(Confidence = [2], Finalized = true)) Bits = [4, 256)" require.Equal(expected, tree.String()) require.Equal(zero, tree.Preference()) require.True(tree.Finalized()) @@ -373,8 +465,7 @@ func TestSnowballTrinary(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, Green) tree.Add(Red) @@ -427,8 +518,7 @@ func TestSnowballCloseTrinary(t *testing.T) { K: 1, AlphaPreference: 1, 
AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, yellow) tree.Add(cyan) @@ -464,56 +554,6 @@ func TestSnowballCloseTrinary(t *testing.T) { require.False(tree.Finalized()) } -func TestSnowballAddRejected(t *testing.T) { - require := require.New(t) - - c0000 := ids.ID{0x00} // 0000 - c1000 := ids.ID{0x01} // 1000 - c0101 := ids.ID{0x0a} // 0101 - c0010 := ids.ID{0x04} // 0010 - - params := Parameters{ - K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, - } - tree := NewTree(SnowballFactory, params, c0000) - tree.Add(c1000) - tree.Add(c0010) - - require.Equal(c0000, tree.Preference()) - require.False(tree.Finalized()) - - c0010Bag := bag.Of(c0010) - require.True(tree.RecordPoll(c0010Bag)) - - { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` - require.Equal(expected, tree.String()) - require.Equal(c0010, tree.Preference()) - require.False(tree.Finalized()) - } - - tree.Add(c0101) - - { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence 
= 0, Finalized = false)) Bits = [1, 256)` - require.Equal(expected, tree.String()) - require.Equal(c0010, tree.Preference()) - require.False(tree.Finalized()) - } -} - func TestSnowballResetChild(t *testing.T) { require := require.New(t) @@ -525,8 +565,7 @@ func TestSnowballResetChild(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, c0000) tree.Add(c0100) @@ -539,11 +578,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -553,11 +592,11 @@ func TestSnowballResetChild(t *testing.T) { require.False(tree.RecordPoll(emptyBag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence 
= 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -566,11 +605,11 @@ func TestSnowballResetChild(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1 - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 2, 
PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -588,8 +627,7 @@ func TestSnowballResetSibling(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, c0000) tree.Add(c0100) @@ -602,11 +640,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) 
require.False(tree.Finalized()) @@ -616,11 +654,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c1000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -629,11 +667,11 @@ func TestSnowballResetSibling(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 2, SF(Confidence = 1, Finalized = true)) Bits = [2, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = 
true)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 2, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 2, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 2, SF(Confidence = [1], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0100, tree.Preference()) require.False(tree.Finalized()) @@ -648,8 +686,7 @@ func TestSnowball5Colors(t *testing.T) { K: 5, AlphaPreference: 5, AlphaConfidence: 5, - BetaVirtuous: 20, - BetaRogue: 30, + Beta: 20, } colors := []ids.ID{} @@ -688,8 +725,7 @@ func TestSnowballFineGrained(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, c0000) @@ -700,9 +736,9 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1100) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -711,11 +747,11 @@ func TestSnowballFineGrained(t *testing.T) { 
tree.Add(c1000) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -724,14 +760,14 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c0010) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 
1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -741,13 +777,14 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256) - SB(PreferenceStrength = 0, 
SF(Confidence = 0, Finalized = false)) Bits = [2, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(Preference = 1, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -757,18 +794,17 @@ func TestSnowballFineGrained(t *testing.T) { require.True(tree.RecordPoll(c0010Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) } require.True(tree.RecordPoll(c0010Bag)) - { - expected := "SB(PreferenceStrength = 
2, SF(Confidence = 2, Finalized = true)) Bits = [3, 256)" + expected := "SB(PreferenceStrength = 2, SF(Confidence = [2], Finalized = true)) Bits = [3, 256)" require.Equal(expected, tree.String()) require.Equal(c0010, tree.Preference()) require.True(tree.Finalized()) @@ -782,8 +818,7 @@ func TestSnowballDoubleAdd(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, } tree := NewTree(SnowballFactory, params, Red) tree.Add(Red) @@ -803,8 +838,7 @@ func TestSnowballConsistent(t *testing.T) { K: 20, AlphaPreference: 15, AlphaConfidence: 15, - BetaVirtuous: 20, - BetaRogue: 30, + Beta: 20, } seed uint64 = 0 source = prng.NewMT19937() @@ -836,8 +870,7 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, c0000) @@ -848,9 +881,9 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c1000) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -859,12 +892,12 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0010) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 
0))) Bit = 0 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2) - SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 0, PreferenceStrength[1] = 0, SF(Confidence = [0], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -874,11 +907,12 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.True(tree.RecordPoll(c0000Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, 
SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [1, 2) + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -887,11 +921,13 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { tree.Add(c0100) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0 - SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 0 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 1 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) 
Bits = [2, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [1, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -901,9 +937,11 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.True(tree.RecordPoll(c0100Bag)) { - expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2 - SB(PreferenceStrength = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256) - SB(PreferenceStrength = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)` + expected := `SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 1, SF(Confidence = [1], Finalized = false, SL(Preference = 1))) Bit = 1 + SB(Preference = 0, PreferenceStrength[0] = 1, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0))) Bit = 2 + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 0, SF(Confidence = [0], Finalized = false)) Bits = [3, 256) + SB(PreferenceStrength = 1, SF(Confidence = [1], Finalized = false)) Bits = [2, 256)` require.Equal(expected, tree.String()) require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) @@ -917,8 +955,7 @@ func TestSnowballRecordPreferencePollBinary(t *testing.T) { K: 3, AlphaPreference: 2, AlphaConfidence: 3, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, Red) tree.Add(Blue) @@ -952,8 +989,7 @@ func TestSnowballRecordPreferencePollUnary(t *testing.T) { K: 3, AlphaPreference: 2, AlphaConfidence: 3, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, } tree := NewTree(SnowballFactory, params, Red) require.Equal(Red, tree.Preference()) diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 638b6d798d85..c67180f4c384 100644 --- 
a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -3,13 +3,16 @@ package snowball -import "fmt" +import ( + "fmt" + "slices" +) var _ Unary = (*unarySnowball)(nil) -func newUnarySnowball(beta int) unarySnowball { +func newUnarySnowball(alphaPreference int, terminationConditions []terminationCondition) unarySnowball { return unarySnowball{ - unarySnowflake: newUnarySnowflake(beta), + unarySnowflake: newUnarySnowflake(alphaPreference, terminationConditions), } } @@ -22,23 +25,21 @@ type unarySnowball struct { preferenceStrength int } -func (sb *unarySnowball) RecordSuccessfulPoll() { - sb.preferenceStrength++ - sb.unarySnowflake.RecordSuccessfulPoll() -} - -func (sb *unarySnowball) RecordPollPreference() { - sb.preferenceStrength++ - sb.unarySnowflake.RecordUnsuccessfulPoll() +func (sb *unarySnowball) RecordPoll(count int) { + if count >= sb.alphaPreference { + sb.preferenceStrength++ + } + sb.unarySnowflake.RecordPoll(count) } -func (sb *unarySnowball) Extend(beta int, choice int) Binary { +func (sb *unarySnowball) Extend(choice int) Binary { bs := &binarySnowball{ binarySnowflake: binarySnowflake{ - binarySlush: binarySlush{preference: choice}, - confidence: sb.confidence, - beta: beta, - finalized: sb.Finalized(), + binarySlush: binarySlush{preference: choice}, + confidence: slices.Clone(sb.confidence), + alphaPreference: sb.alphaPreference, + terminationConditions: sb.terminationConditions, + finalized: sb.Finalized(), }, preference: choice, } @@ -48,6 +49,7 @@ func (sb *unarySnowball) Extend(beta int, choice int) Binary { func (sb *unarySnowball) Clone() Unary { newSnowball := *sb + newSnowball.confidence = slices.Clone(sb.confidence) return &newSnowball } diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index d94d2b61d63d..007d2ab53090 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -9,7 
+9,7 @@ import ( "github.com/stretchr/testify/require" ) -func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedPreferenceStrength, expectedConfidence int, expectedFinalized bool) { +func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedPreferenceStrength int, expectedConfidence []int, expectedFinalized bool) { require := require.New(t) require.Equal(expectedPreferenceStrength, sb.preferenceStrength) @@ -20,55 +20,57 @@ func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedPreferenceS func TestUnarySnowball(t *testing.T) { require := require.New(t) + alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sb := newUnarySnowball(beta) + sb := newUnarySnowball(alphaPreference, terminationConditions) - sb.RecordSuccessfulPoll() - UnarySnowballStateTest(t, &sb, 1, 1, false) + sb.RecordPoll(alphaConfidence) + UnarySnowballStateTest(t, &sb, 1, []int{1}, false) - sb.RecordPollPreference() - UnarySnowballStateTest(t, &sb, 2, 0, false) + sb.RecordPoll(alphaPreference) + UnarySnowballStateTest(t, &sb, 2, []int{0}, false) - sb.RecordSuccessfulPoll() - UnarySnowballStateTest(t, &sb, 3, 1, false) + sb.RecordPoll(alphaConfidence) + UnarySnowballStateTest(t, &sb, 3, []int{1}, false) sb.RecordUnsuccessfulPoll() - UnarySnowballStateTest(t, &sb, 3, 0, false) + UnarySnowballStateTest(t, &sb, 3, []int{0}, false) - sb.RecordSuccessfulPoll() - UnarySnowballStateTest(t, &sb, 4, 1, false) + sb.RecordPoll(alphaConfidence) + UnarySnowballStateTest(t, &sb, 4, []int{1}, false) sbCloneIntf := sb.Clone() require.IsType(&unarySnowball{}, sbCloneIntf) sbClone := sbCloneIntf.(*unarySnowball) - UnarySnowballStateTest(t, sbClone, 4, 1, false) + UnarySnowballStateTest(t, sbClone, 4, []int{1}, false) - binarySnowball := sbClone.Extend(beta, 0) + binarySnowball := sbClone.Extend(0) - expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 0, SF(Confidence 
= 1, Finalized = false, SL(Preference = 0)))" + expected := "SB(Preference = 0, PreferenceStrength[0] = 4, PreferenceStrength[1] = 0, SF(Confidence = [1], Finalized = false, SL(Preference = 0)))" require.Equal(expected, binarySnowball.String()) binarySnowball.RecordUnsuccessfulPoll() for i := 0; i < 5; i++ { require.Zero(binarySnowball.Preference()) require.False(binarySnowball.Finalized()) - binarySnowball.RecordSuccessfulPoll(1) + binarySnowball.RecordPoll(alphaConfidence, 1) binarySnowball.RecordUnsuccessfulPoll() } require.Equal(1, binarySnowball.Preference()) require.False(binarySnowball.Finalized()) - binarySnowball.RecordSuccessfulPoll(1) + binarySnowball.RecordPoll(alphaConfidence, 1) require.Equal(1, binarySnowball.Preference()) require.False(binarySnowball.Finalized()) - binarySnowball.RecordSuccessfulPoll(1) + binarySnowball.RecordPoll(alphaConfidence, 1) require.Equal(1, binarySnowball.Preference()) require.True(binarySnowball.Finalized()) - expected = "SB(PreferenceStrength = 4, SF(Confidence = 1, Finalized = false))" + expected = "SB(PreferenceStrength = 4, SF(Confidence = [1], Finalized = false))" require.Equal(expected, sb.String()) } diff --git a/snow/consensus/snowball/unary_snowflake.go b/snow/consensus/snowball/unary_snowflake.go index f9c9b6241449..a49152966d9a 100644 --- a/snow/consensus/snowball/unary_snowflake.go +++ b/snow/consensus/snowball/unary_snowflake.go @@ -3,67 +3,92 @@ package snowball -import "fmt" +import ( + "fmt" + "slices" +) var _ Unary = (*unarySnowflake)(nil) -func newUnarySnowflake(beta int) unarySnowflake { +func newUnarySnowflake(alphaPreference int, terminationConditions []terminationCondition) unarySnowflake { return unarySnowflake{ - beta: beta, + alphaPreference: alphaPreference, + terminationConditions: terminationConditions, + confidence: make([]int, len(terminationConditions)), } } // unarySnowflake is the implementation of a unary snowflake instance +// Invariant: +// len(terminationConditions) == len(confidence) 
+// terminationConditions[i].alphaConfidence < terminationConditions[i+1].alphaConfidence +// terminationConditions[i].beta <= terminationConditions[i+1].beta +// confidence[i] >= confidence[i+1] (except after finalizing due to early termination) type unarySnowflake struct { - // beta is the number of consecutive successful queries required for - // finalization. - beta int + // alphaPreference is the threshold required to update the preference + alphaPreference int - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int + // terminationConditions gives the ascending ordered list of alphaConfidence values + // required to increment the corresponding confidence counter. + // The corresponding beta values give the threshold required to finalize this instance. + terminationConditions []terminationCondition + + // confidence is the number of consecutive successful polls for a given + // alphaConfidence threshold. + // This instance finalizes when confidence[i] >= terminationConditions[i].beta for any i + confidence []int // finalized prevents the state from changing after the required number of // consecutive polls has been reached finalized bool } -func (sf *unarySnowflake) RecordSuccessfulPoll() { - sf.confidence++ - sf.finalized = sf.finalized || sf.confidence >= sf.beta -} +func (sf *unarySnowflake) RecordPoll(count int) { + for i, terminationCondition := range sf.terminationConditions { + // If I did not reach this alpha threshold, I did not + // reach any more alpha thresholds. + // Clear the remaining confidence counters. + if count < terminationCondition.alphaConfidence { + clear(sf.confidence[i:]) + return + } -// RecordPollPreference fails to reach an alpha threshold to increase our -// confidence, so this calls RecordUnsuccessfulPoll to reset the confidence -// counter. 
-func (sf *unarySnowflake) RecordPollPreference() { - sf.RecordUnsuccessfulPoll() + // I reached this alpha threshold, increment the confidence counter + // and check if I can finalize. + sf.confidence[i]++ + if sf.confidence[i] >= terminationCondition.beta { + sf.finalized = true + return + } + } } func (sf *unarySnowflake) RecordUnsuccessfulPoll() { - sf.confidence = 0 + clear(sf.confidence) } func (sf *unarySnowflake) Finalized() bool { return sf.finalized } -func (sf *unarySnowflake) Extend(beta int, choice int) Binary { +func (sf *unarySnowflake) Extend(choice int) Binary { return &binarySnowflake{ - binarySlush: binarySlush{preference: choice}, - confidence: sf.confidence, - beta: beta, - finalized: sf.finalized, + binarySlush: binarySlush{preference: choice}, + confidence: slices.Clone(sf.confidence), + alphaPreference: sf.alphaPreference, + terminationConditions: sf.terminationConditions, + finalized: sf.finalized, } } func (sf *unarySnowflake) Clone() Unary { newSnowflake := *sf + newSnowflake.confidence = slices.Clone(sf.confidence) return &newSnowflake } func (sf *unarySnowflake) String() string { - return fmt.Sprintf("SF(Confidence = %d, Finalized = %v)", + return fmt.Sprintf("SF(Confidence = %v, Finalized = %v)", sf.confidence, sf.finalized) } diff --git a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go index 0791b688065e..ee099460e52b 100644 --- a/snow/consensus/snowball/unary_snowflake_test.go +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -9,54 +9,88 @@ import ( "github.com/stretchr/testify/require" ) -func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence int, expectedFinalized bool) { +func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidences []int, expectedFinalized bool) { require := require.New(t) - require.Equal(expectedConfidence, sf.confidence) + require.Equal(expectedConfidences, sf.confidence) require.Equal(expectedFinalized, 
sf.Finalized()) } func TestUnarySnowflake(t *testing.T) { require := require.New(t) + alphaPreference, alphaConfidence := 1, 2 beta := 2 + terminationConditions := newSingleTerminationCondition(alphaConfidence, beta) - sf := newUnarySnowflake(beta) + sf := newUnarySnowflake(alphaPreference, terminationConditions) - sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 1, false) + sf.RecordPoll(alphaConfidence) + UnarySnowflakeStateTest(t, &sf, []int{1}, false) sf.RecordUnsuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 0, false) + UnarySnowflakeStateTest(t, &sf, []int{0}, false) - sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 1, false) + sf.RecordPoll(alphaConfidence) + UnarySnowflakeStateTest(t, &sf, []int{1}, false) sfCloneIntf := sf.Clone() require.IsType(&unarySnowflake{}, sfCloneIntf) sfClone := sfCloneIntf.(*unarySnowflake) - UnarySnowflakeStateTest(t, sfClone, 1, false) + UnarySnowflakeStateTest(t, sfClone, []int{1}, false) - binarySnowflake := sfClone.Extend(beta, 0) + binarySnowflake := sfClone.Extend(0) binarySnowflake.RecordUnsuccessfulPoll() - binarySnowflake.RecordSuccessfulPoll(1) + binarySnowflake.RecordPoll(alphaConfidence, 1) require.False(binarySnowflake.Finalized()) - binarySnowflake.RecordSuccessfulPoll(1) + binarySnowflake.RecordPoll(alphaConfidence, 1) require.Equal(1, binarySnowflake.Preference()) require.True(binarySnowflake.Finalized()) - sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 2, true) + sf.RecordPoll(alphaConfidence) + UnarySnowflakeStateTest(t, &sf, []int{2}, true) sf.RecordUnsuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 0, true) + UnarySnowflakeStateTest(t, &sf, []int{0}, true) - sf.RecordSuccessfulPoll() - UnarySnowflakeStateTest(t, &sf, 1, true) + sf.RecordPoll(alphaConfidence) + UnarySnowflakeStateTest(t, &sf, []int{1}, true) +} + +type unarySnowflakeTest struct { + require *require.Assertions + + unarySnowflake +} + +func newUnarySnowflakeTest(t *testing.T, alphaPreference int, 
terminationConditions []terminationCondition) snowflakeTest[struct{}] { + require := require.New(t) + + return &unarySnowflakeTest{ + require: require, + unarySnowflake: newUnarySnowflake(alphaPreference, terminationConditions), + } +} + +func (sf *unarySnowflakeTest) RecordPoll(count int, _ struct{}) { + sf.unarySnowflake.RecordPoll(count) +} + +func (sf *unarySnowflakeTest) AssertEqual(expectedConfidences []int, expectedFinalized bool, _ struct{}) { + sf.require.Equal(expectedConfidences, sf.unarySnowflake.confidence) + sf.require.Equal(expectedFinalized, sf.Finalized()) +} + +func TestUnarySnowflakeErrorDriven(t *testing.T) { + for _, test := range getErrorDrivenSnowflakeSingleChoiceSuite[struct{}]() { + t.Run(test.name, func(t *testing.T) { + test.f(t, newUnarySnowflakeTest, struct{}{}) + }) + } } diff --git a/snow/consensus/snowman/bootstrapper/sampler.go b/snow/consensus/snowman/bootstrapper/sampler.go index b43f6d915745..56b27d3076ff 100644 --- a/snow/consensus/snowman/bootstrapper/sampler.go +++ b/snow/consensus/snowman/bootstrapper/sampler.go @@ -4,11 +4,15 @@ package bootstrapper import ( + "errors" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" ) +var errUnexpectedSamplerFailure = errors.New("unexpected sampler failure") + // Sample keys from [elements] uniformly by weight without replacement. The // returned set will have size less than or equal to [maxSize]. This function // will error if the sum of all weights overflows. 
@@ -36,9 +40,9 @@ func Sample[T comparable](elements map[T]uint64, maxSize int) (set.Set[T], error } maxSize = int(min(uint64(maxSize), totalWeight)) - indices, err := sampler.Sample(maxSize) - if err != nil { - return nil, err + indices, ok := sampler.Sample(maxSize) + if !ok { + return nil, errUnexpectedSamplerFailure } sampledElements := set.NewSet[T](maxSize) diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go index 3f1006416366..19fc1600e4f0 100644 --- a/snow/consensus/snowman/consensus.go +++ b/snow/consensus/snowman/consensus.go @@ -31,19 +31,20 @@ type Consensus interface { // Returns the number of blocks processing NumProcessing() int - // Adds a new decision. Assumes the dependency has already been added. + // Add a new block. + // + // Add should not be called multiple times with the same block. + // The parent block should either be the last accepted block or processing. + // // Returns if a critical error has occurred. - Add(context.Context, Block) error - - // Decided returns true if the block has been decided. - Decided(Block) bool + Add(Block) error // Processing returns true if the block ID is currently processing. Processing(ids.ID) bool - // IsPreferred returns true if the block is currently on the preferred - // chain. - IsPreferred(Block) bool + // IsPreferred returns true if the block ID is preferred. Only the last + // accepted block and processing blocks are considered preferred. + IsPreferred(ids.ID) bool // Returns the ID and height of the last accepted decision. 
LastAccepted() (ids.ID, uint64) diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 1069ae3583f6..1de19a0b2df4 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -11,7 +11,6 @@ import ( "runtime" "strings" "testing" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -20,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/bag" ) @@ -27,20 +27,12 @@ import ( type testFunc func(*testing.T, Factory) var ( - GenesisID = ids.Empty.Prefix(0) - GenesisHeight = uint64(0) - GenesisTimestamp = time.Unix(1, 0) - Genesis = &TestBlock{TestDecidable: choices.TestDecidable{ - IDV: GenesisID, - StatusV: choices.Accepted, - }} - testFuncs = []testFunc{ InitializeTest, NumProcessingTest, AddToTailTest, AddToNonTailTest, - AddToUnknownTest, + AddOnUnknownParentTest, StatusOrProcessingPreviouslyAcceptedTest, StatusOrProcessingPreviouslyRejectedTest, StatusOrProcessingUnissuedTest, @@ -53,21 +45,19 @@ var ( RecordPollTransitivelyResetConfidenceTest, RecordPollInvalidVoteTest, RecordPollTransitiveVotingTest, - RecordPollDivergedVotingTest, RecordPollDivergedVotingWithNoConflictingBitTest, RecordPollChangePreferredChainTest, LastAcceptedTest, MetricsProcessingErrorTest, MetricsAcceptedErrorTest, MetricsRejectedErrorTest, - ErrorOnInitialRejectionTest, ErrorOnAcceptTest, ErrorOnRejectSiblingTest, ErrorOnTransitiveRejectionTest, RandomizedConsistencyTest, ErrorOnAddDecidedBlockTest, - ErrorOnAddDuplicateBlockIDTest, RecordPollWithDefaultParameters, + RecordPollRegressionCalculateInDegreeIndegreeCalculation, } errTest = errors.New("non-nil error") @@ -98,17 +88,22 @@ func 
InitializeTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - require.Equal(GenesisID, sm.Preference()) + require.Equal(snowmantest.GenesisID, sm.Preference()) require.Zero(sm.NumProcessing()) } @@ -124,34 +119,30 @@ func NumProcessingTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) require.Zero(sm.NumProcessing()) // Adding to the previous preference will update the preference - require.NoError(sm.Add(context.Background(), block)) - + require.NoError(sm.Add(block)) require.Equal(1, sm.NumProcessing()) votes := bag.Of(block.ID()) require.NoError(sm.RecordPoll(context.Background(), votes)) - require.Zero(sm.NumProcessing()) } @@ -167,28 +158,26 @@ func AddToTailTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, 
GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) // Adding to the previous preference will update the preference - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) require.Equal(block.ID(), sm.Preference()) - require.True(sm.IsPreferred(block)) + require.True(sm.IsPreferred(block.ID())) pref, ok := sm.PreferenceAtHeight(block.Height()) require.True(ok) @@ -207,45 +196,36 @@ func AddToNonTailTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - firstBlock := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - secondBlock := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + firstBlock := snowmantest.BuildChild(snowmantest.Genesis) + secondBlock := snowmantest.BuildChild(snowmantest.Genesis) // Adding to the previous preference will update the preference - require.NoError(sm.Add(context.Background(), firstBlock)) + require.NoError(sm.Add(firstBlock)) require.Equal(firstBlock.IDV, sm.Preference()) // Adding to 
something other than the previous preference won't update the // preference - require.NoError(sm.Add(context.Background(), secondBlock)) + require.NoError(sm.Add(secondBlock)) require.Equal(firstBlock.IDV, sm.Preference()) } // Make sure that adding a block that is detached from the rest of the tree -// rejects the block -func AddToUnknownTest(t *testing.T, factory Factory) { +// returns an error +func AddOnUnknownParentTest(t *testing.T, factory Factory) { require := require.New(t) sm := factory.New() @@ -256,34 +236,32 @@ func AddToUnknownTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - - parent := &TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Unknown, - }} + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ + block := &snowmantest.Block{ TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), + IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - ParentV: parent.IDV, - HeightV: parent.HeightV + 1, + ParentV: ids.GenerateTestID(), + HeightV: snowmantest.GenesisHeight + 2, } - // Adding a block with an unknown parent means the parent must have already - // been rejected. Therefore the block should be immediately rejected - require.NoError(sm.Add(context.Background(), block)) - require.Equal(GenesisID, sm.Preference()) - require.Equal(choices.Rejected, block.Status()) + // Adding a block with an unknown parent should error. 
+ err := sm.Add(block) + require.ErrorIs(err, errUnknownParentBlock) } func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { @@ -297,23 +275,27 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - require.Equal(choices.Accepted, Genesis.Status()) - require.False(sm.Processing(Genesis.ID())) - require.True(sm.Decided(Genesis)) - require.True(sm.IsPreferred(Genesis)) + require.Equal(choices.Accepted, snowmantest.Genesis.Status()) + require.False(sm.Processing(snowmantest.Genesis.ID())) + require.True(sm.IsPreferred(snowmantest.Genesis.ID())) - pref, ok := sm.PreferenceAtHeight(Genesis.Height()) + pref, ok := sm.PreferenceAtHeight(snowmantest.Genesis.Height()) require.True(ok) - require.Equal(Genesis.ID(), pref) + require.Equal(snowmantest.Genesis.ID(), pref) } func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { @@ -327,28 +309,26 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Rejected, - }, - ParentV: Genesis.IDV, - 
HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) + require.NoError(block.Reject(context.Background())) require.Equal(choices.Rejected, block.Status()) require.False(sm.Processing(block.ID())) - require.True(sm.Decided(block)) - require.False(sm.IsPreferred(block)) + require.False(sm.IsPreferred(block.ID())) _, ok := sm.PreferenceAtHeight(block.Height()) require.False(ok) @@ -365,28 +345,25 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) require.Equal(choices.Processing, block.Status()) require.False(sm.Processing(block.ID())) - require.False(sm.Decided(block)) - require.False(sm.IsPreferred(block)) + require.False(sm.IsPreferred(block.ID())) _, ok := sm.PreferenceAtHeight(block.Height()) require.False(ok) @@ -403,29 +380,26 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 3, - BetaRogue: 5, + Beta: 3, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: 
choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) require.Equal(choices.Processing, block.Status()) require.True(sm.Processing(block.ID())) - require.False(sm.Decided(block)) - require.True(sm.IsPreferred(block)) + require.True(sm.IsPreferred(block.ID())) pref, ok := sm.PreferenceAtHeight(block.Height()) require.True(ok) @@ -443,25 +417,23 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 3, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) votes := bag.Of(block.ID()) require.NoError(sm.RecordPoll(context.Background(), votes)) @@ -486,34 +458,25 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + 
)) - firstBlock := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - secondBlock := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + firstBlock := snowmantest.BuildChild(snowmantest.Genesis) + secondBlock := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), firstBlock)) - require.NoError(sm.Add(context.Background(), secondBlock)) + require.NoError(sm.Add(firstBlock)) + require.NoError(sm.Add(secondBlock)) votes := bag.Of(firstBlock.ID()) @@ -543,34 +506,25 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { K: 2, AlphaPreference: 2, AlphaConfidence: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - firstBlock := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - secondBlock := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + firstBlock := snowmantest.BuildChild(snowmantest.Genesis) + secondBlock := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(sm.Add(context.Background(), firstBlock)) - require.NoError(sm.Add(context.Background(), secondBlock)) + require.NoError(sm.Add(firstBlock)) + require.NoError(sm.Add(secondBlock)) votes := bag.Of(firstBlock.ID(), 
secondBlock.ID()) @@ -604,19 +558,24 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - votes := bag.Of(GenesisID) + votes := bag.Of(snowmantest.GenesisID) require.NoError(sm.RecordPoll(context.Background(), votes)) require.Zero(sm.NumProcessing()) - require.Equal(GenesisID, sm.Preference()) + require.Equal(snowmantest.GenesisID, sm.Preference()) } func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { @@ -630,43 +589,27 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3), - StatusV: choices.Processing, - }, - ParentV: block1.IDV, - HeightV: block1.HeightV + 1, - } + block0 := snowmantest.BuildChild(snowmantest.Genesis) + block1 
:= snowmantest.BuildChild(snowmantest.Genesis) + block2 := snowmantest.BuildChild(block1) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) // Current graph structure: // G @@ -701,52 +644,29 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3), - StatusV: choices.Processing, - }, - ParentV: block1.IDV, - HeightV: block1.HeightV + 1, - } - block3 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(4), - StatusV: choices.Processing, - }, - ParentV: block1.IDV, - HeightV: block1.HeightV + 1, - } + block0 := snowmantest.BuildChild(snowmantest.Genesis) + block1 := snowmantest.BuildChild(snowmantest.Genesis) + block2 := snowmantest.BuildChild(block1) + block3 := snowmantest.BuildChild(block1) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - 
require.NoError(sm.Add(context.Background(), block2)) - require.NoError(sm.Add(context.Background(), block3)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) + require.NoError(sm.Add(block3)) // Current graph structure: // G @@ -794,26 +714,24 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - unknownBlockID := ids.Empty.Prefix(2) + block := snowmantest.BuildChild(snowmantest.Genesis) + unknownBlockID := ids.GenerateTestID() - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) validVotes := bag.Of(block.ID()) require.NoError(sm.RecordPoll(context.Background(), validVotes)) @@ -836,61 +754,31 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { K: 3, AlphaPreference: 3, AlphaConfidence: 3, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: 
block0.IDV, - HeightV: block0.HeightV + 1, - } - block2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3), - StatusV: choices.Processing, - }, - ParentV: block1.IDV, - HeightV: block1.HeightV + 1, - } - block3 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(4), - StatusV: choices.Processing, - }, - ParentV: block0.IDV, - HeightV: block0.HeightV + 1, - } - block4 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(5), - StatusV: choices.Processing, - }, - ParentV: block3.IDV, - HeightV: block3.HeightV + 1, - } - - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) - require.NoError(sm.Add(context.Background(), block3)) - require.NoError(sm.Add(context.Background(), block4)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) + + block0 := snowmantest.BuildChild(snowmantest.Genesis) + block1 := snowmantest.BuildChild(block0) + block2 := snowmantest.BuildChild(block1) + block3 := snowmantest.BuildChild(block0) + block4 := snowmantest.BuildChild(block3) + + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) + require.NoError(sm.Add(block3)) + require.NoError(sm.Add(block4)) // Current graph structure: // G @@ -937,110 +825,6 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { require.Equal(choices.Rejected, block4.Status()) } -func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { - sm := factory.New() - require := require.New(t) - - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) - params := snowball.Parameters{ - K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - 
MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{0x0f}, // 1111 - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{0x08}, // 0001 - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{0x01}, // 1000 - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block3 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: block2.IDV, - HeightV: block2.HeightV + 1, - } - - require.NoError(sm.Add(context.Background(), block0)) - - require.NoError(sm.Add(context.Background(), block1)) - - // The first bit is contested as either 0 or 1. When voting for [block0] and - // when the first bit is 1, the following bits have been decided to follow - // the 255 remaining bits of [block0]. - votes0 := bag.Of(block0.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes0)) - - // Although we are adding in [block2] here - the underlying snowball - // instance has already decided it is rejected. Snowman doesn't actually - // know that though, because that is an implementation detail of the - // Snowball trie that is used. - require.NoError(sm.Add(context.Background(), block2)) - - // Because [block2] is effectively rejected, [block3] is also effectively - // rejected. 
- require.NoError(sm.Add(context.Background(), block3)) - - require.Equal(block0.ID(), sm.Preference()) - require.Equal(choices.Processing, block0.Status(), "should not be accepted yet") - require.Equal(choices.Processing, block1.Status(), "should not be rejected yet") - require.Equal(choices.Processing, block2.Status(), "should not be rejected yet") - require.Equal(choices.Processing, block3.Status(), "should not be rejected yet") - - // Current graph structure: - // G - // / \ - // * | - // / \ | - // 0 2 1 - // | - // 3 - // Tail = 0 - - // Transitively votes for [block2] by voting for its child [block3]. - // Because [block2] shares the first bit with [block0] and the following - // bits have been finalized for [block0], the voting results in accepting - // [block0]. When [block0] is accepted, [block1] and [block2] are rejected - // as conflicting. [block2]'s child, [block3], is then rejected - // transitively. - votes3 := bag.Of(block3.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes3)) - - require.Zero(sm.NumProcessing()) - require.Equal(choices.Accepted, block0.Status()) - require.Equal(choices.Rejected, block1.Status()) - require.Equal(choices.Rejected, block2.Status()) - require.Equal(choices.Rejected, block3.Status()) -} - func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Factory) { sm := factory.New() require := require.New(t) @@ -1051,50 +835,48 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ + block0 := &snowmantest.Block{ TestDecidable: 
choices.TestDecidable{ IDV: ids.ID{0x06}, // 0110 StatusV: choices.Processing, }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, + ParentV: snowmantest.GenesisID, + HeightV: snowmantest.GenesisHeight + 1, } - block1 := &TestBlock{ + block1 := &snowmantest.Block{ TestDecidable: choices.TestDecidable{ IDV: ids.ID{0x08}, // 0001 StatusV: choices.Processing, }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, + ParentV: snowmantest.GenesisID, + HeightV: snowmantest.GenesisHeight + 1, } - block2 := &TestBlock{ + block2 := &snowmantest.Block{ TestDecidable: choices.TestDecidable{ IDV: ids.ID{0x01}, // 1000 StatusV: choices.Processing, }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block3 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: block2.IDV, - HeightV: block2.HeightV + 1, + ParentV: snowmantest.GenesisID, + HeightV: snowmantest.GenesisHeight + 1, } + block3 := snowmantest.BuildChild(block2) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) // When voting for [block0], we end up finalizing the first bit as 0. The // second bit is contested as either 0 or 1. For when the second bit is 1, @@ -1107,11 +889,11 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // instance has already decided it is rejected. Snowman doesn't actually // know that though, because that is an implementation detail of the // Snowball trie that is used. - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block2)) // Because [block2] is effectively rejected, [block3] is also effectively // rejected. 
- require.NoError(sm.Add(context.Background(), block3)) + require.NoError(sm.Add(block3)) require.Equal(block0.ID(), sm.Preference()) require.Equal(choices.Processing, block0.Status(), "should not be decided yet") @@ -1156,59 +938,36 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 10, - BetaRogue: 10, + Beta: 10, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - a1Block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - b1Block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - a2Block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: a1Block.IDV, - HeightV: a1Block.HeightV + 1, - } - b2Block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: b1Block.IDV, - HeightV: b1Block.HeightV + 1, - } + a1Block := snowmantest.BuildChild(snowmantest.Genesis) + b1Block := snowmantest.BuildChild(snowmantest.Genesis) + a2Block := snowmantest.BuildChild(a1Block) + b2Block := snowmantest.BuildChild(b1Block) - require.NoError(sm.Add(context.Background(), a1Block)) - require.NoError(sm.Add(context.Background(), a2Block)) - require.NoError(sm.Add(context.Background(), b1Block)) - require.NoError(sm.Add(context.Background(), b2Block)) + require.NoError(sm.Add(a1Block)) + require.NoError(sm.Add(a2Block)) + 
require.NoError(sm.Add(b1Block)) + require.NoError(sm.Add(b2Block)) require.Equal(a2Block.ID(), sm.Preference()) - require.True(sm.IsPreferred(a1Block)) - require.True(sm.IsPreferred(a2Block)) - require.False(sm.IsPreferred(b1Block)) - require.False(sm.IsPreferred(b2Block)) + require.True(sm.IsPreferred(a1Block.ID())) + require.True(sm.IsPreferred(a2Block.ID())) + require.False(sm.IsPreferred(b1Block.ID())) + require.False(sm.IsPreferred(b2Block.ID())) pref, ok := sm.PreferenceAtHeight(a1Block.Height()) require.True(ok) @@ -1222,10 +981,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.NoError(sm.RecordPoll(context.Background(), b2Votes)) require.Equal(b2Block.ID(), sm.Preference()) - require.False(sm.IsPreferred(a1Block)) - require.False(sm.IsPreferred(a2Block)) - require.True(sm.IsPreferred(b1Block)) - require.True(sm.IsPreferred(b2Block)) + require.False(sm.IsPreferred(a1Block.ID())) + require.False(sm.IsPreferred(a2Block.ID())) + require.True(sm.IsPreferred(b1Block.ID())) + require.True(sm.IsPreferred(b2Block.ID())) pref, ok = sm.PreferenceAtHeight(b1Block.Height()) require.True(ok) @@ -1240,10 +999,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.NoError(sm.RecordPoll(context.Background(), a1Votes)) require.Equal(a2Block.ID(), sm.Preference()) - require.True(sm.IsPreferred(a1Block)) - require.True(sm.IsPreferred(a2Block)) - require.False(sm.IsPreferred(b1Block)) - require.False(sm.IsPreferred(b2Block)) + require.True(sm.IsPreferred(a1Block.ID())) + require.True(sm.IsPreferred(a2Block.ID())) + require.False(sm.IsPreferred(b1Block.ID())) + require.False(sm.IsPreferred(b2Block.ID())) pref, ok = sm.PreferenceAtHeight(a1Block.Height()) require.True(ok) @@ -1264,60 +1023,43 @@ func LastAcceptedTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, 
MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: block0.IDV, - HeightV: block0.HeightV + 1, - } - block2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: block1.IDV, - HeightV: block1.HeightV + 1, - } - block1Conflict := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: block0.IDV, - HeightV: block0.HeightV + 1, - } + block0 := snowmantest.BuildChild(snowmantest.Genesis) + block1 := snowmantest.BuildChild(block0) + block2 := snowmantest.BuildChild(block1) + block1Conflict := snowmantest.BuildChild(block0) lastAcceptedID, lastAcceptedHeight := sm.LastAccepted() - require.Equal(GenesisID, lastAcceptedID) - require.Equal(GenesisHeight, lastAcceptedHeight) + require.Equal(snowmantest.GenesisID, lastAcceptedID) + require.Equal(snowmantest.GenesisHeight, lastAcceptedHeight) + + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block1Conflict)) + require.NoError(sm.Add(block2)) + + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() + require.Equal(snowmantest.GenesisID, lastAcceptedID) + require.Equal(snowmantest.GenesisHeight, lastAcceptedHeight) - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block1Conflict)) - 
require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.RecordPoll(context.Background(), bag.Of(block0.IDV))) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() - require.Equal(GenesisID, lastAcceptedID) - require.Equal(GenesisHeight, lastAcceptedHeight) + require.Equal(snowmantest.GenesisID, lastAcceptedID) + require.Equal(snowmantest.GenesisHeight, lastAcceptedHeight) require.NoError(sm.RecordPoll(context.Background(), bag.Of(block1.IDV))) @@ -1333,6 +1075,12 @@ func LastAcceptedTest(t *testing.T, factory Factory) { require.NoError(sm.RecordPoll(context.Background(), bag.Of(block2.IDV))) + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() + require.Equal(block1.IDV, lastAcceptedID) + require.Equal(block1.HeightV, lastAcceptedHeight) + + require.NoError(sm.RecordPoll(context.Background(), bag.Of(block2.IDV))) + lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(block2.IDV, lastAcceptedID) require.Equal(block2.HeightV, lastAcceptedHeight) @@ -1349,22 +1097,26 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - numProcessing := prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "blks_processing", - }) + numProcessing := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "blks_processing", + }) require.NoError(ctx.Registerer.Register(numProcessing)) - err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + err := sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + ) require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } @@ -1379,22 +1131,26 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, 
AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - numAccepted := prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "blks_accepted_count", - }) + numAccepted := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "blks_accepted_count", + }) require.NoError(ctx.Registerer.Register(numAccepted)) - err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + err := sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + ) require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } @@ -1409,65 +1165,29 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - numRejected := prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "blks_rejected_count", - }) + numRejected := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "blks_rejected_count", + }) require.NoError(ctx.Registerer.Register(numRejected)) - err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + err := sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + ) require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } -func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { - require := require.New(t) - - sm := factory.New() - - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) - params := snowball.Parameters{ - K: 1, - AlphaPreference: 1, - AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, - 
ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - - rejectedBlock := &TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Rejected, - }} - - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - RejectV: errTest, - StatusV: choices.Processing, - }, - ParentV: rejectedBlock.IDV, - HeightV: rejectedBlock.HeightV + 1, - } - - err := sm.Add(context.Background(), block) - require.ErrorIs(err, errTest) -} - func ErrorOnAcceptTest(t *testing.T, factory Factory) { require := require.New(t) @@ -1479,27 +1199,25 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - AcceptV: errTest, - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block := snowmantest.BuildChild(snowmantest.Genesis) + block.AcceptV = errTest - require.NoError(sm.Add(context.Background(), block)) + require.NoError(sm.Add(block)) votes := bag.Of(block.ID()) err := sm.RecordPoll(context.Background(), votes) @@ -1517,36 +1235,27 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, 
GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - RejectV: errTest, - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } + block0 := snowmantest.BuildChild(snowmantest.Genesis) + block1 := snowmantest.BuildChild(snowmantest.Genesis) + block1.RejectV = errTest - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) votes := bag.Of(block0.ID()) err := sm.RecordPoll(context.Background(), votes) @@ -1564,45 +1273,29 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2), - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3), - RejectV: errTest, - StatusV: choices.Processing, 
- }, - ParentV: block1.IDV, - HeightV: block1.HeightV + 1, - } + block0 := snowmantest.BuildChild(snowmantest.Genesis) + block1 := snowmantest.BuildChild(snowmantest.Genesis) + block2 := snowmantest.BuildChild(block1) + block2.RejectV = errTest - require.NoError(sm.Add(context.Background(), block0)) - require.NoError(sm.Add(context.Background(), block1)) - require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(block0)) + require.NoError(sm.Add(block1)) + require.NoError(sm.Add(block2)) votes := bag.Of(block0.ID()) err := sm.RecordPoll(context.Background(), votes) @@ -1619,8 +1312,7 @@ func RandomizedConsistencyTest(t *testing.T, factory Factory) { K: 20, AlphaPreference: 15, AlphaConfidence: 15, - BetaVirtuous: 20, - BetaRogue: 30, + Beta: 20, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -1655,69 +1347,25 @@ func ErrorOnAddDecidedBlockTest(t *testing.T, factory Factory) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{0x03}, // 0b0011 - StatusV: choices.Accepted, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - err := sm.Add(context.Background(), block0) - require.ErrorIs(err, errDuplicateAdd) + err := sm.Add(snowmantest.Genesis) + require.ErrorIs(err, errUnknownParentBlock) } -func ErrorOnAddDuplicateBlockIDTest(t *testing.T, factory Factory) { - sm := factory.New() - require := require.New(t) - - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) - params := snowball.Parameters{ - K: 1, - AlphaPreference: 1, - 
AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - - block0 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{0x03}, // 0b0011 - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - block1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{0x03}, // 0b0011, same as block0 - StatusV: choices.Processing, - }, - ParentV: block0.IDV, - HeightV: block0.HeightV + 1, - } - - require.NoError(sm.Add(context.Background(), block0)) - err := sm.Add(context.Background(), block1) - require.ErrorIs(err, errDuplicateAdd) -} - -func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { +func gatherCounterGauge(t *testing.T, reg prometheus.Gatherer) map[string]float64 { ms, err := reg.Gather() require.NoError(t, err) mss := make(map[string]float64) @@ -1748,35 +1396,73 @@ func RecordPollWithDefaultParameters(t *testing.T, factory Factory) { snowCtx := snowtest.Context(t, snowtest.CChainID) ctx := snowtest.ConsensusContext(snowCtx) params := snowball.DefaultParameters - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) // "blk1" and "blk2" are in conflict - blk1 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{1}, - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - blk2 := &TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{2}, - StatusV: choices.Processing, - }, - ParentV: Genesis.IDV, - HeightV: Genesis.HeightV + 1, - } - require.NoError(sm.Add(context.Background(), blk1)) - require.NoError(sm.Add(context.Background(), blk2)) 
+ blk1 := snowmantest.BuildChild(snowmantest.Genesis) + blk2 := snowmantest.BuildChild(snowmantest.Genesis) + + require.NoError(sm.Add(blk1)) + require.NoError(sm.Add(blk2)) votes := bag.Bag[ids.ID]{} votes.AddCount(blk1.ID(), params.AlphaConfidence) - // as "blk1" and "blk2" are in conflict, we need beta rogue rounds to finalize - for i := 0; i < params.BetaRogue; i++ { - // should not finalize with less than beta rogue rounds + // Require beta rounds to finalize + for i := 0; i < params.Beta; i++ { + // should not finalize with less than beta rounds require.Equal(2, sm.NumProcessing()) require.NoError(sm.RecordPoll(context.Background(), votes)) } require.Zero(sm.NumProcessing()) } + +// If a block that was voted for received additional votes from another block, +// the indegree of the topological sort should not traverse into the parent +// node. +func RecordPollRegressionCalculateInDegreeIndegreeCalculation(t *testing.T, factory Factory) { + require := require.New(t) + + sm := factory.New() + + snowCtx := snowtest.Context(t, snowtest.CChainID) + ctx := snowtest.ConsensusContext(snowCtx) + params := snowball.Parameters{ + K: 3, + AlphaPreference: 2, + AlphaConfidence: 2, + Beta: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + require.NoError(sm.Initialize( + ctx, + params, + snowmantest.GenesisID, + snowmantest.GenesisHeight, + snowmantest.GenesisTimestamp, + )) + + blk1 := snowmantest.BuildChild(snowmantest.Genesis) + blk2 := snowmantest.BuildChild(blk1) + blk3 := snowmantest.BuildChild(blk2) + + require.NoError(sm.Add(blk1)) + require.NoError(sm.Add(blk2)) + require.NoError(sm.Add(blk3)) + + votes := bag.Bag[ids.ID]{} + votes.AddCount(blk2.ID(), 1) + votes.AddCount(blk3.ID(), 2) + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + require.Equal(choices.Accepted, blk3.Status()) +} diff --git 
a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go index 43e5d7d91029..bab57e5c371d 100644 --- a/snow/consensus/snowman/metrics.go +++ b/snow/consensus/snowman/metrics.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -34,7 +34,7 @@ type metrics struct { // processingBlocks keeps track of the [processingStart] that each block was // issued into the consensus instance. This is used to calculate the amount // of time to accept or reject the block. - processingBlocks linkedhashmap.LinkedHashmap[ids.ID, processingStart] + processingBlocks *linked.Hashmap[ids.ID, processingStart] // numProcessing keeps track of the number of processing blocks numProcessing prometheus.Gauge @@ -65,7 +65,6 @@ type metrics struct { func newMetrics( log logging.Logger, - namespace string, reg prometheus.Registerer, lastAcceptedHeight uint64, lastAcceptedTime time.Time, @@ -75,82 +74,57 @@ func newMetrics( log: log, currentMaxVerifiedHeight: lastAcceptedHeight, maxVerifiedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "max_verified_height", - Help: "highest verified height", + Name: "max_verified_height", + Help: "highest verified height", }), lastAcceptedHeight: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_height", - Help: "last height accepted", + Name: "last_accepted_height", + Help: "last height accepted", }), lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "last_accepted_timestamp", - Help: "timestamp of the last accepted block in unix seconds", + Name: "last_accepted_timestamp", + Help: "timestamp of the last accepted block in 
unix seconds", }), - processingBlocks: linkedhashmap.New[ids.ID, processingStart](), + processingBlocks: linked.NewHashmap[ids.ID, processingStart](), - // e.g., - // "avalanche_X_blks_processing" reports how many blocks are currently processing numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_processing", - Help: "number of currently processing blocks", + Name: "blks_processing", + Help: "number of currently processing blocks", }), blockSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_accepted_container_size_sum", - Help: "cumulative size of all accepted blocks", + Name: "blks_accepted_container_size_sum", + Help: "cumulative size of all accepted blocks", }), pollsAccepted: metric.NewAveragerWithErrs( - namespace, "blks_polls_accepted", "number of polls from the issuance of a block to its acceptance", reg, &errs, ), - // e.g., - // "avalanche_C_blks_accepted_count" reports how many times "Observe" has been called which is the total number of blocks accepted - // "avalanche_C_blks_accepted_sum" reports the cumulative sum of all block acceptance latencies in nanoseconds - // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in nanoseconds - // "avalanche_C_blks_accepted_container_size_sum" reports the cumulative sum of all accepted blocks' sizes in bytes - // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average accepted block size in bytes latAccepted: metric.NewAveragerWithErrs( - namespace, "blks_accepted", "time (in ns) from the issuance of a block to its acceptance", reg, &errs, ), buildLatencyAccepted: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_build_accept_latency", - Help: "time (in ns) from the timestamp of a block to the time it was accepted", + Name: "blks_build_accept_latency", + Help: "time (in ns) from the timestamp of a block 
to the time it was accepted", }), blockSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blks_rejected_container_size_sum", - Help: "cumulative size of all rejected blocks", + Name: "blks_rejected_container_size_sum", + Help: "cumulative size of all rejected blocks", }), pollsRejected: metric.NewAveragerWithErrs( - namespace, "blks_polls_rejected", "number of polls from the issuance of a block to its rejection", reg, &errs, ), - // e.g., - // "avalanche_P_blks_rejected_count" reports how many times "Observe" has been called which is the total number of blocks rejected - // "avalanche_P_blks_rejected_sum" reports the cumulative sum of all block rejection latencies in nanoseconds - // "avalanche_P_blks_rejected_sum / avalanche_P_blks_rejected_count" is the average block rejection latency in nanoseconds - // "avalanche_P_blks_rejected_container_size_sum" reports the cumulative sum of all rejected blocks' sizes in bytes - // "avalanche_P_blks_rejected_container_size_sum / avalanche_P_blks_rejected_count" is the average rejected block size in bytes latRejected: metric.NewAveragerWithErrs( - namespace, "blks_rejected", "time (in ns) from the issuance of a block to its rejection", reg, @@ -158,14 +132,12 @@ func newMetrics( ), numSuccessfulPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_successful", - Help: "number of successful polls", + Name: "polls_successful", + Help: "number of successful polls", }), numFailedPolls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls_failed", - Help: "number of failed polls", + Name: "polls_failed", + Help: "number of failed polls", }), } diff --git a/snow/consensus/snowman/network_test.go b/snow/consensus/snowman/network_test.go index aead346fb5e4..8c302ed5c7c6 100644 --- a/snow/consensus/snowman/network_test.go +++ b/snow/consensus/snowman/network_test.go @@ -10,6 +10,7 @@ import ( 
"github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/bag" @@ -18,7 +19,7 @@ import ( type Network struct { params snowball.Parameters - colors []*TestBlock + colors []*snowmantest.Block rngSource sampler.Source nodes, running []Consensus } @@ -26,13 +27,13 @@ type Network struct { func NewNetwork(params snowball.Parameters, numColors int, rngSource sampler.Source) *Network { n := &Network{ params: params, - colors: []*TestBlock{{ + colors: []*snowmantest.Block{{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(rngSource.Uint64()), StatusV: choices.Processing, }, - ParentV: Genesis.IDV, - HeightV: 1, + ParentV: snowmantest.GenesisID, + HeightV: snowmantest.GenesisHeight + 1, }}, rngSource: rngSource, } @@ -42,7 +43,7 @@ func NewNetwork(params snowball.Parameters, numColors int, rngSource sampler.Sou s.Initialize(uint64(len(n.colors))) dependencyInd, _ := s.Next() dependency := n.colors[dependencyInd] - n.colors = append(n.colors, &TestBlock{ + n.colors = append(n.colors, &snowmantest.Block{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(rngSource.Uint64()), StatusV: choices.Processing, @@ -58,7 +59,7 @@ func (n *Network) shuffleColors() { s := sampler.NewDeterministicUniform(n.rngSource) s.Initialize(uint64(len(n.colors))) indices, _ := s.Sample(len(n.colors)) - colors := []*TestBlock(nil) + colors := []*snowmantest.Block(nil) for _, index := range indices { colors = append(colors, n.colors[int(index)]) } @@ -69,7 +70,7 @@ func (n *Network) shuffleColors() { func (n *Network) AddNode(t testing.TB, sm Consensus) error { snowCtx := snowtest.Context(t, snowtest.CChainID) ctx := snowtest.ConsensusContext(snowCtx) - if err := sm.Initialize(ctx, n.params, 
Genesis.ID(), Genesis.Height(), Genesis.Timestamp()); err != nil { + if err := sm.Initialize(ctx, n.params, snowmantest.GenesisID, snowmantest.GenesisHeight, snowmantest.GenesisTimestamp); err != nil { return err } @@ -80,7 +81,7 @@ func (n *Network) AddNode(t testing.TB, sm Consensus) error { if !found { myDep = blk.Parent() } - myVtx := &TestBlock{ + myBlock := &snowmantest.Block{ TestDecidable: choices.TestDecidable{ IDV: blk.ID(), StatusV: blk.Status(), @@ -90,10 +91,10 @@ func (n *Network) AddNode(t testing.TB, sm Consensus) error { VerifyV: blk.Verify(context.Background()), BytesV: blk.Bytes(), } - if err := sm.Add(context.Background(), myVtx); err != nil { + if err := sm.Add(myBlock); err != nil { return err } - deps[myVtx.ID()] = myDep + deps[myBlock.ID()] = myDep } n.nodes = append(n.nodes, sm) n.running = append(n.running, sm) diff --git a/snow/consensus/snowman/poll/early_term_no_traversal.go b/snow/consensus/snowman/poll/early_term_no_traversal.go index 460805ab7820..df09157b04df 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal.go @@ -4,24 +4,124 @@ package poll import ( + "errors" "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" ) +var ( + errPollDurationVectorMetrics = errors.New("failed to register poll_duration vector metrics") + errPollCountVectorMetrics = errors.New("failed to register poll_count vector metrics") + + terminationReason = "reason" + exhaustedReason = "exhausted" + earlyFailReason = "early_fail" + earlyAlphaPrefReason = "early_alpha_pref" + earlyAlphaConfReason = "early_alpha_conf" + + exhaustedLabel = prometheus.Labels{ + terminationReason: exhaustedReason, + } + earlyFailLabel = prometheus.Labels{ + terminationReason: earlyFailReason, + } + earlyAlphaPrefLabel = prometheus.Labels{ + terminationReason: earlyAlphaPrefReason, + } + earlyAlphaConfLabel = 
prometheus.Labels{ + terminationReason: earlyAlphaConfReason, + } +) + +type earlyTermNoTraversalMetrics struct { + durExhaustedPolls prometheus.Gauge + durEarlyFailPolls prometheus.Gauge + durEarlyAlphaPrefPolls prometheus.Gauge + durEarlyAlphaConfPolls prometheus.Gauge + + countExhaustedPolls prometheus.Counter + countEarlyFailPolls prometheus.Counter + countEarlyAlphaPrefPolls prometheus.Counter + countEarlyAlphaConfPolls prometheus.Counter +} + +func newEarlyTermNoTraversalMetrics(reg prometheus.Registerer) (*earlyTermNoTraversalMetrics, error) { + pollCountVec := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "poll_count", + Help: "Total # of terminated polls by reason", + }, []string{terminationReason}) + if err := reg.Register(pollCountVec); err != nil { + return nil, fmt.Errorf("%w: %w", errPollCountVectorMetrics, err) + } + durPollsVec := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "poll_duration", + Help: "time (in ns) polls took to complete by reason", + }, []string{terminationReason}) + if err := reg.Register(durPollsVec); err != nil { + return nil, fmt.Errorf("%w: %w", errPollDurationVectorMetrics, err) + } + + return &earlyTermNoTraversalMetrics{ + durExhaustedPolls: durPollsVec.With(exhaustedLabel), + durEarlyFailPolls: durPollsVec.With(earlyFailLabel), + durEarlyAlphaPrefPolls: durPollsVec.With(earlyAlphaPrefLabel), + durEarlyAlphaConfPolls: durPollsVec.With(earlyAlphaConfLabel), + countExhaustedPolls: pollCountVec.With(exhaustedLabel), + countEarlyFailPolls: pollCountVec.With(earlyFailLabel), + countEarlyAlphaPrefPolls: pollCountVec.With(earlyAlphaPrefLabel), + countEarlyAlphaConfPolls: pollCountVec.With(earlyAlphaConfLabel), + }, nil +} + +func (m *earlyTermNoTraversalMetrics) observeExhausted(duration time.Duration) { + m.durExhaustedPolls.Add(float64(duration.Nanoseconds())) + m.countExhaustedPolls.Inc() +} + +func (m *earlyTermNoTraversalMetrics) observeEarlyFail(duration time.Duration) { + 
m.durEarlyFailPolls.Add(float64(duration.Nanoseconds())) + m.countEarlyFailPolls.Inc() +} + +func (m *earlyTermNoTraversalMetrics) observeEarlyAlphaPref(duration time.Duration) { + m.durEarlyAlphaPrefPolls.Add(float64(duration.Nanoseconds())) + m.countEarlyAlphaPrefPolls.Inc() +} + +func (m *earlyTermNoTraversalMetrics) observeEarlyAlphaConf(duration time.Duration) { + m.durEarlyAlphaConfPolls.Add(float64(duration.Nanoseconds())) + m.countEarlyAlphaConfPolls.Inc() +} + type earlyTermNoTraversalFactory struct { alphaPreference int alphaConfidence int + + metrics *earlyTermNoTraversalMetrics } // NewEarlyTermNoTraversalFactory returns a factory that returns polls with // early termination, without doing DAG traversals -func NewEarlyTermNoTraversalFactory(alphaPreference int, alphaConfidence int) Factory { +func NewEarlyTermNoTraversalFactory( + alphaPreference int, + alphaConfidence int, + reg prometheus.Registerer, +) (Factory, error) { + metrics, err := newEarlyTermNoTraversalMetrics(reg) + if err != nil { + return nil, err + } + return &earlyTermNoTraversalFactory{ alphaPreference: alphaPreference, alphaConfidence: alphaConfidence, - } + metrics: metrics, + }, nil } func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { @@ -29,6 +129,8 @@ func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { polled: vdrs, alphaPreference: f.alphaPreference, alphaConfidence: f.alphaConfidence, + metrics: f.metrics, + start: time.Now(), } } @@ -40,6 +142,10 @@ type earlyTermNoTraversalPoll struct { polled bag.Bag[ids.NodeID] alphaPreference int alphaConfidence int + + metrics *earlyTermNoTraversalMetrics + start time.Time + finished bool } // Vote registers a response for this poll @@ -67,20 +173,39 @@ func (p *earlyTermNoTraversalPoll) Drop(vdr ids.NodeID) { // transitive voting. // 4. A single element has achieved an alphaConfidence majority. 
func (p *earlyTermNoTraversalPoll) Finished() bool { + if p.finished { + return true + } + remaining := p.polled.Len() if remaining == 0 { + p.finished = true + p.metrics.observeExhausted(time.Since(p.start)) return true // Case 1 } received := p.votes.Len() maxPossibleVotes := received + remaining if maxPossibleVotes < p.alphaPreference { + p.finished = true + p.metrics.observeEarlyFail(time.Since(p.start)) return true // Case 2 } _, freq := p.votes.Mode() - return freq >= p.alphaPreference && maxPossibleVotes < p.alphaConfidence || // Case 3 - freq >= p.alphaConfidence // Case 4 + if freq >= p.alphaPreference && maxPossibleVotes < p.alphaConfidence { + p.finished = true + p.metrics.observeEarlyAlphaPref(time.Since(p.start)) + return true // Case 3 + } + + if freq >= p.alphaConfidence { + p.finished = true + p.metrics.observeEarlyAlphaConf(time.Since(p.start)) + return true // Case 4 + } + + return false } // Result returns the result of this poll diff --git a/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/snow/consensus/snowman/poll/early_term_no_traversal_test.go index 9d215c246eec..232169c01d41 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -6,18 +6,25 @@ package poll import ( "testing" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/utils/bag" ) +func newEarlyTermNoTraversalTestFactory(require *require.Assertions, alpha int) Factory { + factory, err := NewEarlyTermNoTraversalFactory(alpha, alpha, prometheus.NewRegistry()) + require.NoError(err) + return factory +} + func TestEarlyTermNoTraversalResults(t *testing.T) { require := require.New(t) vdrs := bag.Of(vdr1) // k = 1 alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -31,10 +38,12 @@ func 
TestEarlyTermNoTraversalResults(t *testing.T) { } func TestEarlyTermNoTraversalString(t *testing.T) { + require := require.New(t) + vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -43,7 +52,7 @@ func TestEarlyTermNoTraversalString(t *testing.T) { NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag[ids.ID]: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - require.Equal(t, expected, poll.String()) + require.Equal(expected, poll.String()) } func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { @@ -52,7 +61,7 @@ func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -72,7 +81,7 @@ func TestEarlyTermNoTraversalTerminatesEarlyWithoutAlphaPreference(t *testing.T) vdrs := bag.Of(vdr1, vdr2, vdr3) // k = 3 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Drop(vdr1) @@ -90,7 +99,8 @@ func TestEarlyTermNoTraversalTerminatesEarlyWithAlphaPreference(t *testing.T) { alphaPreference := 3 alphaConfidence := 5 - factory := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence) + factory, err := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence, prometheus.NewRegistry()) + require.NoError(err) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -114,7 +124,8 @@ func TestEarlyTermNoTraversalTerminatesEarlyWithAlphaConfidence(t *testing.T) { alphaPreference := 3 alphaConfidence := 3 - factory := NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence) + factory, err := 
NewEarlyTermNoTraversalFactory(alphaPreference, alphaConfidence, prometheus.NewRegistry()) + require.NoError(err) poll := factory.New(vdrs) poll.Vote(vdr1, blkID1) @@ -138,7 +149,7 @@ func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { vdrs := bag.Of(vdr1, vdr2, vdr3, vdr4) // k = 4 alpha := 4 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr1, blkID2) @@ -160,7 +171,7 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { vdrs := bag.Of(vdr1, vdr2, vdr2) // k = 3 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Vote(vdr2, blkID1) @@ -174,12 +185,14 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { } func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { + require := require.New(t) + vdrs := bag.Of(vdr1, vdr2, vdr2) // k = 3 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) poll := factory.New(vdrs) poll.Drop(vdr2) - require.True(t, poll.Finished()) + require.True(poll.Finished()) } diff --git a/snow/consensus/snowman/poll/set.go b/snow/consensus/snowman/poll/set.go index 9a6b9b2d86e5..aa7e7342542a 100644 --- a/snow/consensus/snowman/poll/set.go +++ b/snow/consensus/snowman/poll/set.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" ) @@ -48,27 +48,24 @@ type set struct { durPolls metric.Averager factory Factory // maps requestID -> poll - polls linkedhashmap.LinkedHashmap[uint32, pollHolder] + polls *linked.Hashmap[uint32, pollHolder] } // 
NewSet returns a new empty set of polls func NewSet( factory Factory, log logging.Logger, - namespace string, reg prometheus.Registerer, ) (Set, error) { numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "polls", - Help: "Number of pending network polls", + Name: "polls", + Help: "Number of pending network polls", }) if err := reg.Register(numPolls); err != nil { return nil, fmt.Errorf("%w: %w", errFailedPollsMetric, err) } durPolls, err := metric.NewAverager( - namespace, "poll_duration", "time (in ns) this poll took to complete", reg, @@ -82,7 +79,7 @@ func NewSet( numPolls: numPolls, durPolls: durPolls, factory: factory, - polls: linkedhashmap.New[uint32, pollHolder](), + polls: linked.NewHashmap[uint32, pollHolder](), }, nil } diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 0717242060d9..d01ab3bb8262 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -30,34 +30,32 @@ var ( func TestNewSetErrorOnPollsMetrics(t *testing.T) { require := require.New(t) - factory := NewEarlyTermNoTraversalFactory(1, 1) + alpha := 1 + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "polls", + Name: "polls", }))) - _, err := NewSet(factory, log, namespace, registerer) + _, err := NewSet(factory, log, registerer) require.ErrorIs(err, errFailedPollsMetric) } func TestNewSetErrorOnPollDurationMetrics(t *testing.T) { require := require.New(t) - factory := NewEarlyTermNoTraversalFactory(1, 1) + alpha := 1 + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: 
namespace, - Name: "poll_duration_count", + Name: "poll_duration_count", }))) - _, err := NewSet(factory, log, namespace, registerer) + _, err := NewSet(factory, log, registerer) require.ErrorIs(err, errFailedPollDurationMetrics) } @@ -67,11 +65,10 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 alpha := 3 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) // create two polls for the two blocks @@ -104,11 +101,10 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 alpha := 3 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) // create two polls for the two blocks @@ -141,11 +137,10 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // k = 3 alpha := 3 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) // create three polls for the two blocks @@ -186,11 +181,10 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 2 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := 
newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) require.Zero(s.Len()) @@ -219,11 +213,10 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { vdrs := bag.Of(vdr1, vdr2) // k = 2 alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) require.Zero(s.Len()) @@ -249,11 +242,10 @@ func TestSetString(t *testing.T) { vdrs := bag.Of(vdr1) // k = 1 alpha := 1 - factory := NewEarlyTermNoTraversalFactory(alpha, alpha) + factory := newEarlyTermNoTraversalTestFactory(require, alpha) log := logging.NoLog{} - namespace := "" registerer := prometheus.NewRegistry() - s, err := NewSet(factory, log, namespace, registerer) + s, err := NewSet(factory, log, registerer) require.NoError(err) expected := `current polls: (Size = 1) diff --git a/snow/consensus/snowman/snowman_block.go b/snow/consensus/snowman/snowman_block.go index 7e8d339d201a..236c93645a02 100644 --- a/snow/consensus/snowman/snowman_block.go +++ b/snow/consensus/snowman/snowman_block.go @@ -5,14 +5,12 @@ package snowman import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" ) // Tracks the state of a snowman block type snowmanBlock struct { - // parameters to initialize the snowball instance with - params snowball.Parameters + t *Topological // block that this node contains. For the genesis, this value will be nil blk Block @@ -38,7 +36,7 @@ func (n *snowmanBlock) AddChild(child Block) { // if the snowball instance is nil, this is the first child. 
So the instance // should be initialized. if n.sb == nil { - n.sb = snowball.NewTree(snowball.SnowballFactory, n.params, childID) + n.sb = snowball.NewTree(snowball.SnowballFactory, n.t.params, childID) n.children = make(map[ids.ID]Block) } else { n.sb.Add(childID) @@ -47,11 +45,8 @@ func (n *snowmanBlock) AddChild(child Block) { n.children[childID] = child } -func (n *snowmanBlock) Accepted() bool { +func (n *snowmanBlock) Decided() bool { // if the block is nil, then this is the genesis which is defined as // accepted - if n.blk == nil { - return true - } - return n.blk.Status() == choices.Accepted + return n.blk == nil || n.blk.Height() <= n.t.lastAcceptedHeight } diff --git a/snow/consensus/snowman/snowmantest/block.go b/snow/consensus/snowman/snowmantest/block.go new file mode 100644 index 000000000000..d77ee9759136 --- /dev/null +++ b/snow/consensus/snowman/snowmantest/block.go @@ -0,0 +1,102 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowmantest + +import ( + "cmp" + "context" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils" +) + +const ( + GenesisHeight uint64 = 0 + GenesisUnixTimestamp int64 = 1 +) + +var ( + _ utils.Sortable[*Block] = (*Block)(nil) + + GenesisID = ids.GenerateTestID() + GenesisTimestamp = time.Unix(GenesisUnixTimestamp, 0) + GenesisBytes = GenesisID[:] + Genesis = BuildChain(1)[0] +) + +func BuildChild(parent *Block) *Block { + blkID := ids.GenerateTestID() + return &Block{ + TestDecidable: choices.TestDecidable{ + IDV: blkID, + StatusV: choices.Processing, + }, + ParentV: parent.ID(), + HeightV: parent.Height() + 1, + TimestampV: parent.Timestamp(), + BytesV: blkID[:], + } +} + +func BuildChain(length int) []*Block { + if length == 0 { + return nil + } + + genesis := &Block{ + TestDecidable: choices.TestDecidable{ + IDV: GenesisID, + StatusV: choices.Accepted, + }, + HeightV: GenesisHeight, + TimestampV: GenesisTimestamp, + BytesV: GenesisBytes, + } + return append([]*Block{genesis}, BuildDescendants(genesis, length-1)...) 
+} + +func BuildDescendants(parent *Block, length int) []*Block { + chain := make([]*Block, length) + for i := range chain { + parent = BuildChild(parent) + chain[i] = parent + } + return chain +} + +type Block struct { + choices.TestDecidable + + ParentV ids.ID + HeightV uint64 + TimestampV time.Time + VerifyV error + BytesV []byte +} + +func (b *Block) Parent() ids.ID { + return b.ParentV +} + +func (b *Block) Height() uint64 { + return b.HeightV +} + +func (b *Block) Timestamp() time.Time { + return b.TimestampV +} + +func (b *Block) Verify(context.Context) error { + return b.VerifyV +} + +func (b *Block) Bytes() []byte { + return b.BytesV +} + +func (b *Block) Compare(other *Block) int { + return cmp.Compare(b.HeightV, other.HeightV) +} diff --git a/snow/consensus/snowman/mock_block.go b/snow/consensus/snowman/snowmantest/mock_block.go similarity index 95% rename from snow/consensus/snowman/mock_block.go rename to snow/consensus/snowman/snowmantest/mock_block.go index 45393bfe7bdb..4c1a4ae7beec 100644 --- a/snow/consensus/snowman/mock_block.go +++ b/snow/consensus/snowman/snowmantest/mock_block.go @@ -3,11 +3,11 @@ // // Generated by this command: // -// mockgen -package=snowman -destination=snow/consensus/snowman/mock_block.go github.com/ava-labs/avalanchego/snow/consensus/snowman Block +// mockgen -package=snowmantest -destination=snow/consensus/snowman/snowmantest/mock_block.go github.com/ava-labs/avalanchego/snow/consensus/snowman Block // -// Package snowman is a generated GoMock package. -package snowman +// Package snowmantest is a generated GoMock package. +package snowmantest import ( context "context" diff --git a/snow/consensus/snowman/test_block.go b/snow/consensus/snowman/test_block.go deleted file mode 100644 index a32872adbb96..000000000000 --- a/snow/consensus/snowman/test_block.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowman - -import ( - "cmp" - "context" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils" -) - -var ( - _ Block = (*TestBlock)(nil) - _ utils.Sortable[*TestBlock] = (*TestBlock)(nil) -) - -// TestBlock is a useful test block -type TestBlock struct { - choices.TestDecidable - - ParentV ids.ID - HeightV uint64 - TimestampV time.Time - VerifyV error - BytesV []byte -} - -func (b *TestBlock) Parent() ids.ID { - return b.ParentV -} - -func (b *TestBlock) Height() uint64 { - return b.HeightV -} - -func (b *TestBlock) Timestamp() time.Time { - return b.TimestampV -} - -func (b *TestBlock) Verify(context.Context) error { - return b.VerifyV -} - -func (b *TestBlock) Bytes() []byte { - return b.BytesV -} - -func (b *TestBlock) Compare(other *TestBlock) int { - return cmp.Compare(b.HeightV, other.HeightV) -} diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index d99ac9c293ad..8c09e2798fcc 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/set" @@ -21,6 +20,7 @@ import ( var ( errDuplicateAdd = errors.New("duplicate block add") + errUnknownParentBlock = errors.New("unknown parent block") errTooManyProcessingBlocks = errors.New("too many processing blocks") errBlockProcessingTooLong = errors.New("block processing too long") @@ -111,7 +111,6 @@ func (ts *Topological) Initialize( ts.metrics, err = newMetrics( ctx.Log, - "", ctx.Registerer, lastAcceptedHeight, lastAcceptedTime, @@ -127,7 +126,7 @@ func (ts *Topological) Initialize( ts.lastAcceptedID = lastAcceptedID ts.lastAcceptedHeight = 
lastAcceptedHeight ts.blocks = map[ids.ID]*snowmanBlock{ - lastAcceptedID: {params: ts.params}, + lastAcceptedID: {t: ts}, } ts.preferredHeights = make(map[uint64]ids.ID) ts.preference = lastAcceptedID @@ -138,7 +137,7 @@ func (ts *Topological) NumProcessing() int { return len(ts.blocks) - 1 } -func (ts *Topological) Add(ctx context.Context, blk Block) error { +func (ts *Topological) Add(blk Block) error { blkID := blk.ID() height := blk.Height() ts.ctx.Log.Verbo("adding block", @@ -146,12 +145,8 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { zap.Uint64("height", height), ) - // Make sure a block is not inserted twice. This enforces the invariant that - // blocks are always added in topological order. Essentially, a block that - // is being added should never have a child that was already added. - // Additionally, this prevents any edge cases that may occur due to adding - // different blocks with the same ID. - if ts.Decided(blk) || ts.Processing(blkID) { + // Make sure a block is not inserted twice. + if ts.Processing(blkID) { return errDuplicateAdd } @@ -161,27 +156,14 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { parentID := blk.Parent() parentNode, ok := ts.blocks[parentID] if !ok { - ts.ctx.Log.Verbo("block ancestor is missing, being rejected", - zap.Stringer("blkID", blkID), - zap.Uint64("height", height), - zap.Stringer("parentID", parentID), - ) - - // If the ancestor is missing, this means the ancestor must have already - // been pruned. Therefore, the dependent should be transitively - // rejected. 
- if err := blk.Reject(ctx); err != nil { - return err - } - ts.metrics.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) - return nil + return errUnknownParentBlock } // add the block as a child of its parent, and add the block to the tree parentNode.AddChild(blk) ts.blocks[blkID] = &snowmanBlock{ - params: ts.params, - blk: blk, + t: ts, + blk: blk, } // If we are extending the preference, this is the new preference @@ -199,16 +181,6 @@ func (ts *Topological) Add(ctx context.Context, blk Block) error { return nil } -func (ts *Topological) Decided(blk Block) bool { - // If the block is decided, then it must have been previously issued. - if blk.Status().Decided() { - return true - } - // If the block is marked as fetched, we can check if it has been - // transitively rejected. - return blk.Status() == choices.Processing && blk.Height() <= ts.lastAcceptedHeight -} - func (ts *Topological) Processing(blkID ids.ID) bool { // The last accepted block is in the blocks map, so we first must ensure the // requested block isn't the last accepted block. @@ -221,12 +193,8 @@ func (ts *Topological) Processing(blkID ids.ID) bool { return ok } -func (ts *Topological) IsPreferred(blk Block) bool { - // If the block is accepted, then it must be transitively preferred. - if blk.Status() == choices.Accepted { - return true - } - return ts.preferredIDs.Contains(blk.ID()) +func (ts *Topological) IsPreferred(blkID ids.ID) bool { + return blkID == ts.lastAcceptedID || ts.preferredIDs.Contains(blkID) } func (ts *Topological) LastAccepted() (ids.ID, uint64) { @@ -310,7 +278,12 @@ func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) // Runtime = |live set| ; Space = Constant // Traverse from the preferred ID to the last accepted ancestor. - for block := startBlock; !block.Accepted(); { + // + // It is guaranteed that the first decided block we encounter is the last + // accepted block because the startBlock is the preferred block. 
The + // preferred block is guaranteed to either be the last accepted block or + // extend the accepted chain. + for block := startBlock; !block.Decided(); { blkID := block.blk.ID() ts.preferredIDs.Add(blkID) ts.preferredHeights[block.blk.Height()] = blkID @@ -381,7 +354,7 @@ func (ts *Topological) calculateInDegree(votes bag.Bag[ids.ID]) { } // If the vote is for the last accepted block, the vote is dropped - if votedBlock.Accepted() { + if votedBlock.Decided() { continue } @@ -405,24 +378,21 @@ func (ts *Topological) calculateInDegree(votes bag.Bag[ids.ID]) { // iterate through all the block's ancestors and set up the inDegrees of // the blocks - for n := ts.blocks[parentID]; !n.Accepted(); n = ts.blocks[parentID] { + for n := ts.blocks[parentID]; !n.Decided(); n = ts.blocks[parentID] { parentID = n.blk.Parent() // Increase the inDegree by one - kahn := ts.kahnNodes[parentID] + kahn, previouslySeen := ts.kahnNodes[parentID] kahn.inDegree++ ts.kahnNodes[parentID] = kahn // If we have already seen this block, then we shouldn't increase // the inDegree of the ancestors through this block again. - if kahn.inDegree != 1 { + if previouslySeen { + // Nodes are only leaves if they have no inbound edges. + ts.leaves.Remove(parentID) break } - - // If I am transitively seeing this block for the first time, either - // the block was previously unknown or it was previously a leaf. - // Regardless, it shouldn't be tracked as a leaf. 
- ts.leaves.Remove(parentID) } } } @@ -452,7 +422,7 @@ func (ts *Topological) pushVotes() []votes { // If the block is accepted, then we don't need to push votes to the // parent block - if block.Accepted() { + if block.Decided() { continue } diff --git a/snow/consensus/snowman/traced_consensus.go b/snow/consensus/snowman/traced_consensus.go index 10f49229fe16..d2d5f197a65c 100644 --- a/snow/consensus/snowman/traced_consensus.go +++ b/snow/consensus/snowman/traced_consensus.go @@ -29,16 +29,6 @@ func Trace(consensus Consensus, tracer trace.Tracer) Consensus { } } -func (c *tracedConsensus) Add(ctx context.Context, blk Block) error { - ctx, span := c.tracer.Start(ctx, "tracedConsensus.Add", oteltrace.WithAttributes( - attribute.Stringer("blkID", blk.ID()), - attribute.Int64("height", int64(blk.Height())), - )) - defer span.End() - - return c.Consensus.Add(ctx, blk) -} - func (c *tracedConsensus) RecordPoll(ctx context.Context, votes bag.Bag[ids.ID]) error { ctx, span := c.tracer.Start(ctx, "tracedConsensus.RecordPoll", oteltrace.WithAttributes( attribute.Int("numVotes", votes.Len()), diff --git a/snow/context.go b/snow/context.go index 2cbbedb38b47..26fc67f213a8 100644 --- a/snow/context.go +++ b/snow/context.go @@ -46,7 +46,7 @@ type Context struct { Keystore keystore.BlockchainKeystore SharedMemory atomic.SharedMemory BCLookup ids.AliaserReader - Metrics metrics.OptionalGatherer + Metrics metrics.MultiGatherer WarpSigner warp.Signer @@ -65,15 +65,12 @@ type Registerer interface { type ConsensusContext struct { *Context - // Registers all common and snowman consensus metrics. Unlike the avalanche - // consensus engine metrics, we do not prefix the name with the engine name, - // as snowman is used for all chains by default. + // PrimaryAlias is the primary alias of the chain this context exists + // within. + PrimaryAlias string + + // Registers all consensus metrics. Registerer Registerer - // Only used to register Avalanche consensus metrics. 
Previously, all - // metrics were prefixed with "avalanche_{chainID}_". Now we add avalanche - // to the prefix, "avalanche_{chainID}_avalanche_", to differentiate - // consensus operations after the DAG linearization. - AvalancheRegisterer Registerer // BlockAcceptor is the callback that will be fired whenever a VM is // notified that their block was accepted. diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index cd530d1cb1f8..00f9ab64a458 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -6,7 +6,9 @@ package bootstrap import ( "context" "fmt" + "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" @@ -18,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/heap" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -38,6 +39,8 @@ const ( // maxOutstandingGetAncestorsRequests is the maximum number of GetAncestors // sent but not yet responded to/failed maxOutstandingGetAncestorsRequests = 10 + + epsilon = 1e-6 // small amount to add to time to avoid division by 0 ) var _ common.BootstrapableEngine = (*bootstrapper)(nil) @@ -45,6 +48,7 @@ var _ common.BootstrapableEngine = (*bootstrapper)(nil) func New( config Config, onFinished func(ctx context.Context, lastReqID uint32) error, + reg prometheus.Registerer, ) (common.BootstrapableEngine, error) { b := &bootstrapper{ Config: config, @@ -58,12 +62,13 @@ func New( ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), AppHandler: config.VM, - outstandingRequests: bimap.New[common.Request, ids.ID](), + outstandingRequests: bimap.New[common.Request, ids.ID](), + outstandingRequestTimes: make(map[common.Request]time.Time), processedCache: 
&cache.LRU[ids.ID, struct{}]{Size: cacheSize}, onFinished: onFinished, } - return b, b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer) + return b, b.metrics.Initialize(reg) } // Note: To align with the Snowman invariant, it should be guaranteed the VM is @@ -85,7 +90,8 @@ type bootstrapper struct { metrics // tracks which validators were asked for which containers in which requests - outstandingRequests *bimap.BiMap[common.Request, ids.ID] + outstandingRequests *bimap.BiMap[common.Request, ids.ID] + outstandingRequestTimes map[common.Request]time.Time // IDs of vertices that we will send a GetAncestors request for once we are // not at the max number of outstanding requests @@ -125,84 +131,76 @@ func (b *bootstrapper) Clear(context.Context) error { // response to a GetAncestors message to [nodeID] with request ID [requestID]. // Expects vtxs[0] to be the vertex requested in the corresponding GetAncestors. func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxs [][]byte) error { + request := common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + requestedVtxID, ok := b.outstandingRequests.DeleteKey(request) + if !ok { // this message isn't in response to a request we made + b.Ctx.Log.Debug("received unexpected Ancestors", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return nil + } + requestTime := b.outstandingRequestTimes[request] + delete(b.outstandingRequestTimes, request) + lenVtxs := len(vtxs) if lenVtxs == 0 { b.Ctx.Log.Debug("Ancestors contains no vertices", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) - return b.GetAncestorsFailed(ctx, nodeID, requestID) + + b.PeerTracker.RegisterFailure(nodeID) + return b.fetch(ctx, requestedVtxID) } + if lenVtxs > b.Config.AncestorsMaxContainersReceived { + vtxs = vtxs[:b.Config.AncestorsMaxContainersReceived] + b.Ctx.Log.Debug("ignoring containers in Ancestors", zap.Stringer("nodeID", nodeID), 
zap.Uint32("requestID", requestID), zap.Int("numIgnored", lenVtxs-b.Config.AncestorsMaxContainersReceived), ) - - vtxs = vtxs[:b.Config.AncestorsMaxContainersReceived] } - requestedVtxID, requested := b.outstandingRequests.DeleteKey(common.Request{ - NodeID: nodeID, - RequestID: requestID, - }) - vtx, err := b.Manager.ParseVtx(ctx, vtxs[0]) // first vertex should be the one we requested in GetAncestors request + vtx, err := b.Manager.ParseVtx(ctx, vtxs[0]) if err != nil { - if !requested { - b.Ctx.Log.Debug("failed to parse unrequested vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Error(err), - ) - return nil - } - if b.Ctx.Log.Enabled(logging.Verbo) { - b.Ctx.Log.Verbo("failed to parse requested vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("vtxID", requestedVtxID), - zap.Binary("vtxBytes", vtxs[0]), - zap.Error(err), - ) - } else { - b.Ctx.Log.Debug("failed to parse requested vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Stringer("vtxID", requestedVtxID), - zap.Error(err), - ) - } - return b.fetch(ctx, requestedVtxID) - } - - vtxID := vtx.ID() - // If the vertex is neither the requested vertex nor a needed vertex, return early and re-fetch if necessary - if requested && requestedVtxID != vtxID { - b.Ctx.Log.Debug("received incorrect vertex", + b.Ctx.Log.Debug("failed to parse requested vertex", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), - zap.Stringer("vtxID", vtxID), + zap.Stringer("vtxID", requestedVtxID), + zap.Error(err), ) + + b.PeerTracker.RegisterFailure(nodeID) return b.fetch(ctx, requestedVtxID) } - if !requested && !b.outstandingRequests.HasValue(vtxID) && !b.needToFetch.Contains(vtxID) { - b.Ctx.Log.Debug("received un-needed vertex", + + if actualID := vtx.ID(); actualID != requestedVtxID { + b.Ctx.Log.Debug("received incorrect vertex", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", 
requestID), - zap.Stringer("vtxID", vtxID), + zap.Stringer("vtxID", actualID), ) - return nil + + b.PeerTracker.RegisterFailure(nodeID) + return b.fetch(ctx, requestedVtxID) } - // Do not remove from outstanding requests if this did not answer a specific outstanding request - // to ensure that real responses are not dropped in favor of potentially byzantine Ancestors messages that - // could force the node to bootstrap 1 vertex at a time. - b.needToFetch.Remove(vtxID) + b.needToFetch.Remove(requestedVtxID) + + // All vertices added to [verticesToProcess] have received transitive votes + // from the accepted frontier. + var ( + numBytes = len(vtxs[0]) + verticesToProcess = make([]avalanche.Vertex, 1, len(vtxs)) + ) + verticesToProcess[0] = vtx - // All vertices added to [processVertices] have received transitive votes from the accepted frontier - processVertices := make([]avalanche.Vertex, 1, len(vtxs)) // Process all of the valid vertices in this message - processVertices[0] = vtx parents, err := vtx.Parents() if err != nil { return err @@ -212,7 +210,7 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request eligibleVertices.Add(parent.ID()) } - for _, vtxBytes := range vtxs[1:] { // Parse/persist all the vertices + for _, vtxBytes := range vtxs[1:] { vtx, err := b.Manager.ParseVtx(ctx, vtxBytes) // Persists the vtx if err != nil { b.Ctx.Log.Debug("failed to parse vertex", @@ -220,12 +218,6 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request zap.Uint32("requestID", requestID), zap.Error(err), ) - b.Ctx.Log.Debug("failed to parse vertex", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - zap.Binary("vtxBytes", vtxBytes), - zap.Error(err), - ) break } vtxID := vtx.ID() @@ -245,26 +237,41 @@ func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request for _, parent := range parents { eligibleVertices.Add(parent.ID()) } - processVertices = append(processVertices, 
vtx) + + numBytes += len(vtxBytes) + verticesToProcess = append(verticesToProcess, vtx) b.needToFetch.Remove(vtxID) // No need to fetch this vertex since we have it now } - return b.process(ctx, processVertices...) + // TODO: Calculate bandwidth based on the vertices that were persisted to + // disk. + var ( + requestLatency = time.Since(requestTime).Seconds() + epsilon + bandwidth = float64(numBytes) / requestLatency + ) + b.PeerTracker.RegisterResponse(nodeID, bandwidth) + + return b.process(ctx, verticesToProcess...) } func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - vtxID, ok := b.outstandingRequests.DeleteKey(common.Request{ + request := common.Request{ NodeID: nodeID, RequestID: requestID, - }) + } + vtxID, ok := b.outstandingRequests.DeleteKey(request) if !ok { - b.Ctx.Log.Debug("skipping GetAncestorsFailed call", - zap.String("reason", "no matching outstanding request"), + b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } + delete(b.outstandingRequestTimes, request) + + // This node timed out their request. 
+ b.PeerTracker.RegisterFailure(nodeID) + // Send another request for the vertex return b.fetch(ctx, vtxID) } @@ -325,7 +332,6 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { if err := b.VtxBlocked.SetParser(ctx, &vtxParser{ log: b.Ctx.Log, numAccepted: b.numAcceptedVts, - numDropped: b.numDroppedVts, manager: b.Manager, }); err != nil { return err @@ -334,7 +340,6 @@ func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { if err := b.TxBlocked.SetParser(&txParser{ log: b.Ctx.Log, numAccepted: b.numAcceptedTxs, - numDropped: b.numDroppedTxs, vm: b.VM, }); err != nil { return err @@ -413,21 +418,25 @@ func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { continue } - validatorIDs, err := b.Config.Beacons.Sample(b.Ctx.SubnetID, 1) // validator to send request to - if err != nil { - return fmt.Errorf("dropping request for %s as there are no validators", vtxID) + nodeID, ok := b.PeerTracker.SelectPeer() + if !ok { + // If we aren't connected to any peers, we send a request to ourself + // which is guaranteed to fail. We send this message to use the + // message timeout as a retry mechanism. Once we are connected to + // another node again we will select them to sample from. 
+ nodeID = b.Ctx.NodeID } - validatorID := validatorIDs[0] - b.requestID++ - b.outstandingRequests.Put( - common.Request{ - NodeID: validatorID, - RequestID: b.requestID, - }, - vtxID, - ) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, vtxID) // request vertex and ancestors + b.PeerTracker.RegisterRequest(nodeID) + + b.requestID++ + request := common.Request{ + NodeID: nodeID, + RequestID: b.requestID, + } + b.outstandingRequests.Put(request, vtxID) + b.outstandingRequestTimes[request] = time.Now() + b.Config.Sender.SendGetAncestors(ctx, nodeID, b.requestID, vtxID) // request vertex and ancestors } return b.checkFinish(ctx) } @@ -475,7 +484,6 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er pushed, err := b.VtxBlocked.Push(ctx, &vertexJob{ log: b.Ctx.Log, numAccepted: b.numAcceptedVts, - numDropped: b.numDroppedVts, vtx: vtx, }) if err != nil { @@ -497,7 +505,6 @@ func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) er pushed, err := b.TxBlocked.Push(ctx, &txJob{ log: b.Ctx.Log, numAccepted: b.numAcceptedTxs, - numDropped: b.numDroppedTxs, tx: tx, }) if err != nil { diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index 133e90519b66..2792e8682f61 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -10,25 +10,30 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" 
"github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap/queue" "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" + + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" ) var ( @@ -71,27 +76,38 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te peer := ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, peer, nil, ids.Empty, 1)) - vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.AvalancheRegisterer) + vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", prometheus.NewRegistry()) require.NoError(err) - txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.AvalancheRegisterer) + txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", prometheus.NewRegistry()) require.NoError(err) peerTracker := tracker.NewPeers() totalWeight, err := vdrs.TotalWeight(constants.PrimaryNetworkID) require.NoError(err) startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) - vdrs.RegisterCallbackListener(constants.PrimaryNetworkID, startupTracker) + vdrs.RegisterSetCallbackListener(constants.PrimaryNetworkID, startupTracker) + + avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 2000, prometheus.NewRegistry()) + require.NoError(err) - avaGetHandler, err := getter.New(manager, sender, ctx.Log, time.Second, 
2000, ctx.AvalancheRegisterer) + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) require.NoError(err) + p2pTracker.Connected(peer, version.CurrentApp) + return Config{ AllGetsServer: avaGetHandler, Ctx: ctx, - Beacons: vdrs, StartupTracker: startupTracker, Sender: sender, + PeerTracker: p2pTracker, AncestorsMaxContainersReceived: 2000, VtxBlocked: vtxBlocker, TxBlocked: txBlocker, @@ -151,11 +167,12 @@ func TestBootstrapperSingleFrontier(t *testing.T) { config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + Type: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, State: snow.NormalOp, }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -257,11 +274,12 @@ func TestBootstrapperByzantineResponses(t *testing.T) { config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + Type: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, State: snow.NormalOp, }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -423,11 +441,12 @@ func TestBootstrapperTxDependencies(t *testing.T) { config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + Type: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, State: snow.NormalOp, }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -546,11 +565,12 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + Type: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, State: snow.NormalOp, }) return nil }, + prometheus.NewRegistry(), ) require.NoError(err) @@ -620,111 +640,3 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { require.Equal(choices.Accepted, vtx1.Status()) require.Equal(choices.Accepted, 
vtx2.Status()) } - -func TestBootstrapperUnexpectedVertex(t *testing.T) { - require := require.New(t) - - config, peerID, sender, manager, vm := newConfig(t) - - vtxID0 := ids.Empty.Prefix(0) - vtxID1 := ids.Empty.Prefix(1) - - vtxBytes0 := []byte{0} - vtxBytes1 := []byte{1} - - vtx0 := &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Unknown, - }, - HeightV: 0, - BytesV: vtxBytes0, - } - vtx1 := &avalanche.TestVertex{ // vtx1 is the stop vertex - TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Unknown, - }, - ParentsV: []avalanche.Vertex{vtx0}, - HeightV: 1, - BytesV: vtxBytes1, - } - - config.StopVertexID = vtxID1 - bs, err := New( - config, - func(context.Context, uint32) error { - config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - State: snow.NormalOp, - }) - return nil - }, - ) - require.NoError(err) - - parsedVtx0 := false - parsedVtx1 := false - manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - if parsedVtx0 { - return vtx0, nil - } - return nil, errUnknownVertex - case vtxID1: - if parsedVtx1 { - return vtx1, nil - } - return nil, errUnknownVertex - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - } - manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { - switch { - case bytes.Equal(vtxBytes, vtxBytes0): - vtx0.StatusV = choices.Processing - parsedVtx0 = true - return vtx0, nil - case bytes.Equal(vtxBytes, vtxBytes1): - vtx1.StatusV = choices.Processing - parsedVtx1 = true - return vtx1, nil - default: - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - } - - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - require.Equal(peerID, vdr) - requestIDs[vtxID] = reqID - } - - vm.CantSetState = false - 
require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 - require.Contains(requestIDs, vtxID1) - - reqID := requestIDs[vtxID1] - clear(requestIDs) - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes0})) - require.Contains(requestIDs, vtxID1) - - manager.EdgeF = func(context.Context) []ids.ID { - require.Equal(choices.Accepted, vtx1.Status()) - return []ids.ID{vtxID1} - } - - vm.LinearizeF = func(_ context.Context, stopVertexID ids.ID) error { - require.Equal(vtxID1, stopVertexID) - return nil - } - - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0})) - require.Equal(choices.Accepted, vtx0.Status()) - require.Equal(choices.Accepted, vtx1.Status()) - require.Equal(snow.NormalOp, config.Ctx.State.Get().State) -} diff --git a/snow/engine/avalanche/bootstrap/config.go b/snow/engine/avalanche/bootstrap/config.go index a674c2758460..ec1baccc390b 100644 --- a/snow/engine/avalanche/bootstrap/config.go +++ b/snow/engine/avalanche/bootstrap/config.go @@ -5,23 +5,25 @@ package bootstrap import ( "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap/queue" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" - "github.com/ava-labs/avalanchego/snow/validators" ) type Config struct { common.AllGetsServer - Ctx *snow.ConsensusContext - Beacons validators.Manager + Ctx *snow.ConsensusContext StartupTracker tracker.Startup Sender common.Sender + // PeerTracker manages the set of nodes that we fetch the next block from. 
+ PeerTracker *p2p.PeerTracker + // This node will only consider the first [AncestorsMaxContainersReceived] // containers in an ancestors message it receives. AncestorsMaxContainersReceived int diff --git a/snow/engine/avalanche/bootstrap/metrics.go b/snow/engine/avalanche/bootstrap/metrics.go index cc357f25901f..fdf68f5ecff2 100644 --- a/snow/engine/avalanche/bootstrap/metrics.go +++ b/snow/engine/avalanche/bootstrap/metrics.go @@ -10,52 +10,33 @@ import ( ) type metrics struct { - numFetchedVts, numDroppedVts, numAcceptedVts, - numFetchedTxs, numDroppedTxs, numAcceptedTxs prometheus.Counter + numFetchedVts, numAcceptedVts, + numFetchedTxs, numAcceptedTxs prometheus.Counter } -func (m *metrics) Initialize( - namespace string, - registerer prometheus.Registerer, -) error { +func (m *metrics) Initialize(registerer prometheus.Registerer) error { m.numFetchedVts = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "fetched_vts", - Help: "Number of vertices fetched during bootstrapping", - }) - m.numDroppedVts = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "dropped_vts", - Help: "Number of vertices dropped during bootstrapping", + Name: "bs_fetched_vts", + Help: "Number of vertices fetched during bootstrapping", }) m.numAcceptedVts = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accepted_vts", - Help: "Number of vertices accepted during bootstrapping", + Name: "bs_accepted_vts", + Help: "Number of vertices accepted during bootstrapping", }) m.numFetchedTxs = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "fetched_txs", - Help: "Number of transactions fetched during bootstrapping", - }) - m.numDroppedTxs = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "dropped_txs", - Help: "Number of transactions dropped during bootstrapping", + Name: "bs_fetched_txs", + Help: "Number of transactions fetched during bootstrapping", 
}) m.numAcceptedTxs = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accepted_txs", - Help: "Number of transactions accepted during bootstrapping", + Name: "bs_accepted_txs", + Help: "Number of transactions accepted during bootstrapping", }) return utils.Err( registerer.Register(m.numFetchedVts), - registerer.Register(m.numDroppedVts), registerer.Register(m.numAcceptedVts), registerer.Register(m.numFetchedTxs), - registerer.Register(m.numDroppedTxs), registerer.Register(m.numAcceptedTxs), ) } diff --git a/snow/engine/common/queue/job.go b/snow/engine/avalanche/bootstrap/queue/job.go similarity index 100% rename from snow/engine/common/queue/job.go rename to snow/engine/avalanche/bootstrap/queue/job.go diff --git a/snow/engine/common/queue/jobs.go b/snow/engine/avalanche/bootstrap/queue/jobs.go similarity index 100% rename from snow/engine/common/queue/jobs.go rename to snow/engine/avalanche/bootstrap/queue/jobs.go diff --git a/snow/engine/common/queue/jobs_test.go b/snow/engine/avalanche/bootstrap/queue/jobs_test.go similarity index 100% rename from snow/engine/common/queue/jobs_test.go rename to snow/engine/avalanche/bootstrap/queue/jobs_test.go diff --git a/snow/engine/common/queue/parser.go b/snow/engine/avalanche/bootstrap/queue/parser.go similarity index 100% rename from snow/engine/common/queue/parser.go rename to snow/engine/avalanche/bootstrap/queue/parser.go diff --git a/snow/engine/common/queue/state.go b/snow/engine/avalanche/bootstrap/queue/state.go similarity index 100% rename from snow/engine/common/queue/state.go rename to snow/engine/avalanche/bootstrap/queue/state.go diff --git a/snow/engine/common/queue/test_job.go b/snow/engine/avalanche/bootstrap/queue/test_job.go similarity index 100% rename from snow/engine/common/queue/test_job.go rename to snow/engine/avalanche/bootstrap/queue/test_job.go diff --git a/snow/engine/common/queue/test_parser.go b/snow/engine/avalanche/bootstrap/queue/test_parser.go similarity 
index 100% rename from snow/engine/common/queue/test_parser.go rename to snow/engine/avalanche/bootstrap/queue/test_parser.go diff --git a/snow/engine/avalanche/bootstrap/tx_job.go b/snow/engine/avalanche/bootstrap/tx_job.go index 5a2ff3d98f64..62a664ddc157 100644 --- a/snow/engine/avalanche/bootstrap/tx_job.go +++ b/snow/engine/avalanche/bootstrap/tx_job.go @@ -14,8 +14,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap/queue" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) @@ -23,9 +23,9 @@ import ( var errMissingTxDependenciesOnAccept = errors.New("attempting to accept a transaction with missing dependencies") type txParser struct { - log logging.Logger - numAccepted, numDropped prometheus.Counter - vm vertex.LinearizableVM + log logging.Logger + numAccepted prometheus.Counter + vm vertex.LinearizableVM } func (p *txParser) Parse(ctx context.Context, txBytes []byte) (queue.Job, error) { @@ -36,15 +36,14 @@ func (p *txParser) Parse(ctx context.Context, txBytes []byte) (queue.Job, error) return &txJob{ log: p.log, numAccepted: p.numAccepted, - numDropped: p.numDropped, tx: tx, }, nil } type txJob struct { - log logging.Logger - numAccepted, numDropped prometheus.Counter - tx snowstorm.Tx + log logging.Logger + numAccepted prometheus.Counter + tx snowstorm.Tx } func (t *txJob) ID() ids.ID { @@ -67,14 +66,12 @@ func (t *txJob) Execute(ctx context.Context) error { return err } if hasMissingDeps { - t.numDropped.Inc() return errMissingTxDependenciesOnAccept } status := t.tx.Status() switch status { case choices.Unknown, choices.Rejected: - t.numDropped.Inc() return fmt.Errorf("attempting to execute transaction with status %s", 
status) case choices.Processing: txID := t.tx.ID() diff --git a/snow/engine/avalanche/bootstrap/vertex_job.go b/snow/engine/avalanche/bootstrap/vertex_job.go index 8860b61d816a..a9326c08fc78 100644 --- a/snow/engine/avalanche/bootstrap/vertex_job.go +++ b/snow/engine/avalanche/bootstrap/vertex_job.go @@ -14,18 +14,21 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/bootstrap/queue" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) -var errMissingVtxDependenciesOnAccept = errors.New("attempting to execute blocked vertex") +var ( + errMissingVtxDependenciesOnAccept = errors.New("attempting to execute blocked vertex") + errTxNotAcceptedInVtxOnAccept = errors.New("attempting to execute vertex with non-accepted transaction") +) type vtxParser struct { - log logging.Logger - numAccepted, numDropped prometheus.Counter - manager vertex.Manager + log logging.Logger + numAccepted prometheus.Counter + manager vertex.Manager } func (p *vtxParser) Parse(ctx context.Context, vtxBytes []byte) (queue.Job, error) { @@ -36,15 +39,14 @@ func (p *vtxParser) Parse(ctx context.Context, vtxBytes []byte) (queue.Job, erro return &vertexJob{ log: p.log, numAccepted: p.numAccepted, - numDropped: p.numDropped, vtx: vtx, }, nil } type vertexJob struct { - log logging.Logger - numAccepted, numDropped prometheus.Counter - vtx avalanche.Vertex + log logging.Logger + numAccepted prometheus.Counter + vtx avalanche.Vertex } func (v *vertexJob) ID() ids.ID { @@ -85,7 +87,6 @@ func (v *vertexJob) Execute(ctx context.Context) error { return err } if hasMissingDependencies { - v.numDropped.Inc() return errMissingVtxDependenciesOnAccept } txs, err := v.vtx.Txs(ctx) @@ -94,15 
+95,12 @@ func (v *vertexJob) Execute(ctx context.Context) error { } for _, tx := range txs { if tx.Status() != choices.Accepted { - v.numDropped.Inc() - v.log.Warn("attempting to execute vertex with non-accepted transactions") - return nil + return errTxNotAcceptedInVtxOnAccept } } status := v.vtx.Status() switch status { case choices.Unknown, choices.Rejected: - v.numDropped.Inc() return fmt.Errorf("attempting to execute vertex with status %s", status) case choices.Processing: v.numAccepted.Inc() diff --git a/snow/engine/avalanche/getter/getter.go b/snow/engine/avalanche/getter/getter.go index a8e35fddfd64..1e1105c7675b 100644 --- a/snow/engine/avalanche/getter/getter.go +++ b/snow/engine/avalanche/getter/getter.go @@ -44,8 +44,7 @@ func New( var err error gh.getAncestorsVtxs, err = metric.NewAverager( - "bs", - "get_ancestors_vtxs", + "bs_get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", reg, ) @@ -82,27 +81,23 @@ func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, return nil } -// TODO: Remove support for GetAcceptedFrontier messages after v1.11.x is -// activated. -func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { - acceptedFrontier := gh.storage.Edge(ctx) - // Since all the DAGs are linearized, we only need to return the stop - // vertex. - if len(acceptedFrontier) > 0 { - gh.sender.SendAcceptedFrontier(ctx, validatorID, requestID, acceptedFrontier[0]) - } +func (gh *getter) GetAcceptedFrontier(_ context.Context, nodeID ids.NodeID, requestID uint32) error { + gh.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.GetAcceptedFrontierOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) return nil } -// TODO: Remove support for GetAccepted messages after v1.11.x is activated. 
-func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs set.Set[ids.ID]) error { - acceptedVtxIDs := make([]ids.ID, 0, containerIDs.Len()) - for vtxID := range containerIDs { - if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil && vtx.Status() == choices.Accepted { - acceptedVtxIDs = append(acceptedVtxIDs, vtxID) - } - } - gh.sender.SendAccepted(ctx, nodeID, requestID, acceptedVtxIDs) +func (gh *getter) GetAccepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ set.Set[ids.ID]) error { + gh.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.GetAcceptedOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) return nil } @@ -158,10 +153,12 @@ func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID return nil } -func (gh *getter) Get(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { - // If this engine has access to the requested vertex, provide it - if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil { - gh.sender.SendPut(ctx, nodeID, requestID, vtx.Bytes()) - } +func (gh *getter) Get(_ context.Context, nodeID ids.NodeID, requestID uint32, _ ids.ID) error { + gh.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.GetOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) return nil } diff --git a/snow/engine/avalanche/getter/getter_test.go b/snow/engine/avalanche/getter/getter_test.go deleted file mode 100644 index c052d0bc3a83..000000000000 --- a/snow/engine/avalanche/getter/getter_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package getter - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" -) - -var errUnknownVertex = errors.New("unknown vertex") - -func newTest(t *testing.T) (common.AllGetsServer, *vertex.TestManager, *common.SenderTest) { - manager := vertex.NewTestManager(t) - manager.Default(true) - - sender := &common.SenderTest{ - T: t, - } - sender.Default(true) - - bs, err := New( - manager, - sender, - logging.NoLog{}, - time.Second, - 2000, - prometheus.NewRegistry(), - ) - require.NoError(t, err) - - return bs, manager, sender -} - -func TestAcceptedFrontier(t *testing.T) { - require := require.New(t) - bs, manager, sender := newTest(t) - - vtxID := ids.GenerateTestID() - manager.EdgeF = func(context.Context) []ids.ID { - return []ids.ID{ - vtxID, - } - } - - var accepted ids.ID - sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, containerID ids.ID) { - accepted = containerID - } - require.NoError(bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0)) - require.Equal(vtxID, accepted) -} - -func TestFilterAccepted(t *testing.T) { - require := require.New(t) - bs, manager, sender := newTest(t) - - vtxID0 := ids.GenerateTestID() - vtxID1 := ids.GenerateTestID() - vtxID2 := ids.GenerateTestID() - - vtx0 := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: vtxID0, - StatusV: choices.Accepted, - }} - vtx1 := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ - IDV: vtxID1, - StatusV: choices.Accepted, - }} - - manager.GetVtxF = func(_ context.Context, 
vtxID ids.ID) (avalanche.Vertex, error) { - switch vtxID { - case vtxID0: - return vtx0, nil - case vtxID1: - return vtx1, nil - case vtxID2: - return nil, errUnknownVertex - } - require.FailNow(errUnknownVertex.Error()) - return nil, errUnknownVertex - } - - var accepted []ids.ID - sender.SendAcceptedF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier []ids.ID) { - accepted = frontier - } - - vtxIDs := set.Of(vtxID0, vtxID1, vtxID2) - require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs)) - - require.Contains(accepted, vtxID0) - require.Contains(accepted, vtxID1) - require.NotContains(accepted, vtxID2) -} diff --git a/snow/engine/avalanche/vertex/codec.go b/snow/engine/avalanche/vertex/codec.go index 12f387d0d25d..3a55f443467e 100644 --- a/snow/engine/avalanche/vertex/codec.go +++ b/snow/engine/avalanche/vertex/codec.go @@ -4,8 +4,6 @@ package vertex import ( - "time" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/codec/reflectcodec" @@ -24,8 +22,8 @@ const ( var Codec codec.Manager func init() { - lc0 := linearcodec.New(time.Time{}, []string{reflectcodec.DefaultTagName + "V0"}, maxSize) - lc1 := linearcodec.New(time.Time{}, []string{reflectcodec.DefaultTagName + "V1"}, maxSize) + lc0 := linearcodec.New([]string{reflectcodec.DefaultTagName + "V0"}) + lc1 := linearcodec.New([]string{reflectcodec.DefaultTagName + "V1"}) Codec = codec.NewManager(maxSize) err := utils.Err( diff --git a/snow/engine/avalanche/vertex/mock_vm.go b/snow/engine/avalanche/vertex/mock_vm.go index 7ad293f6313f..18903544bd6f 100644 --- a/snow/engine/avalanche/vertex/mock_vm.go +++ b/snow/engine/avalanche/vertex/mock_vm.go @@ -364,20 +364,6 @@ func (mr *MockLinearizableVMMockRecorder) Shutdown(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockLinearizableVM)(nil).Shutdown), arg0) } -// VerifyHeightIndex mocks base 
method. -func (m *MockLinearizableVM) VerifyHeightIndex(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyHeightIndex", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// VerifyHeightIndex indicates an expected call of VerifyHeightIndex. -func (mr *MockLinearizableVMMockRecorder) VerifyHeightIndex(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeightIndex", reflect.TypeOf((*MockLinearizableVM)(nil).VerifyHeightIndex), arg0) -} - // Version mocks base method. func (m *MockLinearizableVM) Version(arg0 context.Context) (string, error) { m.ctrl.T.Helper() diff --git a/snow/engine/common/appsender/appsender_client.go b/snow/engine/common/appsender/appsender_client.go index ed1248d2aac8..dcf2ef3dc558 100644 --- a/snow/engine/common/appsender/appsender_client.go +++ b/snow/engine/common/appsender/appsender_client.go @@ -105,28 +105,25 @@ func (c *Client) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID return err } -func (c *Client) SendAppGossip(ctx context.Context, msg []byte) error { - _, err := c.client.SendAppGossip( - ctx, - &appsenderpb.SendAppGossipMsg{ - Msg: msg, - }, - ) - return err -} - -func (c *Client) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], msg []byte) error { - nodeIDsBytes := make([][]byte, nodeIDs.Len()) +func (c *Client) SendAppGossip( + ctx context.Context, + config common.SendConfig, + msg []byte, +) error { + nodeIDs := make([][]byte, config.NodeIDs.Len()) i := 0 - for nodeID := range nodeIDs { - nodeIDsBytes[i] = nodeID.Bytes() + for nodeID := range config.NodeIDs { + nodeIDs[i] = nodeID.Bytes() i++ } - _, err := c.client.SendAppGossipSpecific( + _, err := c.client.SendAppGossip( ctx, - &appsenderpb.SendAppGossipSpecificMsg{ - NodeIds: nodeIDsBytes, - Msg: msg, + &appsenderpb.SendAppGossipMsg{ + NodeIds: nodeIDs, + Validators: uint64(config.Validators), + NonValidators: uint64(config.NonValidators), + 
Peers: uint64(config.Peers), + Msg: msg, }, ) return err diff --git a/snow/engine/common/appsender/appsender_server.go b/snow/engine/common/appsender/appsender_server.go index eedce556ce7d..2a3734d89347 100644 --- a/snow/engine/common/appsender/appsender_server.go +++ b/snow/engine/common/appsender/appsender_server.go @@ -87,11 +87,6 @@ func (s *Server) SendAppError(ctx context.Context, req *appsenderpb.SendAppError } func (s *Server) SendAppGossip(ctx context.Context, req *appsenderpb.SendAppGossipMsg) (*emptypb.Empty, error) { - err := s.appSender.SendAppGossip(ctx, req.Msg) - return &emptypb.Empty{}, err -} - -func (s *Server) SendAppGossipSpecific(ctx context.Context, req *appsenderpb.SendAppGossipSpecificMsg) (*emptypb.Empty, error) { nodeIDs := set.NewSet[ids.NodeID](len(req.NodeIds)) for _, nodeIDBytes := range req.NodeIds { nodeID, err := ids.ToNodeID(nodeIDBytes) @@ -100,6 +95,15 @@ func (s *Server) SendAppGossipSpecific(ctx context.Context, req *appsenderpb.Sen } nodeIDs.Add(nodeID) } - err := s.appSender.SendAppGossipSpecific(ctx, nodeIDs, req.Msg) + err := s.appSender.SendAppGossip( + ctx, + common.SendConfig{ + NodeIDs: nodeIDs, + Validators: int(req.Validators), + NonValidators: int(req.NonValidators), + Peers: int(req.Peers), + }, + req.Msg, + ) return &emptypb.Empty{}, err } diff --git a/snow/engine/common/mock_sender.go b/snow/engine/common/mock_sender.go index c22cfb45bfef..d1d3d1a68e5c 100644 --- a/snow/engine/common/mock_sender.go +++ b/snow/engine/common/mock_sender.go @@ -14,7 +14,6 @@ import ( reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - snow "github.com/ava-labs/avalanchego/snow" set "github.com/ava-labs/avalanchego/utils/set" gomock "go.uber.org/mock/gomock" ) @@ -42,20 +41,6 @@ func (m *MockSender) EXPECT() *MockSenderMockRecorder { return m.recorder } -// Accept mocks base method. 
-func (m *MockSender) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept", ctx, containerID, container) - ret0, _ := ret[0].(error) - return ret0 -} - -// Accept indicates an expected call of Accept. -func (mr *MockSenderMockRecorder) Accept(ctx, containerID, container any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), ctx, containerID, container) -} - // SendAccepted mocks base method. func (m *MockSender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { m.ctrl.T.Helper() @@ -119,31 +104,17 @@ func (mr *MockSenderMockRecorder) SendAppError(ctx, nodeID, requestID, errorCode } // SendAppGossip mocks base method. -func (m *MockSender) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { +func (m *MockSender) SendAppGossip(ctx context.Context, config SendConfig, appGossipBytes []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossip", ctx, appGossipBytes) + ret := m.ctrl.Call(m, "SendAppGossip", ctx, config, appGossipBytes) ret0, _ := ret[0].(error) return ret0 } // SendAppGossip indicates an expected call of SendAppGossip. -func (mr *MockSenderMockRecorder) SendAppGossip(ctx, appGossipBytes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), ctx, appGossipBytes) -} - -// SendAppGossipSpecific mocks base method. -func (m *MockSender) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossipSpecific", ctx, nodeIDs, appGossipBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendAppGossipSpecific indicates an expected call of SendAppGossipSpecific. 
-func (mr *MockSenderMockRecorder) SendAppGossipSpecific(ctx, nodeIDs, appGossipBytes any) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossip(ctx, config, appGossipBytes any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), ctx, nodeIDs, appGossipBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), ctx, config, appGossipBytes) } // SendAppRequest mocks base method. @@ -300,18 +271,6 @@ func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(ctx, nodeIDs, requ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), ctx, nodeIDs, requestID) } -// SendGossip mocks base method. -func (m *MockSender) SendGossip(ctx context.Context, container []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGossip", ctx, container) -} - -// SendGossip indicates an expected call of SendGossip. -func (mr *MockSenderMockRecorder) SendGossip(ctx, container any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), ctx, container) -} - // SendPullQuery mocks base method. 
func (m *MockSender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID, requestedHeight uint64) { m.ctrl.T.Helper() diff --git a/snow/engine/common/no_ops_handlers.go b/snow/engine/common/no_ops_handlers.go index 870c6694a7a7..4458070e7696 100644 --- a/snow/engine/common/no_ops_handlers.go +++ b/snow/engine/common/no_ops_handlers.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" @@ -180,21 +179,12 @@ func NewNoOpPutHandler(log logging.Logger) PutHandler { } func (nop *noOpPutHandler) Put(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte) error { - if requestID == constants.GossipMsgRequestID { - nop.log.Verbo("dropping request", - zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - } else { - nop.log.Debug("dropping request", - zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - } + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.PutOp), + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) return nil } diff --git a/snow/engine/common/sender.go b/snow/engine/common/sender.go index d0ba856347a0..69b53a899568 100644 --- a/snow/engine/common/sender.go +++ b/snow/engine/common/sender.go @@ -7,10 +7,17 @@ import ( "context" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" ) +// SendConfig is used to specify who to send messages to over the p2p network. 
+type SendConfig struct { + NodeIDs set.Set[ids.NodeID] + Validators int + NonValidators int + Peers int +} + // Sender defines how a consensus engine sends messages and requests to other // validators. // @@ -35,15 +42,12 @@ import ( // time the requestID space has been exhausted, the beginning of the requestID // space is free of conflicts. type Sender interface { - snow.Acceptor - StateSummarySender AcceptedStateSummarySender FrontierSender AcceptedSender FetchSender QuerySender - Gossiper AppSender } @@ -160,21 +164,19 @@ type QuerySender interface { ) } -// Gossiper defines how a consensus engine gossips a container on the accepted -// frontier to other nodes -type Gossiper interface { - // Gossip the provided container throughout the network - SendGossip(ctx context.Context, container []byte) -} - // NetworkAppSender sends VM-level messages to nodes in the network. type NetworkAppSender interface { // Send an application-level request. - // A nil return value guarantees that for each nodeID in [nodeIDs], - // the VM corresponding to this AppSender eventually receives either: + // + // The VM corresponding to this AppSender may receive either: // * An AppResponse from nodeID with ID [requestID] // * An AppRequestFailed from nodeID with ID [requestID] - // Exactly one of the above messages will eventually be received per nodeID. + // + // A nil return value guarantees that the VM corresponding to this AppSender + // will receive exactly one of the above messages. + // + // A non-nil return value guarantees that the VM corresponding to this + // AppSender will receive at most one of the above messages. SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error // Send an application-level response to a request. 
// This response must be in response to an AppRequest that the VM corresponding @@ -183,8 +185,11 @@ type NetworkAppSender interface { // SendAppError sends an application-level error to an AppRequest SendAppError(ctx context.Context, nodeID ids.NodeID, requestID uint32, errorCode int32, errorMessage string) error // Gossip an application-level message. - SendAppGossip(ctx context.Context, appGossipBytes []byte) error - SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error + SendAppGossip( + ctx context.Context, + config SendConfig, + appGossipBytes []byte, + ) error } // CrossChainAppSender sends local VM-level messages to another VM. @@ -192,12 +197,16 @@ type CrossChainAppSender interface { // SendCrossChainAppRequest sends an application-level request to a // specific chain. // - // A nil return value guarantees that the VM corresponding to this - // CrossChainAppSender eventually receives either: + // The VM corresponding to this CrossChainAppSender may receive either: // * A CrossChainAppResponse from [chainID] with ID [requestID] // * A CrossChainAppRequestFailed from [chainID] with ID [requestID] - // Exactly one of the above messages will eventually be received from - // [chainID]. + // + // A nil return value guarantees that the VM corresponding to this + // CrossChainAppSender will eventually receive exactly one of the above + // messages. + // + // A non-nil return value guarantees that the VM corresponding to this + // CrossChainAppSender will receive at most one of the above messages. 
SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error // SendCrossChainAppResponse sends an application-level response to a // specific chain diff --git a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go index ef77fc658a5d..e3cb44165c54 100644 --- a/snow/engine/common/test_sender.go +++ b/snow/engine/common/test_sender.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" ) @@ -18,31 +17,26 @@ var ( _ Sender = (*SenderTest)(nil) _ AppSender = (*FakeSender)(nil) - errAccept = errors.New("unexpectedly called Accept") - errSendAppRequest = errors.New("unexpectedly called SendAppRequest") - errSendAppResponse = errors.New("unexpectedly called SendAppResponse") - errSendAppError = errors.New("unexpectedly called SendAppError") - errSendAppGossip = errors.New("unexpectedly called SendAppGossip") - errSendAppGossipSpecific = errors.New("unexpectedly called SendAppGossipSpecific") + errSendAppRequest = errors.New("unexpectedly called SendAppRequest") + errSendAppResponse = errors.New("unexpectedly called SendAppResponse") + errSendAppError = errors.New("unexpectedly called SendAppError") + errSendAppGossip = errors.New("unexpectedly called SendAppGossip") ) // SenderTest is a test sender type SenderTest struct { T require.TestingT - CantAccept, CantSendGetStateSummaryFrontier, CantSendStateSummaryFrontier, CantSendGetAcceptedStateSummary, CantSendAcceptedStateSummary, CantSendGetAcceptedFrontier, CantSendAcceptedFrontier, CantSendGetAccepted, CantSendAccepted, CantSendGet, CantSendGetAncestors, CantSendPut, CantSendAncestors, CantSendPullQuery, CantSendPushQuery, CantSendChits, - CantSendGossip, CantSendAppRequest, CantSendAppResponse, CantSendAppError, - CantSendAppGossip, CantSendAppGossipSpecific, + CantSendAppGossip, CantSendCrossChainAppRequest, 
CantSendCrossChainAppResponse, CantSendCrossChainAppError bool - AcceptF func(*snow.ConsensusContext, ids.ID, []byte) error SendGetStateSummaryFrontierF func(context.Context, set.Set[ids.NodeID], uint32) SendStateSummaryFrontierF func(context.Context, ids.NodeID, uint32, []byte) SendGetAcceptedStateSummaryF func(context.Context, set.Set[ids.NodeID], uint32, []uint64) @@ -58,12 +52,10 @@ type SenderTest struct { SendPushQueryF func(context.Context, set.Set[ids.NodeID], uint32, []byte, uint64) SendPullQueryF func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) SendChitsF func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) - SendGossipF func(context.Context, []byte) SendAppRequestF func(context.Context, set.Set[ids.NodeID], uint32, []byte) error SendAppResponseF func(context.Context, ids.NodeID, uint32, []byte) error SendAppErrorF func(context.Context, ids.NodeID, uint32, int32, string) error - SendAppGossipF func(context.Context, []byte) error - SendAppGossipSpecificF func(context.Context, set.Set[ids.NodeID], []byte) error + SendAppGossipF func(context.Context, SendConfig, []byte) error SendCrossChainAppRequestF func(context.Context, ids.ID, uint32, []byte) SendCrossChainAppResponseF func(context.Context, ids.ID, uint32, []byte) SendCrossChainAppErrorF func(context.Context, ids.ID, uint32, int32, string) @@ -71,7 +63,6 @@ type SenderTest struct { // Default set the default callable value to [cant] func (s *SenderTest) Default(cant bool) { - s.CantAccept = cant s.CantSendGetStateSummaryFrontier = cant s.CantSendStateSummaryFrontier = cant s.CantSendGetAcceptedStateSummary = cant @@ -87,31 +78,13 @@ func (s *SenderTest) Default(cant bool) { s.CantSendPullQuery = cant s.CantSendPushQuery = cant s.CantSendChits = cant - s.CantSendGossip = cant s.CantSendAppRequest = cant s.CantSendAppResponse = cant s.CantSendAppGossip = cant - s.CantSendAppGossipSpecific = cant s.CantSendCrossChainAppRequest = cant s.CantSendCrossChainAppResponse = cant } 
-// Accept calls AcceptF if it was initialized. If it wasn't initialized and this -// function shouldn't be called and testing was initialized, then testing will -// fail. -func (s *SenderTest) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { - if s.AcceptF != nil { - return s.AcceptF(ctx, containerID, container) - } - if !s.CantAccept { - return nil - } - if s.T != nil { - require.FailNow(s.T, errAccept.Error()) - } - return errAccept -} - // SendGetStateSummaryFrontier calls SendGetStateSummaryFrontierF if it was // initialized. If it wasn't initialized and this function shouldn't be called // and testing was initialized, then testing will fail. @@ -277,17 +250,6 @@ func (s *SenderTest) SendChits(ctx context.Context, vdr ids.NodeID, requestID ui } } -// SendGossip calls SendGossipF if it was initialized. If it wasn't initialized -// and this function shouldn't be called and testing was initialized, then -// testing will fail. -func (s *SenderTest) SendGossip(ctx context.Context, container []byte) { - if s.SendGossipF != nil { - s.SendGossipF(ctx, container) - } else if s.CantSendGossip && s.T != nil { - require.FailNow(s.T, "Unexpectedly called SendGossip") - } -} - // SendCrossChainAppRequest calls SendCrossChainAppRequestF if it was // initialized. If it wasn't initialized and this function shouldn't be called // and testing was initialized, then testing will fail. @@ -366,33 +328,24 @@ func (s *SenderTest) SendAppError(ctx context.Context, nodeID ids.NodeID, reques // SendAppGossip calls SendAppGossipF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { +func (s *SenderTest) SendAppGossip( + ctx context.Context, + config SendConfig, + appGossipBytes []byte, +) error { switch { case s.SendAppGossipF != nil: - return s.SendAppGossipF(ctx, appGossipBytes) + return s.SendAppGossipF(ctx, config, appGossipBytes) case s.CantSendAppGossip && s.T != nil: require.FailNow(s.T, errSendAppGossip.Error()) } return errSendAppGossip } -// SendAppGossipSpecific calls SendAppGossipSpecificF if it was initialized. If it wasn't -// initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. -func (s *SenderTest) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { - switch { - case s.SendAppGossipSpecificF != nil: - return s.SendAppGossipSpecificF(ctx, nodeIDs, appGossipBytes) - case s.CantSendAppGossipSpecific && s.T != nil: - require.FailNow(s.T, errSendAppGossipSpecific.Error()) - } - return errSendAppGossipSpecific -} - // FakeSender is used for testing type FakeSender struct { SentAppRequest, SentAppResponse, - SentAppGossip, SentAppGossipSpecific, + SentAppGossip, SentCrossChainAppRequest, SentCrossChainAppResponse chan []byte SentAppError, SentCrossChainAppError chan *AppError @@ -428,7 +381,7 @@ func (f FakeSender) SendAppError(_ context.Context, _ ids.NodeID, _ uint32, erro return nil } -func (f FakeSender) SendAppGossip(_ context.Context, bytes []byte) error { +func (f FakeSender) SendAppGossip(_ context.Context, _ SendConfig, bytes []byte) error { if f.SentAppGossip == nil { return nil } @@ -437,15 +390,6 @@ func (f FakeSender) SendAppGossip(_ context.Context, bytes []byte) error { return nil } -func (f FakeSender) SendAppGossipSpecific(_ context.Context, _ set.Set[ids.NodeID], bytes []byte) error { - if f.SentAppGossipSpecific == nil { - return nil - } - - f.SentAppGossipSpecific <- bytes - return nil -} - func (f FakeSender) 
SendCrossChainAppRequest(_ context.Context, _ ids.ID, _ uint32, bytes []byte) error { if f.SentCrossChainAppRequest == nil { return nil diff --git a/snow/engine/common/tracker/peers.go b/snow/engine/common/tracker/peers.go index fdf070613d83..94ed46764785 100644 --- a/snow/engine/common/tracker/peers.go +++ b/snow/engine/common/tracker/peers.go @@ -31,15 +31,9 @@ type Peers interface { ConnectedWeight() uint64 // ConnectedPercent returns the currently connected stake percentage [0, 1] ConnectedPercent() float64 - // TotalWeight returns the total validator weight - TotalWeight() uint64 // SampleValidator returns a randomly selected connected validator. If there // are no currently connected validators then it will return false. SampleValidator() (ids.NodeID, bool) - // PreferredPeers returns the currently connected validators. If there are - // no currently connected validators then it will return the currently - // connected peers. - PreferredPeers() set.Set[ids.NodeID] } type lockedPeers struct { @@ -104,13 +98,6 @@ func (p *lockedPeers) ConnectedPercent() float64 { return p.peers.ConnectedPercent() } -func (p *lockedPeers) TotalWeight() uint64 { - p.lock.RLock() - defer p.lock.RUnlock() - - return p.peers.TotalWeight() -} - func (p *lockedPeers) SampleValidator() (ids.NodeID, bool) { p.lock.RLock() defer p.lock.RUnlock() @@ -118,13 +105,6 @@ func (p *lockedPeers) SampleValidator() (ids.NodeID, bool) { return p.peers.SampleValidator() } -func (p *lockedPeers) PreferredPeers() set.Set[ids.NodeID] { - p.lock.RLock() - defer p.lock.RUnlock() - - return p.peers.PreferredPeers() -} - type meteredPeers struct { Peers @@ -133,21 +113,18 @@ type meteredPeers struct { totalWeight prometheus.Gauge } -func NewMeteredPeers(namespace string, reg prometheus.Registerer) (Peers, error) { +func NewMeteredPeers(reg prometheus.Registerer) (Peers, error) { percentConnected := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "percent_connected", - Help: 
"Percent of connected stake", + Name: "percent_connected", + Help: "Percent of connected stake", }) totalWeight := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "total_weight", - Help: "Total stake", + Name: "total_weight", + Help: "Total stake", }) numValidators := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_validators", - Help: "Total number of validators", + Name: "num_validators", + Help: "Total number of validators", }) err := utils.Err( reg.Register(percentConnected), @@ -169,20 +146,21 @@ func NewMeteredPeers(namespace string, reg prometheus.Registerer) (Peers, error) func (p *meteredPeers) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { p.Peers.OnValidatorAdded(nodeID, pk, txID, weight) p.numValidators.Inc() - p.totalWeight.Set(float64(p.Peers.TotalWeight())) + p.totalWeight.Add(float64(weight)) p.percentConnected.Set(p.Peers.ConnectedPercent()) } func (p *meteredPeers) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { p.Peers.OnValidatorRemoved(nodeID, weight) p.numValidators.Dec() - p.totalWeight.Set(float64(p.Peers.TotalWeight())) + p.totalWeight.Sub(float64(weight)) p.percentConnected.Set(p.Peers.ConnectedPercent()) } func (p *meteredPeers) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { p.Peers.OnValidatorWeightChanged(nodeID, oldWeight, newWeight) - p.totalWeight.Set(float64(p.Peers.TotalWeight())) + p.totalWeight.Sub(float64(oldWeight)) + p.totalWeight.Add(float64(newWeight)) p.percentConnected.Set(p.Peers.ConnectedPercent()) } @@ -269,22 +247,6 @@ func (p *peerData) ConnectedPercent() float64 { return float64(p.connectedWeight) / float64(p.totalWeight) } -func (p *peerData) TotalWeight() uint64 { - return p.totalWeight -} - func (p *peerData) SampleValidator() (ids.NodeID, bool) { return p.connectedValidators.Peek() } - -func (p *peerData) PreferredPeers() set.Set[ids.NodeID] { - if p.connectedValidators.Len() == 0 { - 
connectedPeers := set.NewSet[ids.NodeID](p.connectedPeers.Len()) - connectedPeers.Union(p.connectedPeers) - return connectedPeers - } - - connectedValidators := set.NewSet[ids.NodeID](p.connectedValidators.Len()) - connectedValidators.Union(p.connectedValidators) - return connectedValidators -} diff --git a/snow/engine/common/tracker/peers_test.go b/snow/engine/common/tracker/peers_test.go index b627b79a16ed..1ed2daf6575b 100644 --- a/snow/engine/common/tracker/peers_test.go +++ b/snow/engine/common/tracker/peers_test.go @@ -20,36 +20,23 @@ func TestPeers(t *testing.T) { p := NewPeers() - require.Zero(p.TotalWeight()) require.Zero(p.ConnectedWeight()) - require.Empty(p.PreferredPeers()) p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.Zero(p.ConnectedWeight()) - require.Equal(uint64(5), p.TotalWeight()) - require.Empty(p.PreferredPeers()) require.NoError(p.Connected(context.Background(), nodeID, version.CurrentApp)) require.Equal(uint64(5), p.ConnectedWeight()) - require.Contains(p.PreferredPeers(), nodeID) p.OnValidatorWeightChanged(nodeID, 5, 10) require.Equal(uint64(10), p.ConnectedWeight()) - require.Equal(uint64(10), p.TotalWeight()) - require.Contains(p.PreferredPeers(), nodeID) p.OnValidatorRemoved(nodeID, 10) require.Zero(p.ConnectedWeight()) - require.Zero(p.TotalWeight()) - require.Contains(p.PreferredPeers(), nodeID) p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.Equal(uint64(5), p.ConnectedWeight()) - require.Equal(uint64(5), p.TotalWeight()) - require.Contains(p.PreferredPeers(), nodeID) require.NoError(p.Disconnected(context.Background(), nodeID)) require.Zero(p.ConnectedWeight()) - require.Equal(uint64(5), p.TotalWeight()) - require.Empty(p.PreferredPeers()) } diff --git a/snow/engine/snowman/block/mock_chain_vm.go b/snow/engine/snowman/block/mock_chain_vm.go index ad99e3f716d0..c927282c1f93 100644 --- a/snow/engine/snowman/block/mock_chain_vm.go +++ b/snow/engine/snowman/block/mock_chain_vm.go @@ -334,20 +334,6 @@ func (mr 
*MockChainVMMockRecorder) Shutdown(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockChainVM)(nil).Shutdown), arg0) } -// VerifyHeightIndex mocks base method. -func (m *MockChainVM) VerifyHeightIndex(arg0 context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyHeightIndex", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// VerifyHeightIndex indicates an expected call of VerifyHeightIndex. -func (mr *MockChainVMMockRecorder) VerifyHeightIndex(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyHeightIndex", reflect.TypeOf((*MockChainVM)(nil).VerifyHeightIndex), arg0) -} - // Version mocks base method. func (m *MockChainVM) Version(arg0 context.Context) (string, error) { m.ctrl.T.Helper() diff --git a/snow/engine/snowman/block/test_vm.go b/snow/engine/snowman/block/test_vm.go index 376dd27066f7..7c04c90ec238 100644 --- a/snow/engine/snowman/block/test_vm.go +++ b/snow/engine/snowman/block/test_vm.go @@ -19,7 +19,6 @@ var ( errParseBlock = errors.New("unexpectedly called ParseBlock") errGetBlock = errors.New("unexpectedly called GetBlock") errLastAccepted = errors.New("unexpectedly called LastAccepted") - errVerifyHeightIndex = errors.New("unexpectedly called VerifyHeightIndex") errGetBlockIDAtHeight = errors.New("unexpectedly called GetBlockIDAtHeight") _ ChainVM = (*TestVM)(nil) @@ -34,7 +33,6 @@ type TestVM struct { CantGetBlock, CantSetPreference, CantLastAccepted, - CantVerifyHeightIndex, CantGetBlockIDAtHeight bool BuildBlockF func(context.Context) (snowman.Block, error) @@ -42,7 +40,6 @@ type TestVM struct { GetBlockF func(context.Context, ids.ID) (snowman.Block, error) SetPreferenceF func(context.Context, ids.ID) error LastAcceptedF func(context.Context) (ids.ID, error) - VerifyHeightIndexF func(context.Context) error GetBlockIDAtHeightF func(ctx context.Context, height uint64) (ids.ID, error) } @@ -106,16 
+103,6 @@ func (vm *TestVM) LastAccepted(ctx context.Context) (ids.ID, error) { return ids.ID{}, errLastAccepted } -func (vm *TestVM) VerifyHeightIndex(ctx context.Context) error { - if vm.VerifyHeightIndexF != nil { - return vm.VerifyHeightIndexF(ctx) - } - if vm.CantVerifyHeightIndex && vm.T != nil { - require.FailNow(vm.T, errVerifyHeightIndex.Error()) - } - return errVerifyHeightIndex -} - func (vm *TestVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { if vm.GetBlockIDAtHeightF != nil { return vm.GetBlockIDAtHeightF(ctx, height) diff --git a/snow/engine/snowman/block/vm.go b/snow/engine/snowman/block/vm.go index 4153632a7616..d28dc7ef7a6d 100644 --- a/snow/engine/snowman/block/vm.go +++ b/snow/engine/snowman/block/vm.go @@ -5,19 +5,12 @@ package block import ( "context" - "errors" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" ) -// ErrIndexIncomplete is used to indicate that the VM is currently repairing its -// index. -// -// TODO: Remove after v1.11.x activates. -var ErrIndexIncomplete = errors.New("query failed because height index is incomplete") - // ChainVM defines the required functionality of a Snowman VM. // // A Snowman VM is responsible for defining the representation of state, @@ -56,15 +49,6 @@ type ChainVM interface { // returned. LastAccepted(context.Context) (ids.ID, error) - // VerifyHeightIndex should return: - // - nil if the height index is available. - // - ErrIndexIncomplete if the height index is not currently available. - // - Any other non-standard error that may have occurred when verifying the - // index. - // - // TODO: Remove after v1.11.x activates. - VerifyHeightIndex(context.Context) error - // GetBlockIDAtHeight returns: // - The ID of the block that was accepted with [height]. // - database.ErrNotFound if the [height] index is unknown. 
diff --git a/snow/engine/snowman/bootstrap/acceptor.go b/snow/engine/snowman/bootstrap/acceptor.go new file mode 100644 index 000000000000..eae4be879afa --- /dev/null +++ b/snow/engine/snowman/bootstrap/acceptor.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrap + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var ( + _ block.Parser = (*parseAcceptor)(nil) + _ snowman.Block = (*blockAcceptor)(nil) +) + +type parseAcceptor struct { + parser block.Parser + ctx *snow.ConsensusContext + numAccepted prometheus.Counter +} + +func (p *parseAcceptor) ParseBlock(ctx context.Context, bytes []byte) (snowman.Block, error) { + blk, err := p.parser.ParseBlock(ctx, bytes) + if err != nil { + return nil, err + } + return &blockAcceptor{ + Block: blk, + ctx: p.ctx, + numAccepted: p.numAccepted, + }, nil +} + +type blockAcceptor struct { + snowman.Block + + ctx *snow.ConsensusContext + numAccepted prometheus.Counter +} + +func (b *blockAcceptor) Accept(ctx context.Context) error { + if err := b.ctx.BlockAcceptor.Accept(b.ctx, b.ID(), b.Bytes()); err != nil { + return err + } + err := b.Block.Accept(ctx) + b.numAccepted.Inc() + return err +} diff --git a/snow/engine/snowman/bootstrap/block_job.go b/snow/engine/snowman/bootstrap/block_job.go deleted file mode 100644 index a9496316f1fb..000000000000 --- a/snow/engine/snowman/bootstrap/block_job.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package bootstrap - -import ( - "context" - "errors" - "fmt" - - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" -) - -var errMissingDependenciesOnAccept = errors.New("attempting to accept a block with missing dependencies") - -type parser struct { - log logging.Logger - numAccepted, numDropped prometheus.Counter - vm block.ChainVM -} - -func (p *parser) Parse(ctx context.Context, blkBytes []byte) (queue.Job, error) { - blk, err := p.vm.ParseBlock(ctx, blkBytes) - if err != nil { - return nil, err - } - return &blockJob{ - log: p.log, - numAccepted: p.numAccepted, - numDropped: p.numDropped, - blk: blk, - vm: p.vm, - }, nil -} - -type blockJob struct { - log logging.Logger - numAccepted, numDropped prometheus.Counter - blk snowman.Block - vm block.Getter -} - -func (b *blockJob) ID() ids.ID { - return b.blk.ID() -} - -func (b *blockJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], error) { - missing := set.Set[ids.ID]{} - parentID := b.blk.Parent() - if parent, err := b.vm.GetBlock(ctx, parentID); err != nil || parent.Status() != choices.Accepted { - missing.Add(parentID) - } - return missing, nil -} - -func (b *blockJob) HasMissingDependencies(ctx context.Context) (bool, error) { - parentID := b.blk.Parent() - if parent, err := b.vm.GetBlock(ctx, parentID); err != nil || parent.Status() != choices.Accepted { - return true, nil - } - return false, nil -} - -func (b *blockJob) Execute(ctx context.Context) error { - hasMissingDeps, err := b.HasMissingDependencies(ctx) - if err != nil { - return err - } - if hasMissingDeps { - b.numDropped.Inc() - return 
errMissingDependenciesOnAccept - } - status := b.blk.Status() - switch status { - case choices.Unknown, choices.Rejected: - b.numDropped.Inc() - return fmt.Errorf("attempting to execute block with status %s", status) - case choices.Processing: - blkID := b.blk.ID() - if err := b.blk.Verify(ctx); err != nil { - b.log.Error("block failed verification during bootstrapping", - zap.Stringer("blkID", blkID), - zap.Error(err), - ) - return fmt.Errorf("failed to verify block in bootstrapping: %w", err) - } - - b.numAccepted.Inc() - b.log.Trace("accepting block in bootstrapping", - zap.Stringer("blkID", blkID), - zap.Uint64("height", b.blk.Height()), - zap.Time("timestamp", b.blk.Timestamp()), - ) - if err := b.blk.Accept(ctx); err != nil { - b.log.Debug("failed to accept block during bootstrapping", - zap.Stringer("blkID", blkID), - zap.Error(err), - ) - return fmt.Errorf("failed to accept block in bootstrapping: %w", err) - } - } - return nil -} - -func (b *blockJob) Bytes() []byte { - return b.blk.Bytes() -} diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 29754a24d734..6b8462f83f63 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -13,14 +13,16 @@ import ( "go.uber.org/zap" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/consensus/snowman/bootstrapper" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap/interval" "github.com/ava-labs/avalanchego/utils/bimap" "github.com/ava-labs/avalanchego/utils/set" 
"github.com/ava-labs/avalanchego/utils/timer" @@ -38,6 +40,8 @@ const ( // maxOutstandingBroadcastRequests is the maximum number of requests to have // outstanding when broadcasting. maxOutstandingBroadcastRequests = 50 + + epsilon = 1e-6 // small amount to add to time to avoid division by 0 ) var ( @@ -93,23 +97,15 @@ type Bootstrapper struct { startTime time.Time // tracks which validators were asked for which containers in which requests - outstandingRequests *bimap.BiMap[common.Request, ids.ID] + outstandingRequests *bimap.BiMap[common.Request, ids.ID] + outstandingRequestTimes map[common.Request]time.Time // number of state transitions executed - executedStateTransitions int - - parser *parser + executedStateTransitions uint64 + awaitingTimeout bool - awaitingTimeout bool - - // fetchFrom is the set of nodes that we can fetch the next container from. - // When a container is fetched, the nodeID is removed from [fetchFrom] to - // attempt to limit a single request to a peer at any given time. When the - // response is received, either and Ancestors or an AncestorsFailed, the - // nodeID will be added back to [fetchFrom] unless the Ancestors message is - // empty. This is to attempt to prevent requesting containers from that peer - // again. - fetchFrom set.Set[ids.NodeID] + tree *interval.Tree + missingBlockIDs set.Set[ids.ID] // bootstrappedOnce ensures that the [Bootstrapped] callback is only invoked // once, even if bootstrapping is retried. 
@@ -120,7 +116,7 @@ type Bootstrapper struct { } func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (*Bootstrapper, error) { - metrics, err := newMetrics("bs", config.Ctx.Registerer) + metrics, err := newMetrics(config.Ctx.Registerer) return &Bootstrapper{ Config: config, metrics: metrics, @@ -134,7 +130,8 @@ func New(config Config, onFinished func(ctx context.Context, lastReqID uint32) e minority: bootstrapper.Noop, majority: bootstrapper.Noop, - outstandingRequests: bimap.New[common.Request, ids.ID](), + outstandingRequests: bimap.New[common.Request, ids.ID](), + outstandingRequestTimes: make(map[common.Request]time.Time), executedStateTransitions: math.MaxInt, onFinished: onFinished, @@ -149,45 +146,42 @@ func (b *Bootstrapper) Clear(context.Context) error { b.Ctx.Lock.Lock() defer b.Ctx.Lock.Unlock() - if err := b.Config.Blocked.Clear(); err != nil { - return err - } - return b.Config.Blocked.Commit() + return database.AtomicClear(b.DB, b.DB) } func (b *Bootstrapper) Start(ctx context.Context, startReqID uint32) error { - b.Ctx.Log.Info("starting bootstrapper") - b.Ctx.State.Set(snow.EngineState{ Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, }) if err := b.VM.SetState(ctx, snow.Bootstrapping); err != nil { - return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", - err) + return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", err) } - b.parser = &parser{ - log: b.Ctx.Log, - numAccepted: b.numAccepted, - numDropped: b.numDropped, - vm: b.VM, - } - if err := b.Blocked.SetParser(ctx, b.parser); err != nil { + lastAccepted, err := b.getLastAccepted(ctx) + if err != nil { return err } + lastAcceptedHeight := lastAccepted.Height() + b.Ctx.Log.Info("starting bootstrapper", + zap.Stringer("lastAcceptedID", lastAccepted.ID()), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), + ) + // Set the starting height - lastAcceptedID, err := b.VM.LastAccepted(ctx) + 
b.startingHeight = lastAcceptedHeight + b.requestID = startReqID + + b.tree, err = interval.NewTree(b.DB) if err != nil { - return fmt.Errorf("couldn't get last accepted ID: %w", err) + return fmt.Errorf("failed to initialize interval tree: %w", err) } - lastAccepted, err := b.VM.GetBlock(ctx, lastAcceptedID) + + b.missingBlockIDs, err = getMissingBlockIDs(ctx, b.DB, b.VM, b.tree, b.startingHeight) if err != nil { - return fmt.Errorf("couldn't get last accepted block: %w", err) + return fmt.Errorf("failed to initialize missing block IDs: %w", err) } - b.startingHeight = lastAccepted.Height() - b.requestID = startReqID return b.tryStartBootstrapping(ctx) } @@ -200,10 +194,6 @@ func (b *Bootstrapper) Connected(ctx context.Context, nodeID ids.NodeID, nodeVer if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { return err } - // Ensure fetchFrom reflects proper validator list - if _, ok := b.Beacons.GetValidator(b.Ctx.SubnetID, nodeID); ok { - b.fetchFrom.Add(nodeID) - } return b.tryStartBootstrapping(ctx) } @@ -212,13 +202,7 @@ func (b *Bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) erro if err := b.VM.Disconnected(ctx, nodeID); err != nil { return err } - - if err := b.StartupTracker.Disconnected(ctx, nodeID); err != nil { - return err - } - - b.markUnavailable(nodeID) - return nil + return b.StartupTracker.Disconnected(ctx, nodeID) } // tryStartBootstrapping will start bootstrapping the first time it is called @@ -308,16 +292,6 @@ func (b *Bootstrapper) sendBootstrappingMessagesOrFinish(ctx context.Context) er return b.startBootstrapping(ctx) } - if !b.restarted { - b.Ctx.Log.Info("bootstrapping started syncing", - zap.Int("numAccepted", numAccepted), - ) - } else { - b.Ctx.Log.Debug("bootstrapping started syncing", - zap.Int("numAccepted", numAccepted), - ) - } - return b.startSyncing(ctx, accepted) } @@ -385,23 +359,24 @@ func (b *Bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, return 
b.sendBootstrappingMessagesOrFinish(ctx) } -func (b *Bootstrapper) startSyncing(ctx context.Context, acceptedContainerIDs []ids.ID) error { - // Initialize the fetch from set to the currently preferred peers - b.fetchFrom = b.StartupTracker.PreferredPeers() - - pendingContainerIDs := b.Blocked.MissingIDs() - // Append the list of accepted container IDs to pendingContainerIDs to ensure - // we iterate over every container that must be traversed. - pendingContainerIDs = append(pendingContainerIDs, acceptedContainerIDs...) - b.Ctx.Log.Debug("starting bootstrapping", - zap.Int("numPendingBlocks", len(pendingContainerIDs)), - zap.Int("numAcceptedBlocks", len(acceptedContainerIDs)), - ) +func (b *Bootstrapper) startSyncing(ctx context.Context, acceptedBlockIDs []ids.ID) error { + knownBlockIDs := genesis.GetCheckpoints(b.Ctx.NetworkID, b.Ctx.ChainID) + b.missingBlockIDs.Union(knownBlockIDs) + b.missingBlockIDs.Add(acceptedBlockIDs...) + numMissingBlockIDs := b.missingBlockIDs.Len() - toProcess := make([]snowman.Block, 0, len(pendingContainerIDs)) - for _, blkID := range pendingContainerIDs { - b.Blocked.AddMissingID(blkID) + log := b.Ctx.Log.Info + if b.restarted { + log = b.Ctx.Log.Debug + } + log("starting to fetch blocks", + zap.Int("numKnownBlocks", knownBlockIDs.Len()), + zap.Int("numAcceptedBlocks", len(acceptedBlockIDs)), + zap.Int("numMissingBlocks", numMissingBlockIDs), + ) + toProcess := make([]snowman.Block, 0, numMissingBlockIDs) + for blkID := range b.missingBlockIDs { // TODO: if `GetBlock` returns an error other than // `database.ErrNotFound`, then the error should be propagated. 
blk, err := b.VM.GetBlock(ctx, blkID) @@ -414,7 +389,7 @@ func (b *Bootstrapper) startSyncing(ctx context.Context, acceptedContainerIDs [] toProcess = append(toProcess, blk) } - b.initiallyFetched = b.Blocked.PendingJobs() + b.initiallyFetched = b.tree.Len() b.startTime = time.Now() // Process received blocks @@ -434,29 +409,25 @@ func (b *Bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { return nil } - // Make sure we don't already have this block - if _, err := b.VM.GetBlock(ctx, blkID); err == nil { - return b.tryStartExecuting(ctx) - } - - validatorID, ok := b.fetchFrom.Peek() + nodeID, ok := b.PeerTracker.SelectPeer() if !ok { - return fmt.Errorf("dropping request for %s as there are no validators", blkID) + // If we aren't connected to any peers, we send a request to ourself + // which is guaranteed to fail. We send this message to use the message + // timeout as a retry mechanism. Once we are connected to another node + // again we will select them to sample from. + nodeID = b.Ctx.NodeID } - // We only allow one outbound request at a time from a node - b.markUnavailable(validatorID) + b.PeerTracker.RegisterRequest(nodeID) b.requestID++ - - b.outstandingRequests.Put( - common.Request{ - NodeID: validatorID, - RequestID: b.requestID, - }, - blkID, - ) - b.Config.Sender.SendGetAncestors(ctx, validatorID, b.requestID, blkID) // request block and ancestors + request := common.Request{ + NodeID: nodeID, + RequestID: b.requestID, + } + b.outstandingRequests.Put(request, blkID) + b.outstandingRequestTimes[request] = time.Now() + b.Config.Sender.SendGetAncestors(ctx, nodeID, b.requestID, blkID) // request block and ancestors return nil } @@ -464,10 +435,11 @@ func (b *Bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { // response to a GetAncestors message to [nodeID] with request ID [requestID] func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blks [][]byte) error { // Make sure this is in response to 
a request we made - wantedBlkID, ok := b.outstandingRequests.DeleteKey(common.Request{ + request := common.Request{ NodeID: nodeID, RequestID: requestID, - }) + } + wantedBlkID, ok := b.outstandingRequests.DeleteKey(request) if !ok { // this message isn't in response to a request we made b.Ctx.Log.Debug("received unexpected Ancestors", zap.Stringer("nodeID", nodeID), @@ -475,6 +447,8 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request ) return nil } + requestTime := b.outstandingRequestTimes[request] + delete(b.outstandingRequestTimes, request) lenBlks := len(blks) if lenBlks == 0 { @@ -483,15 +457,12 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request zap.Uint32("requestID", requestID), ) - b.markUnavailable(nodeID) + b.PeerTracker.RegisterFailure(nodeID) // Send another request for this return b.fetch(ctx, wantedBlkID) } - // This node has responded - so add it back into the set - b.fetchFrom.Add(nodeID) - if lenBlks > b.Config.AncestorsMaxContainersReceived { blks = blks[:b.Config.AncestorsMaxContainersReceived] b.Ctx.Log.Debug("ignoring containers in Ancestors", @@ -508,6 +479,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request zap.Uint32("requestID", requestID), zap.Error(err), ) + b.PeerTracker.RegisterFailure(nodeID) return b.fetch(ctx, wantedBlkID) } @@ -516,6 +488,7 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) + b.PeerTracker.RegisterFailure(nodeID) return b.fetch(ctx, wantedBlkID) } @@ -525,21 +498,40 @@ func (b *Bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, request zap.Stringer("expectedBlkID", wantedBlkID), zap.Stringer("blkID", actualID), ) + b.PeerTracker.RegisterFailure(nodeID) return b.fetch(ctx, wantedBlkID) } - blockSet := make(map[ids.ID]snowman.Block, len(blocks)) + var ( + numBytes = len(requestedBlock.Bytes()) + ancestors 
= make(map[ids.ID]snowman.Block, len(blocks)) + ) for _, block := range blocks[1:] { - blockSet[block.ID()] = block + numBytes += len(block.Bytes()) + ancestors[block.ID()] = block } - return b.process(ctx, requestedBlock, blockSet) + + // TODO: Calculate bandwidth based on the blocks that were persisted to + // disk. + var ( + requestLatency = time.Since(requestTime).Seconds() + epsilon + bandwidth = float64(numBytes) / requestLatency + ) + b.PeerTracker.RegisterResponse(nodeID, bandwidth) + + if err := b.process(ctx, requestedBlock, ancestors); err != nil { + return err + } + + return b.tryStartExecuting(ctx) } func (b *Bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - blkID, ok := b.outstandingRequests.DeleteKey(common.Request{ + request := common.Request{ NodeID: nodeID, RequestID: requestID, - }) + } + blkID, ok := b.outstandingRequests.DeleteKey(request) if !ok { b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", zap.Stringer("nodeID", nodeID), @@ -547,159 +539,93 @@ func (b *Bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID ) return nil } + delete(b.outstandingRequestTimes, request) - // This node timed out their request, so we can add them back to [fetchFrom] - b.fetchFrom.Add(nodeID) + // This node timed out their request. + b.PeerTracker.RegisterFailure(nodeID) // Send another request for this return b.fetch(ctx, blkID) } -// markUnavailable removes [nodeID] from the set of peers used to fetch -// ancestors. If the set becomes empty, it is reset to the currently preferred -// peers so bootstrapping can continue. -func (b *Bootstrapper) markUnavailable(nodeID ids.NodeID) { - b.fetchFrom.Remove(nodeID) - - // if [fetchFrom] has become empty, reset it to the currently preferred - // peers - if b.fetchFrom.Len() == 0 { - b.fetchFrom = b.StartupTracker.PreferredPeers() - } -} - // process a series of consecutive blocks starting at [blk]. 
// // - blk is a block that is assumed to have been marked as acceptable by the // bootstrapping engine. -// - processingBlocks is a set of blocks that can be used to lookup blocks. -// This enables the engine to process multiple blocks without relying on the -// VM to have stored blocks during `ParseBlock`. -// -// If [blk]'s height is <= the last accepted height, then it will be removed -// from the missingIDs set. -func (b *Bootstrapper) process(ctx context.Context, blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { - for { - blkID := blk.ID() - if b.Halted() { - // We must add in [blkID] to the set of missing IDs so that we are - // guaranteed to continue processing from this state when the - // bootstrapper is restarted. - b.Blocked.AddMissingID(blkID) - return b.Blocked.Commit() - } - - b.Blocked.RemoveMissingID(blkID) - - status := blk.Status() - // The status should never be rejected here - but we check to fail as - // quickly as possible - if status == choices.Rejected { - return fmt.Errorf("bootstrapping wants to accept %s, however it was previously rejected", blkID) - } - - blkHeight := blk.Height() - if status == choices.Accepted || blkHeight <= b.startingHeight { - // We can stop traversing, as we have reached the accepted frontier - if err := b.Blocked.Commit(); err != nil { - return err - } - return b.tryStartExecuting(ctx) - } +// - ancestors is a set of blocks that can be used to optimistically lookup +// parent blocks. This enables the engine to process multiple blocks without +// relying on the VM to have stored blocks during `ParseBlock`. 
+func (b *Bootstrapper) process( + ctx context.Context, + blk snowman.Block, + ancestors map[ids.ID]snowman.Block, +) error { + lastAccepted, err := b.getLastAccepted(ctx) + if err != nil { + return err + } - // If this block is going to be accepted, make sure to update the - // tipHeight for logging - if blkHeight > b.tipHeight { - b.tipHeight = blkHeight - } + numPreviouslyFetched := b.tree.Len() - pushed, err := b.Blocked.Push(ctx, &blockJob{ - log: b.Ctx.Log, - numAccepted: b.numAccepted, - numDropped: b.numDropped, - blk: blk, - vm: b.VM, - }) - if err != nil { - return err - } + batch := b.DB.NewBatch() + missingBlockID, foundNewMissingID, err := process( + batch, + b.tree, + b.missingBlockIDs, + lastAccepted.Height(), + blk, + ancestors, + ) + if err != nil { + return err + } - if !pushed { - // We can stop traversing, as we have reached a block that we - // previously pushed onto the jobs queue - if err := b.Blocked.Commit(); err != nil { - return err - } - return b.tryStartExecuting(ctx) - } + // Update metrics and log statuses + { + numFetched := b.tree.Len() + b.numFetched.Add(float64(b.tree.Len() - numPreviouslyFetched)) - // We added a new block to the queue, so track that it was fetched - b.numFetched.Inc() + height := blk.Height() + b.tipHeight = max(b.tipHeight, height) - // Periodically log progress - blocksFetchedSoFar := b.Blocked.Jobs.PendingJobs() - if blocksFetchedSoFar%statusUpdateFrequency == 0 { + if numPreviouslyFetched/statusUpdateFrequency != numFetched/statusUpdateFrequency { totalBlocksToFetch := b.tipHeight - b.startingHeight eta := timer.EstimateETA( b.startTime, - blocksFetchedSoFar-b.initiallyFetched, // Number of blocks we have fetched during this run + numFetched-b.initiallyFetched, // Number of blocks we have fetched during this run totalBlocksToFetch-b.initiallyFetched, // Number of blocks we expect to fetch during this run ) - b.fetchETA.Set(float64(eta)) if !b.restarted { b.Ctx.Log.Info("fetching blocks", - 
zap.Uint64("numFetchedBlocks", blocksFetchedSoFar), + zap.Uint64("numFetchedBlocks", numFetched), zap.Uint64("numTotalBlocks", totalBlocksToFetch), zap.Duration("eta", eta), ) } else { b.Ctx.Log.Debug("fetching blocks", - zap.Uint64("numFetchedBlocks", blocksFetchedSoFar), + zap.Uint64("numFetchedBlocks", numFetched), zap.Uint64("numTotalBlocks", totalBlocksToFetch), zap.Duration("eta", eta), ) } } + } - // Attempt to traverse to the next block - parentID := blk.Parent() - - // First check if the parent is in the processing blocks set - parent, ok := processingBlocks[parentID] - if ok { - blk = parent - continue - } - - // If the parent is not available in processing blocks, attempt to get - // the block from the vm - parent, err = b.VM.GetBlock(ctx, parentID) - if err == nil { - blk = parent - continue - } - // TODO: report errors that aren't `database.ErrNotFound` - - // If the block wasn't able to be acquired immediately, attempt to fetch - // it - b.Blocked.AddMissingID(parentID) - if err := b.fetch(ctx, parentID); err != nil { - return err - } - - if err := b.Blocked.Commit(); err != nil { - return err - } - return b.tryStartExecuting(ctx) + if err := batch.Write(); err != nil || !foundNewMissingID { + return err } + + b.missingBlockIDs.Add(missingBlockID) + // Attempt to fetch the newly discovered block + return b.fetch(ctx, missingBlockID) } // tryStartExecuting executes all pending blocks if there are no more blocks // being fetched. After executing all pending blocks it will either restart // bootstrapping, or transition into normal operations. 
func (b *Bootstrapper) tryStartExecuting(ctx context.Context) error { - if numPending := b.Blocked.NumMissingIDs(); numPending != 0 { + if numMissingBlockIDs := b.missingBlockIDs.Len(); numMissingBlockIDs != 0 { return nil } @@ -707,34 +633,54 @@ func (b *Bootstrapper) tryStartExecuting(ctx context.Context) error { return nil } - if !b.restarted { - b.Ctx.Log.Info("executing blocks", - zap.Uint64("numPendingJobs", b.Blocked.PendingJobs()), - ) - } else { - b.Ctx.Log.Debug("executing blocks", - zap.Uint64("numPendingJobs", b.Blocked.PendingJobs()), - ) + lastAccepted, err := b.getLastAccepted(ctx) + if err != nil { + return err + } + + log := b.Ctx.Log.Info + if b.restarted { + log = b.Ctx.Log.Debug } - executedBlocks, err := b.Blocked.ExecuteAll( + numToExecute := b.tree.Len() + err = execute( ctx, - b.Config.Ctx, b, - b.restarted, - b.Ctx.BlockAcceptor, + log, + b.DB, + &parseAcceptor{ + parser: b.VM, + ctx: b.Ctx, + numAccepted: b.numAccepted, + }, + b.tree, + lastAccepted.Height(), ) - if err != nil || b.Halted() { - return err + if err != nil { + // If a fatal error has occurred, include the last accepted block + // information. + lastAccepted, lastAcceptedErr := b.getLastAccepted(ctx) + if lastAcceptedErr != nil { + return fmt.Errorf("%w after %w", lastAcceptedErr, err) + } + return fmt.Errorf("%w with last accepted %s (height=%d)", + err, + lastAccepted.ID(), + lastAccepted.Height(), + ) + } + if b.Halted() { + return nil } previouslyExecuted := b.executedStateTransitions - b.executedStateTransitions = executedBlocks + b.executedStateTransitions = numToExecute // Note that executedBlocks < c*previouslyExecuted ( 0 <= c < 1 ) is enforced // so that the bootstrapping process will terminate even as new blocks are // being issued. 
- if executedBlocks > 0 && executedBlocks < previouslyExecuted/2 { + if numToExecute > 0 && numToExecute < previouslyExecuted/2 { return b.restartBootstrapping(ctx) } @@ -750,21 +696,28 @@ func (b *Bootstrapper) tryStartExecuting(ctx context.Context) error { // If the subnet hasn't finished bootstrapping, this chain should remain // syncing. if !b.Config.BootstrapTracker.IsBootstrapped() { - if !b.restarted { - b.Ctx.Log.Info("waiting for the remaining chains in this subnet to finish syncing") - } else { - b.Ctx.Log.Debug("waiting for the remaining chains in this subnet to finish syncing") - } + log("waiting for the remaining chains in this subnet to finish syncing") // Restart bootstrapping after [bootstrappingDelay] to keep up to date // on the latest tip. b.Config.Timer.RegisterTimeout(bootstrappingDelay) b.awaitingTimeout = true return nil } - b.fetchETA.Set(0) return b.onFinished(ctx, b.requestID) } +func (b *Bootstrapper) getLastAccepted(ctx context.Context) (snowman.Block, error) { + lastAcceptedID, err := b.VM.LastAccepted(ctx) + if err != nil { + return nil, fmt.Errorf("couldn't get last accepted ID: %w", err) + } + lastAccepted, err := b.VM.GetBlock(ctx, lastAcceptedID) + if err != nil { + return nil, fmt.Errorf("couldn't get last accepted block %s: %w", lastAcceptedID, err) + } + return lastAccepted, nil +} + func (b *Bootstrapper) Timeout(ctx context.Context) error { if !b.awaitingTimeout { return errUnexpectedTimeout @@ -774,7 +727,6 @@ func (b *Bootstrapper) Timeout(ctx context.Context) error { if !b.Config.BootstrapTracker.IsBootstrapped() { return b.restartBootstrapping(ctx) } - b.fetchETA.Set(0) return b.onFinished(ctx, b.requestID) } @@ -782,6 +734,7 @@ func (b *Bootstrapper) restartBootstrapping(ctx context.Context) error { b.Ctx.Log.Debug("Checking for new frontiers") b.restarted = true b.outstandingRequests = bimap.New[common.Request, ids.ID]() + b.outstandingRequestTimes = make(map[common.Request]time.Time) return b.startBootstrapping(ctx) } 
diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index d5cb9cc763fc..5577f62fa81d 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -16,21 +16,22 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap/interval" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" + + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" ) var errUnknownBlock = errors.New("unknown block") @@ -68,29 +69,39 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes peer := ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(ctx.SubnetID, peer, nil, ids.Empty, 1)) - peerTracker := tracker.NewPeers() totalWeight, err := vdrs.TotalWeight(ctx.SubnetID) require.NoError(err) - startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) - vdrs.RegisterCallbackListener(ctx.SubnetID, startupTracker) + startupTracker := tracker.NewStartup(tracker.NewPeers(), 
totalWeight/2+1) + vdrs.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) - blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) + peerTracker, err := p2p.NewPeerTracker( + ctx.Log, + "", + prometheus.NewRegistry(), + nil, + nil, + ) + require.NoError(err) + + peerTracker.Connected(peer, version.CurrentApp) + return Config{ AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: vdrs, SampleK: vdrs.Count(ctx.SubnetID), StartupTracker: startupTracker, + PeerTracker: peerTracker, Sender: sender, BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, AncestorsMaxContainersReceived: 2000, - Blocked: blocker, + DB: memdb.New(), VM: vm, }, peer, sender, vm } @@ -113,50 +124,49 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { alpha := uint64(10) startupAlpha := alpha - peerTracker := tracker.NewPeers() - startupTracker := tracker.NewStartup(peerTracker, startupAlpha) - peers.RegisterCallbackListener(ctx.SubnetID, startupTracker) + startupTracker := tracker.NewStartup(tracker.NewPeers(), startupAlpha) + peers.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) - blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) + + peerTracker, err := p2p.NewPeerTracker( + ctx.Log, + "", + prometheus.NewRegistry(), + nil, + nil, + ) + require.NoError(err) + cfg := Config{ AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: peers, SampleK: sampleK, StartupTracker: startupTracker, + PeerTracker: peerTracker, Sender: sender, BootstrapTracker: &common.BootstrapTrackerTest{}, Timer: &common.TimerTest{}, AncestorsMaxContainersReceived: 2000, - Blocked: blocker, + DB: memdb.New(), VM: vm, } - 
blkID0 := ids.Empty.Prefix(0) - blkBytes0 := []byte{0} - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } vm.CantLastAccepted = false vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil + require.Equal(snowmantest.GenesisID, blkID) + return snowmantest.Genesis, nil } // create bootstrapper dummyCallback := func(context.Context, uint32) error { cfg.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -183,6 +193,8 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. vdr0 := ids.GenerateTestNodeID() require.NoError(peers.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) + + peerTracker.Connected(vdr0, version.CurrentApp) require.NoError(bs.Connected(context.Background(), vdr0, version.CurrentApp)) require.NoError(bs.Start(context.Background(), 0)) @@ -191,6 +203,8 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. 
vdr := ids.GenerateTestNodeID() require.NoError(peers.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) + + peerTracker.Connected(vdr, version.CurrentApp) require.NoError(bs.Connected(context.Background(), vdr, version.CurrentApp)) require.True(frontierRequested) } @@ -201,44 +215,14 @@ func TestBootstrapperSingleFrontier(t *testing.T) { config, _, _, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Processing, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil - } + blks := snowmantest.BuildChain(1) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -246,97 +230,27 @@ func TestBootstrapperSingleFrontier(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) - acceptedIDs := []ids.ID{blkID1} - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID1: - return blk1, nil - case blkID0: - return blk0, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case 
bytes.Equal(blkBytes, blkBytes1): - return blk1, nil - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - - require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[0:1]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk1.Status()) } -// Requests the unknown block and gets back a Ancestors with unexpected request ID. -// Requests again and gets response from unexpected peer. -// Requests again and gets an unexpected block. +// Requests the unknown block and gets back a Ancestors with unexpected block. // Requests again and gets the expected block. func TestBootstrapperUnknownByzantineResponse(t *testing.T) { require := require.New(t) config, peerID, sender, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - blkID2 := ids.Empty.Prefix(2) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - blkBytes2 := []byte{2} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Unknown, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID2, - StatusV: choices.Processing, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: blkBytes2, - } - - vm.CantSetState = false - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil - } + blks := snowmantest.BuildChain(2) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, 
func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -346,129 +260,42 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { require.NoError(bs.Start(context.Background(), 0)) - parsedBlk1 := false - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case blkID1: - if parsedBlk1 { - return blk1, nil - } - return nil, database.ErrNotFound - case blkID2: - return blk2, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - blk1.StatusV = choices.Processing - parsedBlk1 = true - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - return blk2, nil - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - var requestID uint32 - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) - require.Equal(blkID1, blkID) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) + require.Equal(blks[1].ID(), blkID) requestID = reqID } - vm.CantSetState = false - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) // should request blk1 + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) // should request blk1 oldReqID := requestID - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes0})) // respond with wrong block + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[0:1]))) // respond with wrong 
block require.NotEqual(oldReqID, requestID) - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes1})) + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[1:2]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) + requireStatusIs(require, blks, choices.Accepted) - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } -// There are multiple needed blocks and Ancestors returns one at a time +// There are multiple needed blocks and multiple Ancestors are required func TestBootstrapperPartialFetch(t *testing.T) { require := require.New(t) config, peerID, sender, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - blkID2 := ids.Empty.Prefix(2) - blkID3 := ids.Empty.Prefix(3) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - blkBytes2 := []byte{2} - blkBytes3 := []byte{3} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Unknown, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID2, - StatusV: choices.Unknown, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: blkBytes2, - } - blk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID3, - StatusV: choices.Processing, - }, - ParentV: blk2.IDV, - HeightV: 3, - BytesV: blkBytes3, - } - - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) 
{ - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil - } + blks := snowmantest.BuildChain(4) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -476,146 +303,49 @@ func TestBootstrapperPartialFetch(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) - acceptedIDs := []ids.ID{blkID3} - - parsedBlk1 := false - parsedBlk2 := false - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case blkID1: - if parsedBlk1 { - return blk1, nil - } - return nil, database.ErrNotFound - case blkID2: - if parsedBlk2 { - return blk2, nil - } - return nil, database.ErrNotFound - case blkID3: - return blk3, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - blk1.StatusV = choices.Processing - parsedBlk1 = true - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - blk2.StatusV = choices.Processing - parsedBlk2 = true - return blk2, nil - case bytes.Equal(blkBytes, blkBytes3): - return blk3, nil - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - - requestID := new(uint32) - requested := ids.Empty - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) - require.Contains([]ids.ID{blkID1, blkID2}, blkID) - *requestID = reqID + var ( + requestID uint32 + requested 
ids.ID + ) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) + require.Contains([]ids.ID{blks[1].ID(), blks[3].ID()}, blkID) + requestID = reqID requested = blkID } - require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) // should request blk2 + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) // should request blk3 + require.Equal(blks[3].ID(), requested) - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2})) // respond with blk2 - require.Equal(blkID1, requested) + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[2:4]))) // respond with blk3 and blk2 + require.Equal(blks[1].ID(), requested) - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1})) // respond with blk1 - require.Equal(blkID1, requested) + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[1:2]))) // respond with blk1 require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) + requireStatusIs(require, blks, choices.Accepted) - require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } -// There are multiple needed blocks and some validators do not have all the blocks -// This test was modeled after TestBootstrapperPartialFetch. +// There are multiple needed blocks and some validators do not have all the +// blocks. 
func TestBootstrapperEmptyResponse(t *testing.T) { require := require.New(t) config, peerID, sender, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - blkID2 := ids.Empty.Prefix(2) - blkID3 := ids.Empty.Prefix(3) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - blkBytes2 := []byte{2} - blkBytes3 := []byte{3} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Unknown, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID2, - StatusV: choices.Unknown, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: blkBytes2, - } - blk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID3, - StatusV: choices.Processing, - }, - ParentV: blk2.IDV, - HeightV: 3, - BytesV: blkBytes3, - } - - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil - } + blks := snowmantest.BuildChain(2) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -623,93 +353,31 @@ func TestBootstrapperEmptyResponse(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) - acceptedIDs := []ids.ID{blkID3} - - parsedBlk1 := false - parsedBlk2 := false - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case 
blkID1: - if parsedBlk1 { - return blk1, nil - } - return nil, database.ErrNotFound - case blkID2: - if parsedBlk2 { - return blk2, nil - } - return nil, database.ErrNotFound - case blkID3: - return blk3, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - blk1.StatusV = choices.Processing - parsedBlk1 = true - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - blk2.StatusV = choices.Processing - parsedBlk2 = true - return blk2, nil - case bytes.Equal(blkBytes, blkBytes3): - return blk3, nil - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - - requestedVdr := ids.EmptyNodeID - requestID := uint32(0) - requestedBlock := ids.Empty - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - requestedVdr = vdr + var ( + requestedNodeID ids.NodeID + requestID uint32 + ) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(blks[1].ID(), blkID) + requestedNodeID = nodeID requestID = reqID - requestedBlock = blkID } - // should request blk2 - require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) - require.Equal(peerID, requestedVdr) - require.Equal(blkID2, requestedBlock) - - // add another two validators to the fetch set to test behavior on empty response - newPeerID := ids.GenerateTestNodeID() - bs.fetchFrom.Add(newPeerID) - - newPeerID = ids.GenerateTestNodeID() - bs.fetchFrom.Add(newPeerID) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) + require.Equal(requestedNodeID, peerID) - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2})) - require.Equal(blkID1, requestedBlock) + // Add another peer 
to allow a new node to be selected. A new node should be + // sampled if the prior response was empty. + bs.PeerTracker.Connected(ids.GenerateTestNodeID(), version.CurrentApp) - peerToBlacklist := requestedVdr - - // respond with empty - require.NoError(bs.Ancestors(context.Background(), peerToBlacklist, requestID, nil)) - require.NotEqual(peerToBlacklist, requestedVdr) - require.Equal(blkID1, requestedBlock) - - require.NoError(bs.Ancestors(context.Background(), requestedVdr, requestID, [][]byte{blkBytes1})) // respond with blk1 + require.NoError(bs.Ancestors(context.Background(), requestedNodeID, requestID, nil)) // respond with empty + require.NotEqual(requestedNodeID, peerID) + require.NoError(bs.Ancestors(context.Background(), requestedNodeID, requestID, blocksToBytes(blks[1:2]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) - - // check peerToBlacklist was removed from the fetch set - require.NotContains(bs.fetchFrom, peerToBlacklist) + requireStatusIs(require, blks, choices.Accepted) } // There are multiple needed blocks and Ancestors returns all at once @@ -718,67 +386,14 @@ func TestBootstrapperAncestors(t *testing.T) { config, peerID, sender, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - blkID2 := ids.Empty.Prefix(2) - blkID3 := ids.Empty.Prefix(3) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - blkBytes2 := []byte{2} - blkBytes3 := []byte{3} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Unknown, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - 
IDV: blkID2, - StatusV: choices.Unknown, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: blkBytes2, - } - blk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID3, - StatusV: choices.Processing, - }, - ParentV: blk2.IDV, - HeightV: 3, - BytesV: blkBytes3, - } - - vm.CantSetState = false - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil - } + blks := snowmantest.BuildChain(4) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -788,69 +403,26 @@ func TestBootstrapperAncestors(t *testing.T) { require.NoError(bs.Start(context.Background(), 0)) - acceptedIDs := []ids.ID{blkID3} - - parsedBlk1 := false - parsedBlk2 := false - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case blkID1: - if parsedBlk1 { - return blk1, nil - } - return nil, database.ErrNotFound - case blkID2: - if parsedBlk2 { - return blk2, nil - } - return nil, database.ErrNotFound - case blkID3: - return blk3, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - blk1.StatusV = choices.Processing - parsedBlk1 = true - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - blk2.StatusV = choices.Processing - parsedBlk2 = true - return blk2, nil - case bytes.Equal(blkBytes, blkBytes3): - return blk3, nil - } - 
require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - - requestID := new(uint32) - requested := ids.Empty - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) - require.Contains([]ids.ID{blkID1, blkID2}, blkID) - *requestID = reqID + var ( + requestID uint32 + requested ids.ID + ) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) + require.Equal(blks[3].ID(), blkID) + requestID = reqID requested = blkID } - require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) // should request blk2 - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1})) // respond with blk2 and blk1 - require.Equal(blkID2, requested) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) // should request blk3 + require.Equal(blks[3].ID(), requested) + + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks))) // respond with all the blocks require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) + requireStatusIs(require, blks, choices.Accepted) - require.NoError(bs.startSyncing(context.Background(), acceptedIDs)) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -859,54 +431,14 @@ func TestBootstrapperFinalized(t *testing.T) { config, peerID, sender, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - blkID2 := ids.Empty.Prefix(2) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - blkBytes2 := []byte{2} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: 
choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Unknown, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID2, - StatusV: choices.Unknown, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: blkBytes2, - } + blks := snowmantest.BuildChain(3) + initializeVMWithBlockchain(vm, blks) - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil - } bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -914,66 +446,25 @@ func TestBootstrapperFinalized(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) - parsedBlk1 := false - parsedBlk2 := false - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case blkID1: - if parsedBlk1 { - return blk1, nil - } - return nil, database.ErrNotFound - case blkID2: - if parsedBlk2 { - return blk2, nil - } - return nil, database.ErrNotFound - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - blk1.StatusV = choices.Processing - parsedBlk1 = true - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - blk2.StatusV = choices.Processing - parsedBlk2 = true - return blk2, nil - } - 
require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) requestIDs[blkID] = reqID } - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:3]))) // should request blk1 and blk2 - reqIDBlk2, ok := requestIDs[blkID2] + reqIDBlk2, ok := requestIDs[blks[2].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, blocksToBytes(blks[1:3]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) + requireStatusIs(require, blks, choices.Accepted) - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID2})) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[2:3]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -982,130 +473,14 @@ func TestRestartBootstrapping(t *testing.T) { config, peerID, sender, vm := newConfig(t) - blkID0 := ids.Empty.Prefix(0) - blkID1 := ids.Empty.Prefix(1) - blkID2 := ids.Empty.Prefix(2) - blkID3 := ids.Empty.Prefix(3) - blkID4 := ids.Empty.Prefix(4) - - blkBytes0 := []byte{0} - blkBytes1 := []byte{1} - blkBytes2 := []byte{2} - blkBytes3 := []byte{3} - blkBytes4 := []byte{4} - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - blk1 := &snowman.TestBlock{ 
- TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Unknown, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: blkBytes1, - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID2, - StatusV: choices.Unknown, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: blkBytes2, - } - blk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID3, - StatusV: choices.Unknown, - }, - ParentV: blk2.IDV, - HeightV: 3, - BytesV: blkBytes3, - } - blk4 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID4, - StatusV: choices.Unknown, - }, - ParentV: blk3.IDV, - HeightV: 4, - BytesV: blkBytes4, - } - - vm.CantLastAccepted = false - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - parsedBlk1 := false - parsedBlk2 := false - parsedBlk3 := false - parsedBlk4 := false - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case blkID1: - if parsedBlk1 { - return blk1, nil - } - return nil, database.ErrNotFound - case blkID2: - if parsedBlk2 { - return blk2, nil - } - return nil, database.ErrNotFound - case blkID3: - if parsedBlk3 { - return blk3, nil - } - return nil, database.ErrNotFound - case blkID4: - if parsedBlk4 { - return blk4, nil - } - return nil, database.ErrNotFound - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - blk1.StatusV = choices.Processing - parsedBlk1 = true - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - blk2.StatusV = choices.Processing - parsedBlk2 = true - return blk2, nil - case bytes.Equal(blkBytes, blkBytes3): - blk3.StatusV = choices.Processing - parsedBlk3 = true - return blk3, nil - case 
bytes.Equal(blkBytes, blkBytes4): - blk4.StatusV = choices.Processing - parsedBlk4 = true - return blk4, nil - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } + blks := snowmantest.BuildChain(5) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -1113,51 +488,44 @@ func TestRestartBootstrapping(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) requestIDs[blkID] = reqID } - // Force Accept blk3 - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID3})) // should request blk3 + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) // should request blk3 - reqID, ok := requestIDs[blkID3] + reqID, ok := requestIDs[blks[3].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blkBytes3, blkBytes2})) - - require.Contains(requestIDs, blkID1) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, blocksToBytes(blks[2:4]))) + require.Contains(requestIDs, blks[1].ID()) // Remove request, so we can restart bootstrapping via startSyncing - _, removed := bs.outstandingRequests.DeleteValue(blkID1) + _, removed := bs.outstandingRequests.DeleteValue(blks[1].ID()) require.True(removed) - requestIDs = map[ids.ID]uint32{} + clear(requestIDs) - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID4})) + require.NoError(bs.startSyncing(context.Background(), 
blocksToIDs(blks[4:5]))) - blk1RequestID, ok := requestIDs[blkID1] + blk1RequestID, ok := requestIDs[blks[1].ID()] require.True(ok) - blk4RequestID, ok := requestIDs[blkID4] + blk4RequestID, ok := requestIDs[blks[4].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, blk1RequestID, [][]byte{blkBytes1})) - - require.NotEqual(snow.NormalOp, config.Ctx.State.Get().State) - - require.NoError(bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4})) + require.NoError(bs.Ancestors(context.Background(), peerID, blk1RequestID, blocksToBytes(blks[1:2]))) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blks[0].Status()) + requireStatusIs(require, blks[1:], choices.Processing) + require.NoError(bs.Ancestors(context.Background(), peerID, blk4RequestID, blocksToBytes(blks[4:5]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) - require.Equal(choices.Accepted, blk3.Status()) - require.Equal(choices.Accepted, blk4.Status()) + requireStatusIs(require, blks, choices.Accepted) - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID4})) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[4:5]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -1166,54 +534,17 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { config, peerID, sender, vm := newConfig(t) - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - HeightV: 0, - BytesV: utils.RandomBytes(32), - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: utils.RandomBytes(32), - } + blks := 
snowmantest.BuildChain(2) + initializeVMWithBlockchain(vm, blks) - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk1.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blk0.ID(): - return nil, database.ErrNotFound - case blk1.ID(): - return blk1, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } - } - vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blk0.Bytes()): - return blk0, nil - case bytes.Equal(blkBytes, blk1.Bytes()): - return blk1, nil - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } + blks[0].StatusV = choices.Processing + require.NoError(blks[1].Accept(context.Background())) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -1221,25 +552,24 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) requestIDs[blkID] = reqID } // Force Accept, the already transitively accepted, blk0 - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blk0.ID()})) // should request blk0 + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[0:1]))) // should request blk0 - reqID, ok := requestIDs[blk0.ID()] + reqID, ok := requestIDs[blks[0].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, 
[][]byte{blk0.Bytes()})) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, blocksToBytes(blks[0:1]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) - require.Equal(choices.Processing, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Processing, blks[0].Status()) + require.Equal(choices.Accepted, blks[1].Status()) } func TestBootstrapContinueAfterHalt(t *testing.T) { @@ -1247,42 +577,14 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { config, _, _, vm := newConfig(t) - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: utils.RandomBytes(32), - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk0.IDV, - HeightV: 1, - BytesV: utils.RandomBytes(32), - } - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk1.IDV, - HeightV: 2, - BytesV: utils.RandomBytes(32), - } - - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } + blks := snowmantest.BuildChain(2) + initializeVMWithBlockchain(vm, blks) bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -1290,27 +592,16 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { ) require.NoError(err) - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blk0.ID(): - return blk0, nil - case blk1.ID(): - bs.Halt(context.Background()) - return blk1, nil - case blk2.ID(): - return blk2, nil - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound - } + getBlockF := 
vm.GetBlockF + vm.GetBlockF = func(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + bs.Halt(ctx) + return getBlockF(ctx, blkID) } - vm.CantSetState = false require.NoError(bs.Start(context.Background(), 0)) - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blk2.ID()})) - - require.Equal(1, bs.Blocked.NumMissingIDs()) + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) + require.Equal(1, bs.missingBlockIDs.Len()) } func TestBootstrapNoParseOnNew(t *testing.T) { @@ -1345,72 +636,53 @@ func TestBootstrapNoParseOnNew(t *testing.T) { peer := ids.GenerateTestNodeID() require.NoError(peers.AddStaker(ctx.SubnetID, peer, nil, ids.Empty, 1)) - peerTracker := tracker.NewPeers() totalWeight, err := peers.TotalWeight(ctx.SubnetID) require.NoError(err) - startupTracker := tracker.NewStartup(peerTracker, totalWeight/2+1) - peers.RegisterCallbackListener(ctx.SubnetID, startupTracker) + startupTracker := tracker.NewStartup(tracker.NewPeers(), totalWeight/2+1) + peers.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) - queueDB := memdb.New() - blocker, err := queue.NewWithMissing(queueDB, "", prometheus.NewRegistry()) - require.NoError(err) - - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: utils.RandomBytes(32), - } - - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk0.ID(), - HeightV: 1, - BytesV: utils.RandomBytes(32), - } + blk1 := snowmantest.BuildChild(snowmantest.Genesis) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blk0.ID(), blkID) - return blk0, nil + 
require.Equal(snowmantest.GenesisID, blkID) + return snowmantest.Genesis, nil } - pushed, err := blocker.Push(context.Background(), &blockJob{ - log: logging.NoLog{}, - numAccepted: prometheus.NewCounter(prometheus.CounterOpts{}), - numDropped: prometheus.NewCounter(prometheus.CounterOpts{}), - blk: blk1, - vm: vm, - }) + intervalDB := memdb.New() + tree, err := interval.NewTree(intervalDB) + require.NoError(err) + _, err = interval.Add(intervalDB, tree, 0, blk1.Height(), blk1.Bytes()) require.NoError(err) - require.True(pushed) - - require.NoError(blocker.Commit()) vm.GetBlockF = nil - blocker, err = queue.NewWithMissing(queueDB, "", prometheus.NewRegistry()) + peerTracker, err := p2p.NewPeerTracker( + ctx.Log, + "", + prometheus.NewRegistry(), + nil, + nil, + ) require.NoError(err) + peerTracker.Connected(peer, version.CurrentApp) + config := Config{ AllGetsServer: snowGetHandler, Ctx: ctx, Beacons: peers, SampleK: peers.Count(ctx.SubnetID), StartupTracker: startupTracker, + PeerTracker: peerTracker, Sender: sender, BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, AncestorsMaxContainersReceived: 2000, - Blocked: blocker, + DB: intervalDB, VM: vm, } @@ -1418,7 +690,7 @@ func TestBootstrapNoParseOnNew(t *testing.T) { config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -1432,55 +704,59 @@ func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { config, peerID, sender, vm := newConfig(t) - var ( - blkID0 = ids.GenerateTestID() - blkBytes0 = utils.RandomBytes(1024) - blk0 = &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }, - HeightV: 0, - BytesV: blkBytes0, - } - - blkID1 = ids.GenerateTestID() - blkBytes1 = utils.RandomBytes(1024) - blk1 = &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: 
choices.Processing, - }, - ParentV: blk0.IDV, - HeightV: blk0.HeightV + 1, - BytesV: blkBytes1, - } + blks := snowmantest.BuildChain(3) + initializeVMWithBlockchain(vm, blks) - blkID2 = ids.GenerateTestID() - blkBytes2 = utils.RandomBytes(1024) - blk2 = &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blkID2, - StatusV: choices.Processing, - }, - ParentV: blk1.IDV, - HeightV: blk1.HeightV + 1, - BytesV: blkBytes2, - } + bs, err := New( + config, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) + require.NoError(err) - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return blk0.ID(), nil - } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(blkID0, blkID) - return blk0, nil + require.NoError(bs.Start(context.Background(), 0)) + + requestIDs := map[ids.ID]uint32{} + sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, nodeID) + requestIDs[blkID] = reqID } + + require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:3]))) // should request blk1 and blk2 + + reqIDBlk1, ok := requestIDs[blks[1].ID()] + require.True(ok) + reqIDBlk2, ok := requestIDs[blks[2].ID()] + require.True(ok) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, blocksToBytes(blks[1:3]))) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + requireStatusIs(require, blks, choices.Accepted) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk1, blocksToBytes(blks[1:2]))) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) +} + +func TestBootstrapperRollbackOnSetState(t *testing.T) { + require := require.New(t) + + config, _, _, vm := newConfig(t) + + blks := snowmantest.BuildChain(2) + initializeVMWithBlockchain(vm, blks) + + blks[1].StatusV = 
choices.Accepted + bs, err := New( config, func(context.Context, uint32) error { config.Ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) return nil @@ -1488,62 +764,68 @@ func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { ) require.NoError(err) - vm.CantSetState = false + vm.SetStateF = func(context.Context, snow.State) error { + blks[1].StatusV = choices.Processing + return nil + } + require.NoError(bs.Start(context.Background(), 0)) + require.Equal(blks[0].HeightV, bs.startingHeight) +} - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case blkID0: - return blk0, nil - case blkID1: - if blk1.StatusV == choices.Accepted { - return blk1, nil +func initializeVMWithBlockchain(vm *block.TestVM, blocks []*snowmantest.Block) { + vm.CantSetState = false + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + var ( + lastAcceptedID ids.ID + lastAcceptedHeight uint64 + ) + for _, blk := range blocks { + height := blk.Height() + if blk.Status() == choices.Accepted && height >= lastAcceptedHeight { + lastAcceptedID = blk.ID() + lastAcceptedHeight = height } - return nil, database.ErrNotFound - case blkID2: - if blk2.StatusV == choices.Accepted { - return blk2, nil + } + return lastAcceptedID, nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + for _, blk := range blocks { + if blk.Status() == choices.Accepted && blk.ID() == blkID { + return blk, nil } - return nil, database.ErrNotFound - default: - require.FailNow(database.ErrNotFound.Error()) - return nil, database.ErrNotFound } + return nil, database.ErrNotFound } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { - switch { - case bytes.Equal(blkBytes, blkBytes0): - return blk0, nil - case bytes.Equal(blkBytes, blkBytes1): - return blk1, nil - case bytes.Equal(blkBytes, blkBytes2): - 
return blk2, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock + for _, blk := range blocks { + if bytes.Equal(blk.Bytes(), blkBytes) { + return blk, nil + } } + return nil, errUnknownBlock } +} - requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { - require.Equal(peerID, vdr) - requestIDs[blkID] = reqID +func requireStatusIs(require *require.Assertions, blocks []*snowmantest.Block, status choices.Status) { + for i, blk := range blocks { + require.Equal(status, blk.Status(), i) } +} - require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 - - reqIDBlk1, ok := requestIDs[blkID1] - require.True(ok) - reqIDBlk2, ok := requestIDs[blkID2] - require.True(ok) - - require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) - - require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) - require.Equal(choices.Accepted, blk0.Status()) - require.Equal(choices.Accepted, blk1.Status()) - require.Equal(choices.Accepted, blk2.Status()) +func blocksToIDs(blocks []*snowmantest.Block) []ids.ID { + blkIDs := make([]ids.ID, len(blocks)) + for i, blk := range blocks { + blkIDs[i] = blk.ID() + } + return blkIDs +} - require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk1, [][]byte{blkBytes1})) - require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) +func blocksToBytes(blocks []*snowmantest.Block) [][]byte { + numBlocks := len(blocks) + blkBytes := make([][]byte, numBlocks) + for i, blk := range blocks { + blkBytes[numBlocks-i-1] = blk.Bytes() + } + return blkBytes } diff --git a/snow/engine/snowman/bootstrap/config.go b/snow/engine/snowman/bootstrap/config.go index 6fb8894db96f..bcf57f02e832 100644 --- a/snow/engine/snowman/bootstrap/config.go +++ b/snow/engine/snowman/bootstrap/config.go @@ -4,9 +4,10 @@ package bootstrap import ( + 
"github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" @@ -24,16 +25,16 @@ type Config struct { BootstrapTracker common.BootstrapTracker Timer common.Timer + // PeerTracker manages the set of nodes that we fetch the next block from. + PeerTracker *p2p.PeerTracker + // This node will only consider the first [AncestorsMaxContainersReceived] // containers in an ancestors message it receives. AncestorsMaxContainersReceived int - // Blocked tracks operations that are blocked on blocks - // - // It should be guaranteed that `MissingIDs` should contain all IDs - // referenced by the `MissingDependencies` that have not already been added - // to the queue. - Blocked *queue.JobsWithMissing + // Database used to track the fetched, but not yet executed, blocks during + // bootstrapping. + DB database.Database VM block.ChainVM diff --git a/snow/engine/snowman/bootstrap/interval/blocks.go b/snow/engine/snowman/bootstrap/interval/blocks.go new file mode 100644 index 000000000000..d7d053c17876 --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/blocks.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package interval + +import "github.com/ava-labs/avalanchego/database" + +// Add the block to the tree and return if the parent block should be fetched, +// but wasn't desired before. 
+func Add( + db database.KeyValueWriterDeleter, + tree *Tree, + lastAcceptedHeight uint64, + height uint64, + blkBytes []byte, +) (bool, error) { + if height <= lastAcceptedHeight || tree.Contains(height) { + return false, nil + } + + if err := PutBlock(db, height, blkBytes); err != nil { + return false, err + } + if err := tree.Add(db, height); err != nil { + return false, err + } + + // We know that height is greater than lastAcceptedHeight here, so height-1 + // is guaranteed not to underflow. + nextHeight := height - 1 + return nextHeight != lastAcceptedHeight && !tree.Contains(nextHeight), nil +} + +// Remove the block from the tree. +func Remove( + db database.KeyValueWriterDeleter, + tree *Tree, + height uint64, +) error { + if err := DeleteBlock(db, height); err != nil { + return err + } + return tree.Remove(db, height) +} diff --git a/snow/engine/snowman/bootstrap/interval/blocks_test.go b/snow/engine/snowman/bootstrap/interval/blocks_test.go new file mode 100644 index 000000000000..d11a6fe434aa --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/blocks_test.go @@ -0,0 +1,137 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package interval + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" +) + +func TestAdd(t *testing.T) { + tests := []struct { + name string + existing []uint64 + lastAcceptedHeight uint64 + height uint64 + blkBytes []byte + expectedToPersist bool + expectedToWantParent bool + }{ + { + name: "height already accepted", + lastAcceptedHeight: 1, + height: 1, + blkBytes: []byte{1}, + expectedToPersist: false, + expectedToWantParent: false, + }, + { + name: "height already added", + existing: []uint64{1}, + lastAcceptedHeight: 0, + height: 1, + blkBytes: []byte{1}, + expectedToPersist: false, + expectedToWantParent: false, + }, + { + name: "next block is desired", + lastAcceptedHeight: 0, + height: 2, + blkBytes: []byte{2}, + expectedToPersist: true, + expectedToWantParent: true, + }, + { + name: "next block is accepted", + lastAcceptedHeight: 0, + height: 1, + blkBytes: []byte{1}, + expectedToPersist: true, + expectedToWantParent: false, + }, + { + name: "next block already added", + existing: []uint64{1}, + lastAcceptedHeight: 0, + height: 2, + blkBytes: []byte{2}, + expectedToPersist: true, + expectedToWantParent: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + db := memdb.New() + tree, err := NewTree(db) + require.NoError(err) + for _, add := range test.existing { + require.NoError(tree.Add(db, add)) + } + + wantsParent, err := Add( + db, + tree, + test.lastAcceptedHeight, + test.height, + test.blkBytes, + ) + require.NoError(err) + require.Equal(test.expectedToWantParent, wantsParent) + + blkBytes, err := GetBlock(db, test.height) + if test.expectedToPersist { + require.NoError(err) + require.Equal(test.blkBytes, blkBytes) + require.True(tree.Contains(test.height)) + } else { + require.ErrorIs(err, database.ErrNotFound) + } + }) + } +} + +func TestRemove(t *testing.T) { + require := 
require.New(t) + + db := memdb.New() + tree, err := NewTree(db) + require.NoError(err) + lastAcceptedHeight := uint64(1) + height := uint64(5) + blkBytes := []byte{5} + + _, err = Add( + db, + tree, + lastAcceptedHeight, + height, + blkBytes, + ) + require.NoError(err) + + // Verify that the database has the block. + storedBlkBytes, err := GetBlock(db, height) + require.NoError(err) + require.Equal(blkBytes, storedBlkBytes) + require.Equal(uint64(1), tree.Len()) + + require.NoError(Remove( + db, + tree, + height, + )) + require.Zero(tree.Len()) + + // Verify that the database no longer contains the block. + _, err = GetBlock(db, height) + require.ErrorIs(err, database.ErrNotFound) + require.Zero(tree.Len()) +} diff --git a/snow/engine/snowman/bootstrap/interval/interval.go b/snow/engine/snowman/bootstrap/interval/interval.go new file mode 100644 index 000000000000..35ae260e446a --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/interval.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package interval + +import "math" + +type Interval struct { + LowerBound uint64 + UpperBound uint64 +} + +func (i *Interval) Less(other *Interval) bool { + return i.UpperBound < other.UpperBound +} + +func (i *Interval) Contains(height uint64) bool { + return i != nil && + i.LowerBound <= height && + height <= i.UpperBound +} + +// AdjacentToLowerBound returns true if height is 1 less than lowerBound. +func (i *Interval) AdjacentToLowerBound(height uint64) bool { + return i != nil && + height < math.MaxUint64 && + height+1 == i.LowerBound +} + +// AdjacentToUpperBound returns true if height is 1 greater than upperBound. 
+func (i *Interval) AdjacentToUpperBound(height uint64) bool { + return i != nil && + i.UpperBound < math.MaxUint64 && + i.UpperBound+1 == height +} diff --git a/snow/engine/snowman/bootstrap/interval/interval_test.go b/snow/engine/snowman/bootstrap/interval/interval_test.go new file mode 100644 index 000000000000..2213302925fd --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/interval_test.go @@ -0,0 +1,255 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package interval + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIntervalLess(t *testing.T) { + tests := []struct { + name string + left *Interval + right *Interval + expected bool + }{ + { + name: "less", + left: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + right: &Interval{ + LowerBound: 11, + UpperBound: 11, + }, + expected: true, + }, + { + name: "greater", + left: &Interval{ + LowerBound: 11, + UpperBound: 11, + }, + right: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + expected: false, + }, + { + name: "equal", + left: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + right: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + expected: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + less := test.left.Less(test.right) + require.Equal(t, test.expected, less) + }) + } +} + +func TestIntervalContains(t *testing.T) { + tests := []struct { + name string + interval *Interval + height uint64 + expected bool + }{ + { + name: "nil does not contain anything", + interval: nil, + height: 10, + expected: false, + }, + { + name: "too low", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 9, + expected: false, + }, + { + name: "inside", + interval: &Interval{ + LowerBound: 9, + UpperBound: 11, + }, + height: 10, + expected: true, + }, + { + name: "equal", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + 
height: 10, + expected: true, + }, + { + name: "too high", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 11, + expected: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + contains := test.interval.Contains(test.height) + require.Equal(t, test.expected, contains) + }) + } +} + +func TestIntervalAdjacentToLowerBound(t *testing.T) { + tests := []struct { + name string + interval *Interval + height uint64 + expected bool + }{ + { + name: "nil is not adjacent to anything", + interval: nil, + height: 10, + expected: false, + }, + { + name: "too low", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 8, + expected: false, + }, + { + name: "equal", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 10, + expected: false, + }, + { + name: "adjacent to both", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 9, + expected: true, + }, + { + name: "adjacent to lower", + interval: &Interval{ + LowerBound: 10, + UpperBound: 11, + }, + height: 9, + expected: true, + }, + { + name: "check for overflow", + interval: &Interval{ + LowerBound: 0, + UpperBound: math.MaxUint64 - 1, + }, + height: math.MaxUint64, + expected: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + adjacent := test.interval.AdjacentToLowerBound(test.height) + require.Equal(t, test.expected, adjacent) + }) + } +} + +func TestIntervalAdjacentToUpperBound(t *testing.T) { + tests := []struct { + name string + interval *Interval + height uint64 + expected bool + }{ + { + name: "nil is not adjacent to anything", + interval: nil, + height: 10, + expected: false, + }, + { + name: "too low", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 8, + expected: false, + }, + { + name: "equal", + interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 10, + expected: false, + }, + { + name: "adjacent to both", + 
interval: &Interval{ + LowerBound: 10, + UpperBound: 10, + }, + height: 11, + expected: true, + }, + { + name: "adjacent to higher", + interval: &Interval{ + LowerBound: 9, + UpperBound: 10, + }, + height: 11, + expected: true, + }, + { + name: "check for overflow", + interval: &Interval{ + LowerBound: 1, + UpperBound: math.MaxUint64, + }, + height: 0, + expected: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + adjacent := test.interval.AdjacentToUpperBound(test.height) + require.Equal(t, test.expected, adjacent) + }) + } +} diff --git a/snow/engine/snowman/bootstrap/interval/state.go b/snow/engine/snowman/bootstrap/interval/state.go new file mode 100644 index 000000000000..8ba06824eea2 --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/state.go @@ -0,0 +1,109 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package interval + +import ( + "errors" + + "github.com/ava-labs/avalanchego/database" +) + +const ( + intervalPrefixByte byte = iota + blockPrefixByte + + prefixLen = 1 +) + +var ( + intervalPrefix = []byte{intervalPrefixByte} + blockPrefix = []byte{blockPrefixByte} + + errInvalidKeyLength = errors.New("invalid key length") +) + +func GetIntervals(db database.Iteratee) ([]*Interval, error) { + it := db.NewIteratorWithPrefix(intervalPrefix) + defer it.Release() + + var intervals []*Interval + for it.Next() { + dbKey := it.Key() + if len(dbKey) < prefixLen { + return nil, errInvalidKeyLength + } + + intervalKey := dbKey[prefixLen:] + upperBound, err := database.ParseUInt64(intervalKey) + if err != nil { + return nil, err + } + + value := it.Value() + lowerBound, err := database.ParseUInt64(value) + if err != nil { + return nil, err + } + + intervals = append(intervals, &Interval{ + LowerBound: lowerBound, + UpperBound: upperBound, + }) + } + return intervals, it.Error() +} + +func PutInterval(db database.KeyValueWriter, upperBound uint64, 
lowerBound uint64) error { + return database.PutUInt64(db, makeIntervalKey(upperBound), lowerBound) +} + +func DeleteInterval(db database.KeyValueDeleter, upperBound uint64) error { + return db.Delete(makeIntervalKey(upperBound)) +} + +// makeIntervalKey uses the upperBound rather than the lowerBound because blocks +// are fetched from tip towards genesis. This means that it is more common for +// the lowerBound to change than the upperBound. Modifying the lowerBound only +// requires a single write rather than a write and a delete when modifying the +// upperBound. +func makeIntervalKey(upperBound uint64) []byte { + intervalKey := database.PackUInt64(upperBound) + return append(intervalPrefix, intervalKey...) +} + +// GetBlockIterator returns a block iterator that will produce values +// corresponding to persisted blocks in order of increasing height. +func GetBlockIterator(db database.Iteratee) database.Iterator { + return db.NewIteratorWithPrefix(blockPrefix) +} + +// GetBlockIteratorWithStart returns a block iterator that will produce values +// corresponding to persisted blocks in order of increasing height starting at +// [height]. +func GetBlockIteratorWithStart(db database.Iteratee, height uint64) database.Iterator { + return db.NewIteratorWithStartAndPrefix( + makeBlockKey(height), + blockPrefix, + ) +} + +func GetBlock(db database.KeyValueReader, height uint64) ([]byte, error) { + return db.Get(makeBlockKey(height)) +} + +func PutBlock(db database.KeyValueWriter, height uint64, bytes []byte) error { + return db.Put(makeBlockKey(height), bytes) +} + +func DeleteBlock(db database.KeyValueDeleter, height uint64) error { + return db.Delete(makeBlockKey(height)) +} + +// makeBlockKey ensures that the returned key maintains the same sorted order as +// the height. This ensures that database iteration of block keys will iterate +// from lower height to higher height. 
+func makeBlockKey(height uint64) []byte { + blockKey := database.PackUInt64(height) + return append(blockPrefix, blockKey...) +} diff --git a/snow/engine/snowman/bootstrap/interval/tree.go b/snow/engine/snowman/bootstrap/interval/tree.go new file mode 100644 index 000000000000..51d1083c1e21 --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/tree.go @@ -0,0 +1,188 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package interval + +import ( + "github.com/google/btree" + + "github.com/ava-labs/avalanchego/database" +) + +// TODO: Benchmark what degree to use. +const treeDegree = 2 + +// Tree implements a set of numbers by tracking intervals. It supports adding +// and removing new values. It also allows checking if a value is included in +// the set. +// +// Tree is more space efficient than a map implementation if the values that it +// contains are continuous. The tree takes O(n) space where n is the number of +// continuous ranges that have been inserted into the tree. +// +// Add, Remove, and Contains all run in O(log n) where n is the number of +// continuous ranges that have been inserted into the tree. +type Tree struct { + knownHeights *btree.BTreeG[*Interval] + // If knownHeights contains the full range [0, MaxUint64], then + // numKnownHeights overflows to 0. + numKnownHeights uint64 +} + +// NewTree creates a new interval tree from the provided database. +// +// It is assumed that persisted intervals are non-overlapping. Providing a +// database with overlapping intervals will result in undefined behavior of the +// structure. 
+func NewTree(db database.Iteratee) (*Tree, error) { + intervals, err := GetIntervals(db) + if err != nil { + return nil, err + } + + var ( + knownHeights = btree.NewG(treeDegree, (*Interval).Less) + numKnownHeights uint64 + ) + for _, i := range intervals { + knownHeights.ReplaceOrInsert(i) + numKnownHeights += i.UpperBound - i.LowerBound + 1 + } + return &Tree{ + knownHeights: knownHeights, + numKnownHeights: numKnownHeights, + }, nil +} + +func (t *Tree) Add(db database.KeyValueWriterDeleter, height uint64) error { + var ( + newInterval = &Interval{ + LowerBound: height, + UpperBound: height, + } + upper *Interval + lower *Interval + ) + t.knownHeights.AscendGreaterOrEqual(newInterval, func(item *Interval) bool { + upper = item + return false + }) + if upper.Contains(height) { + // height is already in the tree + return nil + } + + t.knownHeights.DescendLessOrEqual(newInterval, func(item *Interval) bool { + lower = item + return false + }) + + t.numKnownHeights++ + + var ( + adjacentToLowerBound = upper.AdjacentToLowerBound(height) + adjacentToUpperBound = lower.AdjacentToUpperBound(height) + ) + switch { + case adjacentToLowerBound && adjacentToUpperBound: + // the upper and lower ranges should be merged + if err := DeleteInterval(db, lower.UpperBound); err != nil { + return err + } + upper.LowerBound = lower.LowerBound + t.knownHeights.Delete(lower) + return PutInterval(db, upper.UpperBound, lower.LowerBound) + case adjacentToLowerBound: + // the upper range should be extended by one on the lower side + upper.LowerBound = height + return PutInterval(db, upper.UpperBound, height) + case adjacentToUpperBound: + // the lower range should be extended by one on the upper side + if err := DeleteInterval(db, lower.UpperBound); err != nil { + return err + } + lower.UpperBound = height + return PutInterval(db, height, lower.LowerBound) + default: + t.knownHeights.ReplaceOrInsert(newInterval) + return PutInterval(db, height, height) + } +} + +func (t *Tree) Remove(db 
database.KeyValueWriterDeleter, height uint64) error { + var ( + newInterval = &Interval{ + LowerBound: height, + UpperBound: height, + } + higher *Interval + ) + t.knownHeights.AscendGreaterOrEqual(newInterval, func(item *Interval) bool { + higher = item + return false + }) + if !higher.Contains(height) { + // height isn't in the tree + return nil + } + + t.numKnownHeights-- + + switch { + case higher.LowerBound == higher.UpperBound: + t.knownHeights.Delete(higher) + return DeleteInterval(db, higher.UpperBound) + case higher.LowerBound == height: + higher.LowerBound++ + return PutInterval(db, higher.UpperBound, higher.LowerBound) + case higher.UpperBound == height: + if err := DeleteInterval(db, higher.UpperBound); err != nil { + return err + } + higher.UpperBound-- + return PutInterval(db, higher.UpperBound, higher.LowerBound) + default: + newInterval.LowerBound = higher.LowerBound + newInterval.UpperBound = height - 1 + t.knownHeights.ReplaceOrInsert(newInterval) + if err := PutInterval(db, newInterval.UpperBound, newInterval.LowerBound); err != nil { + return err + } + + higher.LowerBound = height + 1 + return PutInterval(db, higher.UpperBound, higher.LowerBound) + } +} + +func (t *Tree) Contains(height uint64) bool { + var ( + i = &Interval{ + LowerBound: height, + UpperBound: height, + } + higher *Interval + ) + t.knownHeights.AscendGreaterOrEqual(i, func(item *Interval) bool { + higher = item + return false + }) + return higher.Contains(height) +} + +func (t *Tree) Flatten() []*Interval { + intervals := make([]*Interval, 0, t.knownHeights.Len()) + t.knownHeights.Ascend(func(item *Interval) bool { + intervals = append(intervals, item) + return true + }) + return intervals +} + +// Len returns the number of heights in the tree; not the number of intervals. +// +// Because Len returns a uint64 and is describing the number of values in the +// range of uint64s, it will return 0 if the tree contains the full interval +// [0, MaxUint64]. 
+func (t *Tree) Len() uint64 { + return t.numKnownHeights +} diff --git a/snow/engine/snowman/bootstrap/interval/tree_test.go b/snow/engine/snowman/bootstrap/interval/tree_test.go new file mode 100644 index 000000000000..396e4d281a91 --- /dev/null +++ b/snow/engine/snowman/bootstrap/interval/tree_test.go @@ -0,0 +1,389 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package interval + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" +) + +func newTree(require *require.Assertions, db database.Database, intervals []*Interval) *Tree { + tree, err := NewTree(db) + require.NoError(err) + + for _, toAdd := range intervals { + for i := toAdd.LowerBound; i <= toAdd.UpperBound; i++ { + require.NoError(tree.Add(db, i)) + } + } + return tree +} + +func TestTreeAdd(t *testing.T) { + tests := []struct { + name string + toAdd []*Interval + expected []*Interval + expectedLen uint64 + }{ + { + name: "single addition", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expectedLen: 1, + }, + { + name: "extend above", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + expectedLen: 2, + }, + { + name: "extend below", + toAdd: []*Interval{ + { + LowerBound: 11, + UpperBound: 11, + }, + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + expectedLen: 2, + }, + { + name: "merge", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + { + LowerBound: 12, + UpperBound: 12, + }, + { + LowerBound: 11, + UpperBound: 11, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 12, + }, + }, + expectedLen: 3, + }, 
+ { + name: "ignore duplicate", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + { + LowerBound: 11, + UpperBound: 11, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + expectedLen: 2, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + db := memdb.New() + treeFromAdditions := newTree(require, db, test.toAdd) + require.Equal(test.expected, treeFromAdditions.Flatten()) + require.Equal(test.expectedLen, treeFromAdditions.Len()) + + treeFromDB := newTree(require, db, nil) + require.Equal(test.expected, treeFromDB.Flatten()) + require.Equal(test.expectedLen, treeFromDB.Len()) + }) + } +} + +func TestTreeRemove(t *testing.T) { + tests := []struct { + name string + toAdd []*Interval + toRemove []*Interval + expected []*Interval + expectedLen uint64 + }{ + { + name: "single removal", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + toRemove: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expected: []*Interval{}, + expectedLen: 0, + }, + { + name: "reduce above", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + toRemove: []*Interval{ + { + LowerBound: 11, + UpperBound: 11, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expectedLen: 1, + }, + { + name: "reduce below", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + toRemove: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expected: []*Interval{ + { + LowerBound: 11, + UpperBound: 11, + }, + }, + expectedLen: 1, + }, + { + name: "split", + toAdd: []*Interval{ + { + LowerBound: 10, + UpperBound: 12, + }, + }, + toRemove: []*Interval{ + { + LowerBound: 11, + UpperBound: 11, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + { + LowerBound: 12, + UpperBound: 12, + }, + }, + expectedLen: 2, + }, + { + name: "ignore missing", + toAdd: 
[]*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + toRemove: []*Interval{ + { + LowerBound: 11, + UpperBound: 11, + }, + }, + expected: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + expectedLen: 1, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + db := memdb.New() + treeFromModifications := newTree(require, db, test.toAdd) + for _, toRemove := range test.toRemove { + for i := toRemove.LowerBound; i <= toRemove.UpperBound; i++ { + require.NoError(treeFromModifications.Remove(db, i)) + } + } + require.Equal(test.expected, treeFromModifications.Flatten()) + require.Equal(test.expectedLen, treeFromModifications.Len()) + + treeFromDB := newTree(require, db, nil) + require.Equal(test.expected, treeFromDB.Flatten()) + require.Equal(test.expectedLen, treeFromDB.Len()) + }) + } +} + +func TestTreeContains(t *testing.T) { + tests := []struct { + name string + tree []*Interval + height uint64 + expected bool + }{ + { + name: "below", + tree: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + height: 9, + expected: false, + }, + { + name: "above", + tree: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + height: 11, + expected: false, + }, + { + name: "equal both", + tree: []*Interval{ + { + LowerBound: 10, + UpperBound: 10, + }, + }, + height: 10, + expected: true, + }, + { + name: "equal lower", + tree: []*Interval{ + { + LowerBound: 10, + UpperBound: 11, + }, + }, + height: 10, + expected: true, + }, + { + name: "equal upper", + tree: []*Interval{ + { + LowerBound: 9, + UpperBound: 10, + }, + }, + height: 10, + expected: true, + }, + { + name: "inside", + tree: []*Interval{ + { + LowerBound: 9, + UpperBound: 11, + }, + }, + height: 10, + expected: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tree := newTree(require, memdb.New(), test.tree) + require.Equal(test.expected, 
tree.Contains(test.height)) + }) + } +} + +func TestTreeLenOverflow(t *testing.T) { + require := require.New(t) + + db := memdb.New() + require.NoError(PutInterval(db, math.MaxUint64, 0)) + + tree, err := NewTree(db) + require.NoError(err) + require.Zero(tree.Len()) + require.True(tree.Contains(0)) + require.True(tree.Contains(math.MaxUint64 / 2)) + require.True(tree.Contains(math.MaxUint64)) + + require.NoError(tree.Remove(db, 5)) + require.Equal(uint64(math.MaxUint64), tree.Len()) + + require.NoError(tree.Add(db, 5)) + require.Zero(tree.Len()) +} diff --git a/snow/engine/snowman/bootstrap/metrics.go b/snow/engine/snowman/bootstrap/metrics.go index f6ad90d16419..7b28b8b969b7 100644 --- a/snow/engine/snowman/bootstrap/metrics.go +++ b/snow/engine/snowman/bootstrap/metrics.go @@ -10,39 +10,24 @@ import ( ) type metrics struct { - numFetched, numDropped, numAccepted prometheus.Counter - fetchETA prometheus.Gauge + numFetched, numAccepted prometheus.Counter } -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { +func newMetrics(registerer prometheus.Registerer) (*metrics, error) { m := &metrics{ numFetched: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "fetched", - Help: "Number of blocks fetched during bootstrapping", - }), - numDropped: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "dropped", - Help: "Number of blocks dropped during bootstrapping", + Name: "bs_fetched", + Help: "Number of blocks fetched during bootstrapping", }), numAccepted: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "accepted", - Help: "Number of blocks accepted during bootstrapping", - }), - fetchETA: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "eta_fetching_complete", - Help: "ETA in nanoseconds until fetching phase of bootstrapping finishes", + Name: "bs_accepted", + Help: "Number of blocks accepted during bootstrapping", }), } err 
:= utils.Err( registerer.Register(m.numFetched), - registerer.Register(m.numDropped), registerer.Register(m.numAccepted), - registerer.Register(m.fetchETA), ) return m, err } diff --git a/snow/engine/snowman/bootstrap/storage.go b/snow/engine/snowman/bootstrap/storage.go new file mode 100644 index 000000000000..7dafc3a40225 --- /dev/null +++ b/snow/engine/snowman/bootstrap/storage.go @@ -0,0 +1,278 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bootstrap + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap/interval" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer" +) + +const ( + batchWritePeriod = 64 + iteratorReleasePeriod = 1024 + logPeriod = 5 * time.Second + minBlocksToCompact = 5000 +) + +// getMissingBlockIDs returns the ID of the blocks that should be fetched to +// attempt to make a single continuous range from +// (lastAcceptedHeight, highestTrackedHeight]. +// +// For example, if the tree currently contains heights [1, 4, 6, 7] and the +// lastAcceptedHeight is 2, this function will return the IDs corresponding to +// blocks [3, 5]. 
+func getMissingBlockIDs( + ctx context.Context, + db database.KeyValueReader, + parser block.Parser, + tree *interval.Tree, + lastAcceptedHeight uint64, +) (set.Set[ids.ID], error) { + var ( + missingBlocks set.Set[ids.ID] + intervals = tree.Flatten() + lastHeightToFetch = lastAcceptedHeight + 1 + ) + for _, i := range intervals { + if i.LowerBound <= lastHeightToFetch { + continue + } + + blkBytes, err := interval.GetBlock(db, i.LowerBound) + if err != nil { + return nil, err + } + + blk, err := parser.ParseBlock(ctx, blkBytes) + if err != nil { + return nil, err + } + + parentID := blk.Parent() + missingBlocks.Add(parentID) + } + return missingBlocks, nil +} + +// process a series of consecutive blocks starting at [blk]. +// +// - blk is a block that is assumed to have been marked as acceptable by the +// bootstrapping engine. +// - ancestors is a set of blocks that can be used to lookup blocks. +// +// If [blk]'s height is <= the last accepted height, then it will be removed +// from the missingIDs set. +// +// Returns a newly discovered blockID that should be fetched. +func process( + db database.KeyValueWriterDeleter, + tree *interval.Tree, + missingBlockIDs set.Set[ids.ID], + lastAcceptedHeight uint64, + blk snowman.Block, + ancestors map[ids.ID]snowman.Block, +) (ids.ID, bool, error) { + for { + // It's possible that missingBlockIDs contain values contained inside of + // ancestors. So, it's important to remove IDs from the set for each + // iteration, not just the first block's ID. + blkID := blk.ID() + missingBlockIDs.Remove(blkID) + + height := blk.Height() + blkBytes := blk.Bytes() + wantsParent, err := interval.Add( + db, + tree, + lastAcceptedHeight, + height, + blkBytes, + ) + if err != nil || !wantsParent { + return ids.Empty, false, err + } + + // If the parent was provided in the ancestors set, we can immediately + // process it. 
+ parentID := blk.Parent() + parent, ok := ancestors[parentID] + if !ok { + return parentID, true, nil + } + + blk = parent + } +} + +// execute all the blocks tracked by the tree. If a block is in the tree but is +// already accepted based on the lastAcceptedHeight, it will be removed from the +// tree but not executed. +// +// execute assumes that getMissingBlockIDs would return an empty set. +// +// TODO: Replace usage of haltable with context cancellation. +func execute( + ctx context.Context, + haltable common.Haltable, + log logging.Func, + db database.Database, + parser block.Parser, + tree *interval.Tree, + lastAcceptedHeight uint64, +) error { + totalNumberToProcess := tree.Len() + if totalNumberToProcess >= minBlocksToCompact { + log("compacting database before executing blocks...") + if err := db.Compact(nil, nil); err != nil { + // Not a fatal error, log and move on. + log("failed to compact bootstrap database before executing blocks", + zap.Error(err), + ) + } + } + + var ( + batch = db.NewBatch() + processedSinceBatchWrite uint + writeBatch = func() error { + if processedSinceBatchWrite == 0 { + return nil + } + processedSinceBatchWrite = 0 + + if err := batch.Write(); err != nil { + return err + } + batch.Reset() + return nil + } + + iterator = interval.GetBlockIterator(db) + processedSinceIteratorRelease uint + + startTime = time.Now() + timeOfNextLog = startTime.Add(logPeriod) + ) + defer func() { + iterator.Release() + + var ( + numProcessed = totalNumberToProcess - tree.Len() + halted = haltable.Halted() + ) + if numProcessed >= minBlocksToCompact && !halted { + log("compacting database after executing blocks...") + if err := db.Compact(nil, nil); err != nil { + // Not a fatal error, log and move on. 
+ log("failed to compact bootstrap database after executing blocks", + zap.Error(err), + ) + } + } + + log("executed blocks", + zap.Uint64("numExecuted", numProcessed), + zap.Uint64("numToExecute", totalNumberToProcess), + zap.Bool("halted", halted), + zap.Duration("duration", time.Since(startTime)), + ) + }() + + log("executing blocks", + zap.Uint64("numToExecute", totalNumberToProcess), + ) + + for !haltable.Halted() && iterator.Next() { + blkBytes := iterator.Value() + blk, err := parser.ParseBlock(ctx, blkBytes) + if err != nil { + return err + } + + height := blk.Height() + if err := interval.Remove(batch, tree, height); err != nil { + return err + } + + // Periodically write the batch to disk to avoid memory pressure. + processedSinceBatchWrite++ + if processedSinceBatchWrite >= batchWritePeriod { + if err := writeBatch(); err != nil { + return err + } + } + + // Periodically release and re-grab the database iterator to avoid + // keeping a reference to an old database revision. + processedSinceIteratorRelease++ + if processedSinceIteratorRelease >= iteratorReleasePeriod { + if err := iterator.Error(); err != nil { + return err + } + + // The batch must be written here to avoid re-processing a block. + if err := writeBatch(); err != nil { + return err + } + + processedSinceIteratorRelease = 0 + iterator.Release() + // We specify the starting key of the iterator so that the + // underlying database doesn't need to scan over the, potentially + // not yet compacted, blocks we just deleted. 
+ iterator = interval.GetBlockIteratorWithStart(db, height+1) + } + + if now := time.Now(); now.After(timeOfNextLog) { + var ( + numProcessed = totalNumberToProcess - tree.Len() + eta = timer.EstimateETA(startTime, numProcessed, totalNumberToProcess) + ) + log("executing blocks", + zap.Uint64("numExecuted", numProcessed), + zap.Uint64("numToExecute", totalNumberToProcess), + zap.Duration("eta", eta), + ) + timeOfNextLog = now.Add(logPeriod) + } + + if height <= lastAcceptedHeight { + continue + } + + if err := blk.Verify(ctx); err != nil { + return fmt.Errorf("failed to verify block %s (height=%d, parentID=%s) in bootstrapping: %w", + blk.ID(), + height, + blk.Parent(), + err, + ) + } + if err := blk.Accept(ctx); err != nil { + return fmt.Errorf("failed to accept block %s (height=%d, parentID=%s) in bootstrapping: %w", + blk.ID(), + height, + blk.Parent(), + err, + ) + } + } + if err := writeBatch(); err != nil { + return err + } + return iterator.Error() +} diff --git a/snow/engine/snowman/bootstrap/storage_test.go b/snow/engine/snowman/bootstrap/storage_test.go new file mode 100644 index 000000000000..a4373f3bbfdf --- /dev/null +++ b/snow/engine/snowman/bootstrap/storage_test.go @@ -0,0 +1,312 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bootstrap + +import ( + "bytes" + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap/interval" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ block.Parser = testParser(nil) + +func TestGetMissingBlockIDs(t *testing.T) { + blocks := snowmantest.BuildChain(7) + parser := makeParser(blocks) + + tests := []struct { + name string + blocks []snowman.Block + lastAcceptedHeight uint64 + expected set.Set[ids.ID] + }{ + { + name: "initially empty", + blocks: nil, + lastAcceptedHeight: 0, + expected: nil, + }, + { + name: "wants one block", + blocks: []snowman.Block{blocks[4]}, + lastAcceptedHeight: 0, + expected: set.Of(blocks[3].ID()), + }, + { + name: "wants multiple blocks", + blocks: []snowman.Block{blocks[2], blocks[4]}, + lastAcceptedHeight: 0, + expected: set.Of(blocks[1].ID(), blocks[3].ID()), + }, + { + name: "doesn't want last accepted block", + blocks: []snowman.Block{blocks[1]}, + lastAcceptedHeight: 0, + expected: nil, + }, + { + name: "doesn't want known block", + blocks: []snowman.Block{blocks[2], blocks[3]}, + lastAcceptedHeight: 0, + expected: set.Of(blocks[1].ID()), + }, + { + name: "doesn't want already accepted block", + blocks: []snowman.Block{blocks[1]}, + lastAcceptedHeight: 4, + expected: nil, + }, + { + name: "doesn't underflow", + blocks: []snowman.Block{blocks[0]}, + lastAcceptedHeight: 0, + expected: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + require := require.New(t) + + db := memdb.New() + tree, err := interval.NewTree(db) + require.NoError(err) + for _, blk := range test.blocks { + _, err := interval.Add(db, tree, 0, blk.Height(), blk.Bytes()) + require.NoError(err) + } + + missingBlockIDs, err := getMissingBlockIDs( + context.Background(), + db, + parser, + tree, + test.lastAcceptedHeight, + ) + require.NoError(err) + require.Equal(test.expected, missingBlockIDs) + }) + } +} + +func TestProcess(t *testing.T) { + blocks := snowmantest.BuildChain(7) + + tests := []struct { + name string + initialBlocks []snowman.Block + lastAcceptedHeight uint64 + missingBlockIDs set.Set[ids.ID] + blk snowman.Block + ancestors map[ids.ID]snowman.Block + expectedParentID ids.ID + expectedShouldFetchParentID bool + expectedMissingBlockIDs set.Set[ids.ID] + expectedTrackedHeights []uint64 + }{ + { + name: "add single block", + initialBlocks: nil, + lastAcceptedHeight: 0, + missingBlockIDs: set.Of(blocks[5].ID()), + blk: blocks[5], + ancestors: nil, + expectedParentID: blocks[4].ID(), + expectedShouldFetchParentID: true, + expectedMissingBlockIDs: set.Set[ids.ID]{}, + expectedTrackedHeights: []uint64{5}, + }, + { + name: "add multiple blocks", + initialBlocks: nil, + lastAcceptedHeight: 0, + missingBlockIDs: set.Of(blocks[5].ID()), + blk: blocks[5], + ancestors: map[ids.ID]snowman.Block{ + blocks[4].ID(): blocks[4], + }, + expectedParentID: blocks[3].ID(), + expectedShouldFetchParentID: true, + expectedMissingBlockIDs: set.Set[ids.ID]{}, + expectedTrackedHeights: []uint64{4, 5}, + }, + { + name: "ignore non-consecutive blocks", + initialBlocks: nil, + lastAcceptedHeight: 0, + missingBlockIDs: set.Of(blocks[3].ID(), blocks[5].ID()), + blk: blocks[5], + ancestors: map[ids.ID]snowman.Block{ + blocks[3].ID(): blocks[3], + }, + expectedParentID: blocks[4].ID(), + expectedShouldFetchParentID: true, + expectedMissingBlockIDs: set.Of(blocks[3].ID()), + expectedTrackedHeights: []uint64{5}, + }, + { + name: "do not 
request the last accepted block", + initialBlocks: nil, + lastAcceptedHeight: 2, + missingBlockIDs: set.Of(blocks[3].ID()), + blk: blocks[3], + ancestors: nil, + expectedParentID: ids.Empty, + expectedShouldFetchParentID: false, + expectedMissingBlockIDs: set.Set[ids.ID]{}, + expectedTrackedHeights: []uint64{3}, + }, + { + name: "do not request already known block", + initialBlocks: []snowman.Block{blocks[2]}, + lastAcceptedHeight: 0, + missingBlockIDs: set.Of(blocks[1].ID(), blocks[3].ID()), + blk: blocks[3], + ancestors: nil, + expectedParentID: ids.Empty, + expectedShouldFetchParentID: false, + expectedMissingBlockIDs: set.Of(blocks[1].ID()), + expectedTrackedHeights: []uint64{2, 3}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + db := memdb.New() + tree, err := interval.NewTree(db) + require.NoError(err) + for _, blk := range test.initialBlocks { + _, err := interval.Add(db, tree, 0, blk.Height(), blk.Bytes()) + require.NoError(err) + } + + parentID, shouldFetchParentID, err := process( + db, + tree, + test.missingBlockIDs, + test.lastAcceptedHeight, + test.blk, + test.ancestors, + ) + require.NoError(err) + require.Equal(test.expectedShouldFetchParentID, shouldFetchParentID) + require.Equal(test.expectedParentID, parentID) + require.Equal(test.expectedMissingBlockIDs, test.missingBlockIDs) + + require.Equal(uint64(len(test.expectedTrackedHeights)), tree.Len()) + for _, height := range test.expectedTrackedHeights { + require.True(tree.Contains(height)) + } + }) + } +} + +func TestExecute(t *testing.T) { + const numBlocks = 7 + + unhalted := &common.Halter{} + halted := &common.Halter{} + halted.Halt(context.Background()) + + tests := []struct { + name string + haltable common.Haltable + lastAcceptedHeight uint64 + expectedProcessingHeights []uint64 + expectedAcceptedHeights []uint64 + }{ + { + name: "execute everything", + haltable: unhalted, + lastAcceptedHeight: 0, + expectedProcessingHeights: 
nil, + expectedAcceptedHeights: []uint64{0, 1, 2, 3, 4, 5, 6}, + }, + { + name: "do not execute blocks accepted by height", + haltable: unhalted, + lastAcceptedHeight: 3, + expectedProcessingHeights: []uint64{1, 2, 3}, + expectedAcceptedHeights: []uint64{0, 4, 5, 6}, + }, + { + name: "do not execute blocks when halted", + haltable: halted, + lastAcceptedHeight: 0, + expectedProcessingHeights: []uint64{1, 2, 3, 4, 5, 6}, + expectedAcceptedHeights: []uint64{0}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + db := memdb.New() + tree, err := interval.NewTree(db) + require.NoError(err) + + blocks := snowmantest.BuildChain(numBlocks) + parser := makeParser(blocks) + for _, blk := range blocks { + _, err := interval.Add(db, tree, 0, blk.Height(), blk.Bytes()) + require.NoError(err) + } + + require.NoError(execute( + context.Background(), + test.haltable, + logging.NoLog{}.Info, + db, + parser, + tree, + test.lastAcceptedHeight, + )) + for _, height := range test.expectedProcessingHeights { + require.Equal(choices.Processing, blocks[height].Status()) + } + for _, height := range test.expectedAcceptedHeights { + require.Equal(choices.Accepted, blocks[height].Status()) + } + + if test.haltable.Halted() { + return + } + + size, err := database.Count(db) + require.NoError(err) + require.Zero(size) + }) + } +} + +type testParser func(context.Context, []byte) (snowman.Block, error) + +func (f testParser) ParseBlock(ctx context.Context, bytes []byte) (snowman.Block, error) { + return f(ctx, bytes) +} + +func makeParser(blocks []*snowmantest.Block) block.Parser { + return testParser(func(_ context.Context, b []byte) (snowman.Block, error) { + for _, block := range blocks { + if bytes.Equal(b, block.Bytes()) { + return block, nil + } + } + return nil, database.ErrNotFound + }) +} diff --git a/snow/engine/snowman/config_test.go b/snow/engine/snowman/config_test.go index fe66256c68db..555a580442c1 100644 --- 
a/snow/engine/snowman/config_test.go +++ b/snow/engine/snowman/config_test.go @@ -28,8 +28,7 @@ func DefaultConfig(t testing.TB) Config { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 100, MaxOutstandingItems: 1, diff --git a/snow/engine/snowman/engine.go b/snow/engine/snowman/engine.go deleted file mode 100644 index b5e3fb1020e3..000000000000 --- a/snow/engine/snowman/engine.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowman - -import ( - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" -) - -// Engine describes the events that can occur to a Snowman instance. -// -// The engine is used to fetch, order, and decide on the fate of blocks. This -// engine runs the leaderless version of the Snowman consensus protocol. -// Therefore, the liveness of this protocol tolerant to O(sqrt(n)) Byzantine -// Nodes where n is the number of nodes in the network. Therefore, this protocol -// should only be run in a Crash Fault Tolerant environment, or in an -// environment where lose of liveness and manual intervention is tolerable. 
-type Engine interface { - common.Engine - block.Getter -} diff --git a/snow/engine/snowman/getter/getter.go b/snow/engine/snowman/getter/getter.go index b58d7eb87428..b501aeef2680 100644 --- a/snow/engine/snowman/getter/getter.go +++ b/snow/engine/snowman/getter/getter.go @@ -43,8 +43,7 @@ func New( var err error gh.getAncestorsBlks, err = metric.NewAverager( - "bs", - "get_ancestors_blks", + "bs_get_ancestors_blks", "blocks fetched in a call to GetAncestors", reg, ) diff --git a/snow/engine/snowman/getter/getter_test.go b/snow/engine/snowman/getter/getter_test.go index 7d6482a1d3c3..cf58841b9581 100644 --- a/snow/engine/snowman/getter/getter_test.go +++ b/snow/engine/snowman/getter/getter_test.go @@ -14,8 +14,8 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/logging" @@ -77,30 +77,23 @@ func TestFilterAccepted(t *testing.T) { require := require.New(t) bs, vm, sender := newTest(t) - blkID0 := ids.GenerateTestID() - blkID1 := ids.GenerateTestID() - blkID2 := ids.GenerateTestID() + acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) + require.NoError(acceptedBlk.Accept(context.Background())) - blk0 := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: blkID0, - StatusV: choices.Accepted, - }} - blk1 := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: blkID1, - StatusV: choices.Accepted, - }} + unknownBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case blkID0: - return blk0, nil - case blkID1: - return blk1, nil - case blkID2: + case snowmantest.GenesisID: + return snowmantest.Genesis, nil + case 
acceptedBlk.ID(): + return acceptedBlk, nil + case unknownBlkID: + return nil, errUnknownBlock + default: + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock } var accepted []ids.ID @@ -108,11 +101,11 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } - blkIDs := set.Of(blkID0, blkID1, blkID2) + blkIDs := set.Of(snowmantest.GenesisID, acceptedBlk.ID(), unknownBlkID) require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs)) require.Len(accepted, 2) - require.Contains(accepted, blkID0) - require.Contains(accepted, blkID1) - require.NotContains(accepted, blkID2) + require.Contains(accepted, snowmantest.GenesisID) + require.Contains(accepted, acceptedBlk.ID()) + require.NotContains(accepted, unknownBlkID) } diff --git a/snow/engine/snowman/issuer.go b/snow/engine/snowman/issuer.go index d952dfe2cc6b..0a9069d00173 100644 --- a/snow/engine/snowman/issuer.go +++ b/snow/engine/snowman/issuer.go @@ -10,50 +10,30 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/snow/engine/snowman/job" ) +var _ job.Job[ids.ID] = (*issuer)(nil) + // issuer issues [blk] into to consensus after its dependencies are met. 
type issuer struct { t *Transitive nodeID ids.NodeID // nodeID of the peer that provided this block blk snowman.Block - issuedMetric prometheus.Counter - abandoned bool - deps set.Set[ids.ID] push bool + issuedMetric prometheus.Counter } -func (i *issuer) Dependencies() set.Set[ids.ID] { - return i.deps -} - -// Mark that a dependency has been met -func (i *issuer) Fulfill(ctx context.Context, id ids.ID) { - i.deps.Remove(id) - i.Update(ctx) -} - -// Abandon the attempt to issue [i.block] -func (i *issuer) Abandon(ctx context.Context, _ ids.ID) { - if !i.abandoned { - blkID := i.blk.ID() - i.t.removeFromPending(i.blk) - i.t.addToNonVerifieds(i.blk) - i.t.blocked.Abandon(ctx, blkID) - - // Tracks performance statistics - i.t.metrics.numRequests.Set(float64(i.t.blkReqs.Len())) - i.t.metrics.numBlocked.Set(float64(len(i.t.pending))) - i.t.metrics.numBlockers.Set(float64(i.t.blocked.Len())) +func (i *issuer) Execute(ctx context.Context, _ []ids.ID, abandoned []ids.ID) error { + if len(abandoned) == 0 { + // If the parent block wasn't abandoned, this block can be issued. + return i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric) } - i.abandoned = true -} -func (i *issuer) Update(ctx context.Context) { - if i.abandoned || i.deps.Len() != 0 || i.t.errs.Errored() { - return - } - // Issue the block into consensus - i.t.errs.Add(i.t.deliver(ctx, i.nodeID, i.blk, i.push, i.issuedMetric)) + // If the parent block was abandoned, this block should be abandoned as + // well. + blkID := i.blk.ID() + delete(i.t.pending, blkID) + i.t.addToNonVerifieds(i.blk) + return i.t.blocked.Abandon(ctx, blkID) } diff --git a/snow/engine/snowman/job/scheduler.go b/snow/engine/snowman/job/scheduler.go new file mode 100644 index 000000000000..e05f27130dec --- /dev/null +++ b/snow/engine/snowman/job/scheduler.go @@ -0,0 +1,109 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +// Package job provides a Scheduler to manage and execute Jobs with +// dependencies. +package job + +import "context" + +// Job is a unit of work that can be executed based on the result of resolving +// requested dependencies. +type Job[T any] interface { + Execute(ctx context.Context, fulfilled []T, abandoned []T) error +} + +type job[T comparable] struct { + // Once all dependencies are resolved, the job will be executed. + numUnresolved int + fulfilled []T + abandoned []T + job Job[T] +} + +// Scheduler implements a dependency graph for jobs. Jobs can be registered with +// dependencies, and once all dependencies are resolved, the job will be +// executed. +type Scheduler[T comparable] struct { + // dependents maps a dependency to the jobs that depend on it. + dependents map[T][]*job[T] +} + +func NewScheduler[T comparable]() *Scheduler[T] { + return &Scheduler[T]{ + dependents: make(map[T][]*job[T]), + } +} + +// Schedule a job to be executed once all of its dependencies are resolved. If a +// job is scheduled with no dependencies, it's executed immediately. +// +// In order to prevent a memory leak, all dependencies must eventually either be +// fulfilled or abandoned. +// +// While registering a job with duplicate dependencies is discouraged, it is +// allowed. +func (s *Scheduler[T]) Schedule(ctx context.Context, userJob Job[T], dependencies ...T) error { + numUnresolved := len(dependencies) + if numUnresolved == 0 { + return userJob.Execute(ctx, nil, nil) + } + + j := &job[T]{ + numUnresolved: numUnresolved, + job: userJob, + } + for _, d := range dependencies { + s.dependents[d] = append(s.dependents[d], j) + } + return nil +} + +// NumDependencies returns the number of dependencies that jobs are currently +// blocking on. +func (s *Scheduler[_]) NumDependencies() int { + return len(s.dependents) +} + +// Fulfill a dependency. If all dependencies for a job are resolved, the job +// will be executed. 
+// +// It is safe to call the scheduler during the execution of a job. +func (s *Scheduler[T]) Fulfill(ctx context.Context, dependency T) error { + return s.resolveDependency(ctx, dependency, true) +} + +// Abandon a dependency. If all dependencies for a job are resolved, the job +// will be executed. +// +// It is safe to call the scheduler during the execution of a job. +func (s *Scheduler[T]) Abandon(ctx context.Context, dependency T) error { + return s.resolveDependency(ctx, dependency, false) +} + +func (s *Scheduler[T]) resolveDependency( + ctx context.Context, + dependency T, + fulfilled bool, +) error { + jobs := s.dependents[dependency] + delete(s.dependents, dependency) + + for _, job := range jobs { + job.numUnresolved-- + if fulfilled { + job.fulfilled = append(job.fulfilled, dependency) + } else { + job.abandoned = append(job.abandoned, dependency) + } + + if job.numUnresolved > 0 { + continue + } + + if err := job.job.Execute(ctx, job.fulfilled, job.abandoned); err != nil { + return err + } + } + return nil +} diff --git a/snow/engine/snowman/job/scheduler_test.go b/snow/engine/snowman/job/scheduler_test.go new file mode 100644 index 000000000000..db6502c5f74c --- /dev/null +++ b/snow/engine/snowman/job/scheduler_test.go @@ -0,0 +1,338 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package job + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + depToResolve = iota + depToNeglect +) + +var errDuplicateExecution = errors.New("job already executed") + +type testJob struct { + calledExecute bool + fulfilled []int + abandoned []int +} + +func (j *testJob) Execute(_ context.Context, fulfilled []int, abandoned []int) error { + if j.calledExecute { + return errDuplicateExecution + } + j.calledExecute = true + j.fulfilled = fulfilled + j.abandoned = abandoned + return nil +} + +func (j *testJob) reset() { + j.calledExecute = false + j.fulfilled = nil + j.abandoned = nil +} + +func newSchedulerWithJob[T comparable]( + t *testing.T, + job Job[T], + dependencies []T, + fulfilled []T, + abandoned []T, +) *Scheduler[T] { + s := NewScheduler[T]() + require.NoError(t, s.Schedule(context.Background(), job, dependencies...)) + for _, d := range fulfilled { + require.NoError(t, s.Fulfill(context.Background(), d)) + } + for _, d := range abandoned { + require.NoError(t, s.Abandon(context.Background(), d)) + } + return s +} + +func TestScheduler_Schedule(t *testing.T) { + userJob := &testJob{} + tests := []struct { + name string + scheduler *Scheduler[int] + dependencies []int + expectedExecuted bool + expectedNumDependencies int + expectedScheduler *Scheduler[int] + }{ + { + name: "no dependencies", + scheduler: NewScheduler[int](), + dependencies: nil, + expectedExecuted: true, + expectedNumDependencies: 0, + expectedScheduler: NewScheduler[int](), + }, + { + name: "one dependency", + scheduler: NewScheduler[int](), + dependencies: []int{depToResolve}, + expectedExecuted: false, + expectedNumDependencies: 1, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToResolve: { + { + numUnresolved: 1, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + { + name: "two dependencies", + scheduler: NewScheduler[int](), + dependencies: []int{depToResolve, 
depToNeglect}, + expectedExecuted: false, + expectedNumDependencies: 2, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToResolve: { + { + numUnresolved: 2, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + depToNeglect: { + { + numUnresolved: 2, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + { + name: "additional dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve}, nil, nil), + dependencies: []int{depToResolve}, + expectedExecuted: false, + expectedNumDependencies: 1, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToResolve: { + { + numUnresolved: 1, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + { + numUnresolved: 1, + fulfilled: nil, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + // Reset the variable between tests + userJob.reset() + + require.NoError(test.scheduler.Schedule(context.Background(), userJob, test.dependencies...)) + require.Equal(test.expectedNumDependencies, test.scheduler.NumDependencies()) + require.Equal(test.expectedExecuted, userJob.calledExecute) + require.Empty(userJob.fulfilled) + require.Empty(userJob.abandoned) + require.Equal(test.expectedScheduler, test.scheduler) + }) + } +} + +func TestScheduler_Fulfill(t *testing.T) { + userJob := &testJob{} + tests := []struct { + name string + scheduler *Scheduler[int] + expectedExecuted bool + expectedFulfilled []int + expectedAbandoned []int + expectedScheduler *Scheduler[int] + }{ + { + name: "no jobs", + scheduler: NewScheduler[int](), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "single dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: []int{depToResolve}, 
+ expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "non-existent dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + }, + { + name: "incomplete dependencies", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToNeglect: { + { + numUnresolved: 1, + fulfilled: []int{depToResolve}, + abandoned: nil, + job: userJob, + }, + }, + }, + }, + }, + { + name: "duplicate dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: []int{depToResolve, depToResolve}, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "previously abandoned", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, nil, []int{depToNeglect}), + expectedExecuted: true, + expectedFulfilled: []int{depToResolve}, + expectedAbandoned: []int{depToNeglect}, + expectedScheduler: NewScheduler[int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + // Reset the variable between tests + userJob.reset() + + require.NoError(test.scheduler.Fulfill(context.Background(), depToResolve)) + require.Equal(test.expectedExecuted, userJob.calledExecute) + require.Equal(test.expectedFulfilled, userJob.fulfilled) + require.Equal(test.expectedAbandoned, userJob.abandoned) + require.Equal(test.expectedScheduler, test.scheduler) + }) + } +} + +func TestScheduler_Abandon(t *testing.T) { + userJob := &testJob{} + tests := []struct { + name string + scheduler *Scheduler[int] + expectedExecuted 
bool + expectedFulfilled []int + expectedAbandoned []int + expectedScheduler *Scheduler[int] + }{ + { + name: "no jobs", + scheduler: NewScheduler[int](), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: NewScheduler[int](), + }, + { + name: "single dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: nil, + expectedAbandoned: []int{depToResolve}, + expectedScheduler: NewScheduler[int](), + }, + { + name: "non-existent dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: newSchedulerWithJob(t, userJob, []int{depToNeglect}, nil, nil), + }, + { + name: "incomplete dependencies", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, nil, nil), + expectedExecuted: false, + expectedFulfilled: nil, + expectedAbandoned: nil, + expectedScheduler: &Scheduler[int]{ + dependents: map[int][]*job[int]{ + depToNeglect: { + { + numUnresolved: 1, + fulfilled: nil, + abandoned: []int{depToResolve}, + job: userJob, + }, + }, + }, + }, + }, + { + name: "duplicate dependency", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToResolve}, nil, nil), + expectedExecuted: true, + expectedFulfilled: nil, + expectedAbandoned: []int{depToResolve, depToResolve}, + expectedScheduler: NewScheduler[int](), + }, + { + name: "previously fulfilled", + scheduler: newSchedulerWithJob(t, userJob, []int{depToResolve, depToNeglect}, []int{depToNeglect}, nil), + expectedExecuted: true, + expectedFulfilled: []int{depToNeglect}, + expectedAbandoned: []int{depToResolve}, + expectedScheduler: NewScheduler[int](), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + // Reset the variable between tests + userJob.reset() + + 
require.NoError(test.scheduler.Abandon(context.Background(), depToResolve)) + require.Equal(test.expectedExecuted, userJob.calledExecute) + require.Equal(test.expectedFulfilled, userJob.fulfilled) + require.Equal(test.expectedAbandoned, userJob.abandoned) + require.Equal(test.expectedScheduler, test.scheduler) + }) + } +} diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go index 5dd65d8afa14..922b18200d47 100644 --- a/snow/engine/snowman/metrics.go +++ b/snow/engine/snowman/metrics.go @@ -13,7 +13,6 @@ import ( const ( pullGossipSource = "pull_gossip" pushGossipSource = "push_gossip" - putGossipSource = "put_gossip" builtSource = "built" unknownSource = "unknown" ) @@ -39,109 +38,92 @@ type metrics struct { issued *prometheus.CounterVec } -func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error { +func newMetrics(reg prometheus.Registerer) (*metrics, error) { errs := wrappers.Errs{} - m.bootstrapFinished = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bootstrap_finished", - Help: "Whether or not bootstrap process has completed. 
1 is success, 0 is fail or ongoing.", - }) - m.numRequests = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "requests", - Help: "Number of outstanding block requests", - }) - m.numBlocked = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blocked", - Help: "Number of blocks that are pending issuance", - }) - m.numBlockers = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "blockers", - Help: "Number of blocks that are blocking other blocks from being issued because they haven't been issued", - }) - m.numNonVerifieds = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "non_verified_blks", - Help: "Number of non-verified blocks in the memory", - }) - m.numBuilt = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "blks_built", - Help: "Number of blocks that have been built locally", - }) - m.numBuildsFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "blk_builds_failed", - Help: "Number of BuildBlock calls that have failed", - }) - m.numUselessPutBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_put_bytes", - Help: "Amount of useless bytes received in Put messages", - }) - m.numUselessPushQueryBytes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_push_query_bytes", - Help: "Amount of useless bytes received in PushQuery messages", - }) - m.numMissingAcceptedBlocks = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_missing_accepted_blocks", - Help: "Number of times an accepted block height was referenced and it wasn't locally available", - }) - m.numProcessingAncestorFetchesFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_failed", - Help: "Number of votes that were dropped due to unknown blocks", - }) - 
m.numProcessingAncestorFetchesDropped = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_dropped", - Help: "Number of votes that were dropped due to decided blocks", - }) - m.numProcessingAncestorFetchesSucceeded = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_succeeded", - Help: "Number of votes that were applied to ancestor blocks", - }) - m.numProcessingAncestorFetchesUnneeded = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_processing_ancestor_fetches_unneeded", - Help: "Number of votes that were directly applied to blocks", - }) - m.getAncestorsBlks = metric.NewAveragerWithErrs( - namespace, - "get_ancestors_blks", - "blocks fetched in a call to GetAncestors", - reg, - &errs, - ) - m.selectedVoteIndex = metric.NewAveragerWithErrs( - namespace, - "selected_vote_index", - "index of the voteID that was passed into consensus", - reg, - &errs, - ) - m.issuerStake = metric.NewAveragerWithErrs( - namespace, - "issuer_stake", - "stake weight of the peer who provided a block that was issued into consensus", - reg, - &errs, - ) - m.issued = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Name: "blks_issued", - Help: "number of blocks that have been issued into consensus by discovery mechanism", - }, []string{"source"}) + m := &metrics{ + bootstrapFinished: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "bootstrap_finished", + Help: "Whether or not bootstrap process has completed. 
1 is success, 0 is fail or ongoing.", + }), + numRequests: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "requests", + Help: "Number of outstanding block requests", + }), + numBlocked: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "blocked", + Help: "Number of blocks that are pending issuance", + }), + numBlockers: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "blockers", + Help: "Number of blocks that are blocking other blocks from being issued because they haven't been issued", + }), + numNonVerifieds: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "non_verified_blks", + Help: "Number of non-verified blocks in the memory", + }), + numBuilt: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "blks_built", + Help: "Number of blocks that have been built locally", + }), + numBuildsFailed: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "blk_builds_failed", + Help: "Number of BuildBlock calls that have failed", + }), + numUselessPutBytes: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_useless_put_bytes", + Help: "Amount of useless bytes received in Put messages", + }), + numUselessPushQueryBytes: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_useless_push_query_bytes", + Help: "Amount of useless bytes received in PushQuery messages", + }), + numMissingAcceptedBlocks: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_missing_accepted_blocks", + Help: "Number of times an accepted block height was referenced and it wasn't locally available", + }), + numProcessingAncestorFetchesFailed: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_processing_ancestor_fetches_failed", + Help: "Number of votes that were dropped due to unknown blocks", + }), + numProcessingAncestorFetchesDropped: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_processing_ancestor_fetches_dropped", + Help: "Number of votes that were dropped due to decided blocks", + }), + numProcessingAncestorFetchesSucceeded: 
prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_processing_ancestor_fetches_succeeded", + Help: "Number of votes that were applied to ancestor blocks", + }), + numProcessingAncestorFetchesUnneeded: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "num_processing_ancestor_fetches_unneeded", + Help: "Number of votes that were directly applied to blocks", + }), + getAncestorsBlks: metric.NewAveragerWithErrs( + "get_ancestors_blks", + "blocks fetched in a call to GetAncestors", + reg, + &errs, + ), + selectedVoteIndex: metric.NewAveragerWithErrs( + "selected_vote_index", + "index of the voteID that was passed into consensus", + reg, + &errs, + ), + issuerStake: metric.NewAveragerWithErrs( + "issuer_stake", + "stake weight of the peer who provided a block that was issued into consensus", + reg, + &errs, + ), + issued: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "blks_issued", + Help: "number of blocks that have been issued into consensus by discovery mechanism", + }, []string{"source"}), + } // Register the labels m.issued.WithLabelValues(pullGossipSource) m.issued.WithLabelValues(pushGossipSource) - m.issued.WithLabelValues(putGossipSource) m.issued.WithLabelValues(builtSource) m.issued.WithLabelValues(unknownSource) @@ -162,5 +144,5 @@ func (m *metrics) Initialize(namespace string, reg prometheus.Registerer) error reg.Register(m.numProcessingAncestorFetchesUnneeded), reg.Register(m.issued), ) - return errs.Err + return m, errs.Err } diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go index 1ec1e67021bc..2ee745bb5c64 100644 --- a/snow/engine/snowman/syncer/state_syncer_test.go +++ b/snow/engine/snowman/syncer/state_syncer_test.go @@ -116,7 +116,7 @@ func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + 
beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) @@ -159,7 +159,7 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -197,7 +197,7 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -228,7 +228,7 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, _, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -267,7 +267,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -357,7 +357,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, 
(totalWeight+1)/2) @@ -426,7 +426,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -509,7 +509,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -598,7 +598,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -669,7 +669,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -786,7 +786,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) @@ -890,7 +890,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { 
peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) @@ -1035,7 +1035,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) @@ -1141,7 +1141,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, fullVM, sender := buildTestsObjects(t, ctx, startup, beacons, alpha) @@ -1286,7 +1286,7 @@ func TestStateSyncIsDoneOnceVMNotifies(t *testing.T) { peers := tracker.NewPeers() startup := tracker.NewStartup(peers, startupAlpha) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) syncer, _, _ := buildTestsObjects(t, ctx, startup, beacons, (totalWeight+1)/2) diff --git a/snow/engine/snowman/test_engine.go b/snow/engine/snowman/test_engine.go deleted file mode 100644 index eada8463a041..000000000000 --- a/snow/engine/snowman/test_engine.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowman - -import ( - "context" - "errors" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/common" -) - -var ( - _ Engine = (*EngineTest)(nil) - - errGetBlock = errors.New("unexpectedly called GetBlock") -) - -// EngineTest is a test engine -type EngineTest struct { - common.EngineTest - - CantGetBlock bool - GetBlockF func(context.Context, ids.ID) (snowman.Block, error) -} - -func (e *EngineTest) Default(cant bool) { - e.EngineTest.Default(cant) - e.CantGetBlock = false -} - -func (e *EngineTest) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { - if e.GetBlockF != nil { - return e.GetBlockF(ctx, blkID) - } - if e.CantGetBlock && e.T != nil { - require.FailNow(e.T, errGetBlock.Error()) - } - return nil, errGetBlock -} diff --git a/snow/engine/snowman/traced_engine.go b/snow/engine/snowman/traced_engine.go deleted file mode 100644 index e2306dcd1349..000000000000 --- a/snow/engine/snowman/traced_engine.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowman - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/trace" - - oteltrace "go.opentelemetry.io/otel/trace" -) - -var _ Engine = (*tracedEngine)(nil) - -type tracedEngine struct { - common.Engine - engine Engine - tracer trace.Tracer -} - -func TraceEngine(engine Engine, tracer trace.Tracer) Engine { - return &tracedEngine{ - Engine: common.TraceEngine(engine, tracer), - engine: engine, - tracer: tracer, - } -} - -func (e *tracedEngine) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { - ctx, span := e.tracer.Start(ctx, "tracedEngine.GetBlock", oteltrace.WithAttributes( - attribute.Stringer("blkID", blkID), - )) - defer span.End() - - return e.engine.GetBlock(ctx, blkID) -} diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index de39295e71d0..40106424f12e 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -15,13 +15,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/consensus/snowman/poll" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/ancestor" - "github.com/ava-labs/avalanchego/snow/event" + "github.com/ava-labs/avalanchego/snow/engine/snowman/job" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/bimap" @@ -30,23 +29,11 @@ import ( "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" 
"github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" ) -const ( - nonVerifiedCacheSize = 64 * units.MiB +const nonVerifiedCacheSize = 64 * units.MiB - // putGossipPeriod specifies the number of times Gossip will be called per - // Put gossip. This is done to avoid splitting Gossip into multiple - // functions and to allow more frequent pull gossip than push gossip. - putGossipPeriod = 10 -) - -var _ Engine = (*Transitive)(nil) - -func New(config Config) (Engine, error) { - return newTransitive(config) -} +var _ common.Engine = (*Transitive)(nil) func cachedBlockSize(_ ids.ID, blk snowman.Block) int { return ids.IDLen + len(blk.Bytes()) + constants.PointerOverhead @@ -56,7 +43,7 @@ func cachedBlockSize(_ ids.ID, blk snowman.Block) int { // Transitive dependencies. type Transitive struct { Config - metrics + *metrics // list of NoOpsHandler for messages dropped by engine common.StateSummaryFrontierHandler @@ -69,8 +56,6 @@ type Transitive struct { requestID uint32 - gossipCounter int - // track outstanding preference requests polls poll.Set @@ -96,17 +81,14 @@ type Transitive struct { // operations that are blocked on a block being issued. This could be // issuing another block, responding to a query, or applying votes to consensus - blocked event.Blocker + blocked *job.Scheduler[ids.ID] // number of times build block needs to be called once the number of // processing blocks has gone below the optimal number. 
pendingBuildBlocks int - - // errs tracks if an error has occurred in a callback - errs wrappers.Errs } -func newTransitive(config Config) (*Transitive, error) { +func New(config Config) (*Transitive, error) { config.Ctx.Log.Info("initializing consensus engine") nonVerifiedCache, err := metercacher.New[ids.ID, snowman.Block]( @@ -122,24 +104,33 @@ func newTransitive(config Config) (*Transitive, error) { } acceptedFrontiers := tracker.NewAccepted() - config.Validators.RegisterCallbackListener(config.Ctx.SubnetID, acceptedFrontiers) + config.Validators.RegisterSetCallbackListener(config.Ctx.SubnetID, acceptedFrontiers) - factory := poll.NewEarlyTermNoTraversalFactory( + factory, err := poll.NewEarlyTermNoTraversalFactory( config.Params.AlphaPreference, config.Params.AlphaConfidence, + config.Ctx.Registerer, ) + if err != nil { + return nil, err + } polls, err := poll.NewSet( factory, config.Ctx.Log, - "", config.Ctx.Registerer, ) if err != nil { return nil, err } - t := &Transitive{ + metrics, err := newMetrics(config.Ctx.Registerer) + if err != nil { + return nil, err + } + + return &Transitive{ Config: config, + metrics: metrics, StateSummaryFrontierHandler: common.NewNoOpStateSummaryFrontierHandler(config.Ctx.Log), AcceptedStateSummaryHandler: common.NewNoOpAcceptedStateSummaryHandler(config.Ctx.Log), AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), @@ -151,51 +142,16 @@ func newTransitive(config Config) (*Transitive, error) { nonVerifieds: ancestor.NewTree(), nonVerifiedCache: nonVerifiedCache, acceptedFrontiers: acceptedFrontiers, + blocked: job.NewScheduler[ids.ID](), polls: polls, blkReqs: bimap.New[common.Request, ids.ID](), blkReqSourceMetric: make(map[common.Request]prometheus.Counter), - } - - return t, t.metrics.Initialize("", config.Ctx.Registerer) + }, nil } func (t *Transitive) Gossip(ctx context.Context) error { lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() - if numProcessing := 
t.Consensus.NumProcessing(); numProcessing == 0 { - t.Ctx.Log.Verbo("sampling from validators", - zap.Stringer("validators", t.Validators), - ) - - // Uniform sampling is used here to reduce bandwidth requirements of - // nodes with a large amount of stake weight. - vdrID, ok := t.ConnectedValidators.SampleValidator() - if !ok { - t.Ctx.Log.Warn("skipping block gossip", - zap.String("reason", "no connected validators"), - ) - return nil - } - - nextHeightToAccept, err := math.Add64(lastAcceptedHeight, 1) - if err != nil { - t.Ctx.Log.Error("skipping block gossip", - zap.String("reason", "block height overflow"), - zap.Stringer("blkID", lastAcceptedID), - zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), - zap.Error(err), - ) - return nil - } - - t.requestID++ - t.Sender.SendPullQuery( - ctx, - set.Of(vdrID), - t.requestID, - t.Consensus.Preference(), - nextHeightToAccept, - ) - } else { + if numProcessing := t.Consensus.NumProcessing(); numProcessing != 0 { t.Ctx.Log.Debug("skipping block gossip", zap.String("reason", "blocks currently processing"), zap.Int("numProcessing", numProcessing), @@ -205,28 +161,42 @@ func (t *Transitive) Gossip(ctx context.Context) error { // when attempting to issue a query. This can happen if a subnet was // temporarily misconfigured and there were no validators. t.repoll(ctx) + return nil } - // TODO: Remove periodic push gossip after v1.11.x is activated - t.gossipCounter++ - t.gossipCounter %= putGossipPeriod - if t.gossipCounter > 0 { + t.Ctx.Log.Verbo("sampling from validators", + zap.Stringer("validators", t.Validators), + ) + + // Uniform sampling is used here to reduce bandwidth requirements of + // nodes with a large amount of stake weight. 
+ vdrID, ok := t.ConnectedValidators.SampleValidator() + if !ok { + t.Ctx.Log.Warn("skipping block gossip", + zap.String("reason", "no connected validators"), + ) return nil } - lastAccepted, err := t.GetBlock(ctx, lastAcceptedID) + nextHeightToAccept, err := math.Add64(lastAcceptedHeight, 1) if err != nil { - t.Ctx.Log.Warn("dropping gossip request", - zap.String("reason", "block couldn't be loaded"), + t.Ctx.Log.Error("skipping block gossip", + zap.String("reason", "block height overflow"), zap.Stringer("blkID", lastAcceptedID), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), zap.Error(err), ) return nil } - t.Ctx.Log.Verbo("gossiping accepted block to the network", - zap.Stringer("blkID", lastAcceptedID), + + t.requestID++ + t.Sender.SendPullQuery( + ctx, + set.Of(vdrID), + t.requestID, + t.Consensus.Preference(), + nextHeightToAccept, ) - t.Sender.SendGossip(ctx, lastAccepted.Bytes()) return nil } @@ -276,8 +246,6 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 } issuedMetric = t.blkReqSourceMetric[req] - case requestID == constants.GossipMsgRequestID: - issuedMetric = t.metrics.issued.WithLabelValues(putGossipSource) default: // This can happen if this block was provided to this engine while a Get // request was outstanding. For example, the block may have been locally @@ -288,7 +256,7 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 issuedMetric = t.metrics.issued.WithLabelValues(unknownSource) } - if t.wasIssued(blk) { + if !t.shouldIssueBlock(blk) { t.metrics.numUselessPutBytes.Add(float64(len(blkBytes))) } @@ -297,10 +265,10 @@ func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. 
- if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { + if err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } - return t.buildBlocks(ctx) + return t.executeDeferredWork(ctx) } func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { @@ -321,11 +289,12 @@ func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID } delete(t.blkReqSourceMetric, req) - // Because the get request was dropped, we no longer expect blkID to be issued. - t.blocked.Abandon(ctx, blkID) - t.metrics.numRequests.Set(float64(t.blkReqs.Len())) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.buildBlocks(ctx) + // Because the get request was dropped, we no longer expect blkID to be + // issued. + if err := t.blocked.Abandon(ctx, blkID); err != nil { + return err + } + return t.executeDeferredWork(ctx) } func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID, requestedHeight uint64) error { @@ -335,11 +304,11 @@ func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID // Try to issue [blkID] to consensus. // If we're missing an ancestor, request it from [vdr] - if _, err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil { + if err := t.issueFromByID(ctx, nodeID, blkID, issuedMetric); err != nil { return err } - return t.buildBlocks(ctx) + return t.executeDeferredWork(ctx) } func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte, requestedHeight uint64) error { @@ -365,7 +334,7 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID return nil } - if t.wasIssued(blk) { + if !t.shouldIssueBlock(blk) { t.metrics.numUselessPushQueryBytes.Add(float64(len(blkBytes))) } @@ -376,11 +345,11 @@ func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID // receive requests to fill the ancestry. 
dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. - if _, err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { + if err := t.issueFrom(ctx, nodeID, blk, issuedMetric); err != nil { return err } - return t.buildBlocks(ctx) + return t.executeDeferredWork(ctx) } func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDAtHeight ids.ID, acceptedID ids.ID) error { @@ -395,14 +364,12 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin ) issuedMetric := t.metrics.issued.WithLabelValues(pullGossipSource) - - addedPreferred, err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric) - if err != nil { + if err := t.issueFromByID(ctx, nodeID, preferredID, issuedMetric); err != nil { return err } var ( - addedPreferredIDAtHeight = addedPreferred + preferredIDAtHeightShouldBlock bool // Invariant: The order of [responseOptions] must be [preferredID] then // (optionally) [preferredIDAtHeight]. During vote application, the // first vote that can be applied will be used. 
So, the votes should be @@ -410,10 +377,10 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin responseOptions = []ids.ID{preferredID} ) if preferredID != preferredIDAtHeight { - addedPreferredIDAtHeight, err = t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric) - if err != nil { + if err := t.issueFromByID(ctx, nodeID, preferredIDAtHeight, issuedMetric); err != nil { return err } + preferredIDAtHeightShouldBlock = t.canDependOn(preferredIDAtHeight) responseOptions = append(responseOptions, preferredIDAtHeight) } @@ -421,23 +388,25 @@ func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin // issued into consensus v := &voter{ t: t, - vdr: nodeID, + nodeID: nodeID, requestID: requestID, responseOptions: responseOptions, } // Wait until [preferredID] and [preferredIDAtHeight] have been issued to // consensus before applying this chit. - if !addedPreferred { - v.deps.Add(preferredID) + var deps []ids.ID + if t.canDependOn(preferredID) { + deps = append(deps, preferredID) } - if !addedPreferredIDAtHeight { - v.deps.Add(preferredIDAtHeight) + if preferredIDAtHeightShouldBlock { + deps = append(deps, preferredIDAtHeight) } - t.blocked.Register(ctx, v) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.buildBlocks(ctx) + if err := t.blocked.Schedule(ctx, v, deps...); err != nil { + return err + } + return t.executeDeferredWork(ctx) } func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { @@ -446,16 +415,15 @@ func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, request return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted, lastAccepted) } - t.blocked.Register( - ctx, - &voter{ - t: t, - vdr: nodeID, - requestID: requestID, - }, - ) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.buildBlocks(ctx) + v := &voter{ + t: t, + nodeID: nodeID, + requestID: requestID, + } + if err := t.blocked.Schedule(ctx, 
v); err != nil { + return err + } + return t.executeDeferredWork(ctx) } func (*Transitive) Timeout(context.Context) error { @@ -478,7 +446,7 @@ func (t *Transitive) Notify(ctx context.Context, msg common.Message) error { case common.PendingTxs: // the pending txs message means we should attempt to build a block. t.pendingBuildBlocks++ - return t.buildBlocks(ctx) + return t.executeDeferredWork(ctx) case common.StateSyncDone: t.Ctx.StateSyncing.Set(false) return nil @@ -501,7 +469,7 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { return err } - lastAccepted, err := t.GetBlock(ctx, lastAcceptedID) + lastAccepted, err := t.getBlock(ctx, lastAcceptedID) if err != nil { t.Ctx.Log.Error("failed to get last accepted block", zap.Error(err), @@ -510,7 +478,8 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { } // initialize consensus to the last accepted blockID - if err := t.Consensus.Initialize(t.Ctx, t.Params, lastAcceptedID, lastAccepted.Height(), lastAccepted.Timestamp()); err != nil { + lastAcceptedHeight := lastAccepted.Height() + if err := t.Consensus.Initialize(t.Ctx, t.Params, lastAcceptedID, lastAcceptedHeight, lastAccepted.Timestamp()); err != nil { return err } @@ -540,8 +509,9 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { return err } - t.Ctx.Log.Info("consensus starting", - zap.Stringer("lastAcceptedBlock", lastAcceptedID), + t.Ctx.Log.Info("starting consensus", + zap.Stringer("lastAcceptedID", lastAcceptedID), + zap.Uint64("lastAcceptedHeight", lastAcceptedHeight), ) t.metrics.bootstrapFinished.Set(1) @@ -553,7 +523,7 @@ func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { return fmt.Errorf("failed to notify VM that consensus is starting: %w", err) } - return nil + return t.executeDeferredWork(ctx) } func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { @@ -562,10 +532,9 @@ func (t *Transitive) HealthCheck(ctx context.Context) 
(interface{}, error) { t.Ctx.Log.Verbo("running health check", zap.Uint32("requestID", t.requestID), - zap.Int("gossipCounter", t.gossipCounter), zap.Stringer("polls", t.polls), zap.Reflect("outstandingBlockRequests", t.blkReqs), - zap.Stringer("blockedJobs", &t.blocked), + zap.Int("numMissingDependencies", t.blocked.NumDependencies()), zap.Int("pendingBuildBlocks", t.pendingBuildBlocks), ) @@ -584,7 +553,19 @@ func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { return intf, fmt.Errorf("vm: %w ; consensus: %w", vmErr, consensusErr) } -func (t *Transitive) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { +func (t *Transitive) executeDeferredWork(ctx context.Context) error { + if err := t.buildBlocks(ctx); err != nil { + return err + } + + t.metrics.numRequests.Set(float64(t.blkReqs.Len())) + t.metrics.numBlocked.Set(float64(len(t.pending))) + t.metrics.numBlockers.Set(float64(t.blocked.NumDependencies())) + t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len())) + return nil +} + +func (t *Transitive) getBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { if blk, ok := t.pending[blkID]; ok { return blk, nil } @@ -665,9 +646,6 @@ func (t *Transitive) sendChits(ctx context.Context, nodeID ids.NodeID, requestID // Build blocks if they have been requested and the number of processing blocks // is less than optimal. func (t *Transitive) buildBlocks(ctx context.Context) error { - if err := t.errs.Err; err != nil { - return err - } for t.pendingBuildBlocks > 0 && t.Consensus.NumProcessing() < t.Params.OptimalProcessing { t.pendingBuildBlocks-- @@ -681,15 +659,6 @@ func (t *Transitive) buildBlocks(ctx context.Context) error { } t.numBuilt.Inc() - // a newly created block is expected to be processing. 
If this check - // fails, there is potentially an error in the VM this engine is running - if status := blk.Status(); status != choices.Processing { - t.Ctx.Log.Warn("attempting to issue block with unexpected status", - zap.Stringer("expectedStatus", choices.Processing), - zap.Stringer("status", status), - ) - } - // The newly created block should be built on top of the preferred block. // Otherwise, the new block doesn't have the best chance of being confirmed. parentID := blk.Parent() @@ -701,16 +670,18 @@ func (t *Transitive) buildBlocks(ctx context.Context) error { } issuedMetric := t.metrics.issued.WithLabelValues(builtSource) - added, err := t.issueWithAncestors(ctx, blk, issuedMetric) - if err != nil { + if err := t.issueWithAncestors(ctx, blk, issuedMetric); err != nil { return err } - // issuing the block shouldn't have any missing dependencies - if added { + // TODO: Technically this may incorrectly log a warning if the block + // that was just built caused votes to be applied such that the block + // was rejected or was accepted along with one of its children. This + // should be cleaned up to never produce an invalid warning. + if t.canIssueChildOn(blk.ID()) { t.Ctx.Log.Verbo("successfully issued new block from the VM") } else { - t.Ctx.Log.Warn("built block with unissued ancestors") + t.Ctx.Log.Warn("block that was just built is not extendable") } } return nil @@ -728,47 +699,46 @@ func (t *Transitive) repoll(ctx context.Context) { } } -// issueFromByID attempts to issue the branch ending with a block [blkID] into consensus. +// issueFromByID attempts to issue the branch ending with a block [blkID] into +// consensus. // If we do not have [blkID], request it. -// Returns true if the block is processing in consensus or is decided. 
func (t *Transitive) issueFromByID( ctx context.Context, nodeID ids.NodeID, blkID ids.ID, issuedMetric prometheus.Counter, -) (bool, error) { - blk, err := t.GetBlock(ctx, blkID) +) error { + blk, err := t.getBlock(ctx, blkID) if err != nil { t.sendRequest(ctx, nodeID, blkID, issuedMetric) - return false, nil + return nil } return t.issueFrom(ctx, nodeID, blk, issuedMetric) } -// issueFrom attempts to issue the branch ending with block [blkID] to consensus. -// Returns true if the block is processing in consensus or is decided. -// If a dependency is missing, request it from [vdr]. +// issueFrom attempts to issue the branch ending with block [blkID] to +// consensus. +// If a dependency is missing, it will be requested it from [nodeID]. func (t *Transitive) issueFrom( ctx context.Context, nodeID ids.NodeID, blk snowman.Block, issuedMetric prometheus.Counter, -) (bool, error) { +) error { // issue [blk] and its ancestors to consensus. blkID := blk.ID() - for !t.wasIssued(blk) { - if err := t.issue(ctx, nodeID, blk, false, issuedMetric); err != nil { - return false, err + for t.shouldIssueBlock(blk) { + err := t.issue(ctx, nodeID, blk, false, issuedMetric) + if err != nil { + return err } + // If we don't have this ancestor, request it from [nodeID] blkID = blk.Parent() - var err error - blk, err = t.GetBlock(ctx, blkID) - - // If we don't have this ancestor, request it from [vdr] - if err != nil || !blk.Status().Fetched() { + blk, err = t.getBlock(ctx, blkID) + if err != nil { t.sendRequest(ctx, nodeID, blkID, issuedMetric) - return false, nil + return nil } } @@ -777,69 +747,45 @@ func (t *Transitive) issueFrom( delete(t.blkReqSourceMetric, req) } - issued := t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) - if issued { - // A dependency should never be waiting on a decided or processing - // block. However, if the block was marked as rejected by the VM, the - // dependencies may still be waiting. Therefore, they should abandoned. 
- t.blocked.Abandon(ctx, blkID) + // If this block isn't pending, make sure nothing is blocked on it. + if _, isPending := t.pending[blkID]; !isPending { + return t.blocked.Abandon(ctx, blkID) } - - // Tracks performance statistics - t.metrics.numRequests.Set(float64(t.blkReqs.Len())) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return issued, t.errs.Err + return nil } -// issueWithAncestors attempts to issue the branch ending with [blk] to consensus. -// Returns true if the block is processing in consensus or is decided. -// If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned. +// issueWithAncestors attempts to issue the branch ending with [blk] to +// consensus. +// If a dependency is missing and the dependency hasn't been requested, the +// issuance will be abandoned. func (t *Transitive) issueWithAncestors( ctx context.Context, blk snowman.Block, issuedMetric prometheus.Counter, -) (bool, error) { +) error { blkID := blk.ID() // issue [blk] and its ancestors into consensus - status := blk.Status() - for status.Fetched() && !t.wasIssued(blk) { + for t.shouldIssueBlock(blk) { err := t.issue(ctx, t.Ctx.NodeID, blk, true, issuedMetric) if err != nil { - return false, err + return err } blkID = blk.Parent() - blk, err = t.GetBlock(ctx, blkID) + blk, err = t.getBlock(ctx, blkID) if err != nil { - status = choices.Unknown break } - status = blk.Status() - } - - // The block was issued into consensus. This is the happy path. - if status != choices.Unknown && (t.Consensus.Decided(blk) || t.Consensus.Processing(blkID)) { - return true, nil } - // There's an outstanding request for this block. - // We can just wait for that request to succeed or fail. + // There's an outstanding request for this block. We can wait for that + // request to succeed or fail. if t.blkReqs.HasValue(blkID) { - return false, nil + return nil } - // We don't have this block and have no reason to expect that we will get it. 
- // Abandon the block to avoid a memory leak. - t.blocked.Abandon(ctx, blkID) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return false, t.errs.Err -} - -// If the block has been decided, then it is marked as having been issued. -// If the block is processing, then it was issued. -// If the block is queued to be added to consensus, then it was issued. -func (t *Transitive) wasIssued(blk snowman.Block) bool { - blkID := blk.ID() - return t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) || t.pendingContains(blkID) + // If the block wasn't already issued, we have no reason to expect that it + // will be able to be issued. + return t.blocked.Abandon(ctx, blkID) } // Issue [blk] to consensus once its ancestors have been issued. @@ -867,27 +813,22 @@ func (t *Transitive) issue( t: t, nodeID: nodeID, blk: blk, - issuedMetric: issuedMetric, push: push, + issuedMetric: issuedMetric, } - // block on the parent if needed - parentID := blk.Parent() - if parent, err := t.GetBlock(ctx, parentID); err != nil || !(t.Consensus.Decided(parent) || t.Consensus.Processing(parentID)) { + // We know that shouldIssueBlock(blk) is true. This means that parent is + // either the last accepted block or is not decided. + var deps []ids.ID + if parentID := blk.Parent(); !t.canIssueChildOn(parentID) { t.Ctx.Log.Verbo("block waiting for parent to be issued", zap.Stringer("blkID", blkID), zap.Stringer("parentID", parentID), ) - i.deps.Add(parentID) + deps = append(deps, parentID) } - t.blocked.Register(ctx, i) - - // Tracks performance statistics - t.metrics.numRequests.Set(float64(t.blkReqs.Len())) - t.metrics.numBlocked.Set(float64(len(t.pending))) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.errs.Err + return t.blocked.Schedule(ctx, i, deps...) 
} // Request that [vdr] send us block [blkID] @@ -916,9 +857,6 @@ func (t *Transitive) sendRequest( zap.Stringer("blkID", blkID), ) t.Sender.SendGet(ctx, nodeID, t.requestID, blkID) - - // Tracks performance statistics - t.metrics.numRequests.Set(float64(t.blkReqs.Len())) } // Send a query for this block. If push is set to true, blkBytes will be used to @@ -985,26 +923,18 @@ func (t *Transitive) deliver( push bool, issuedMetric prometheus.Counter, ) error { - blkID := blk.ID() - if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { - return nil - } - // we are no longer waiting on adding the block to consensus, so it is no // longer pending - t.removeFromPending(blk) + blkID := blk.ID() + delete(t.pending, blkID) + parentID := blk.Parent() - parent, err := t.GetBlock(ctx, parentID) - // Because the dependency must have been fulfilled by the time this function - // is called - we don't expect [err] to be non-nil. But it is handled for - // completness and future proofing. - if err != nil || !(parent.Status() == choices.Accepted || t.Consensus.Processing(parentID)) { - // if the parent isn't processing or the last accepted block, then this - // block is effectively rejected - t.blocked.Abandon(ctx, blkID) - t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.errs.Err + if !t.canIssueChildOn(parentID) || t.Consensus.Processing(blkID) { + // If the parent isn't processing or the last accepted block, then this + // block is effectively rejected. + // Additionally, if [blkID] is already in the processing set, it + // shouldn't be added to consensus again. 
+ return t.blocked.Abandon(ctx, blkID) } // By ensuring that the parent is either processing or accepted, it is @@ -1015,10 +945,7 @@ func (t *Transitive) deliver( return err } if !blkAdded { - t.blocked.Abandon(ctx, blkID) - t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.errs.Err + return t.blocked.Abandon(ctx, blkID) } // Add all the oracle blocks if they exist. We call verify on all the blocks @@ -1053,66 +980,66 @@ func (t *Transitive) deliver( // If the block is now preferred, query the network for its preferences // with this new block. - if t.Consensus.IsPreferred(blk) { + if t.Consensus.IsPreferred(blkID) { t.sendQuery(ctx, blkID, blk.Bytes(), push) } - t.blocked.Fulfill(ctx, blkID) + if err := t.blocked.Fulfill(ctx, blkID); err != nil { + return err + } for _, blk := range added { blkID := blk.ID() - if t.Consensus.IsPreferred(blk) { + if t.Consensus.IsPreferred(blkID) { t.sendQuery(ctx, blkID, blk.Bytes(), push) } - t.removeFromPending(blk) - t.blocked.Fulfill(ctx, blkID) + delete(t.pending, blkID) + if err := t.blocked.Fulfill(ctx, blkID); err != nil { + return err + } if req, ok := t.blkReqs.DeleteValue(blkID); ok { delete(t.blkReqSourceMetric, req) } } for _, blk := range dropped { blkID := blk.ID() - t.removeFromPending(blk) - t.blocked.Abandon(ctx, blkID) + delete(t.pending, blkID) + if err := t.blocked.Abandon(ctx, blkID); err != nil { + return err + } if req, ok := t.blkReqs.DeleteValue(blkID); ok { delete(t.blkReqSourceMetric, req) } } + // It's possible that the blocks we just added to consensus were decided + // immediately by votes that were pending their issuance. If this is the + // case, we should not be requesting any chits. 
+ if t.Consensus.NumProcessing() == 0 { + return nil + } + // If we should issue multiple queries at the same time, we need to repoll t.repoll(ctx) - - // Tracks performance statistics - t.metrics.numRequests.Set(float64(t.blkReqs.Len())) - t.metrics.numBlocked.Set(float64(len(t.pending))) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.errs.Err -} - -// Returns true if the block whose ID is [blkID] is waiting to be issued to consensus -func (t *Transitive) pendingContains(blkID ids.ID) bool { - _, ok := t.pending[blkID] - return ok -} - -func (t *Transitive) removeFromPending(blk snowman.Block) { - delete(t.pending, blk.ID()) + return nil } func (t *Transitive) addToNonVerifieds(blk snowman.Block) { - // don't add this blk if it's decided or processing. + // If this block is processing, we don't need to add it to non-verifieds. blkID := blk.ID() - if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { + if t.Consensus.Processing(blkID) { return } parentID := blk.Parent() - // we might still need this block so we can bubble votes to the parent - // only add blocks with parent already in the tree or processing. - // decided parents should not be in this map. + // We might still need this block so we can bubble votes to the parent. + // + // If the non-verified set contains the parentID, then we know that the + // parent is not decided and therefore blk is not decided. + // Similarly, if the parent is processing, then the parent is not decided + // and therefore blk is not decided. 
if t.nonVerifieds.Has(parentID) || t.Consensus.Processing(parentID) { t.nonVerifieds.Add(blkID, parentID) t.nonVerifiedCache.Put(blkID, blk) - t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len())) } } @@ -1144,16 +1071,110 @@ func (t *Transitive) addUnverifiedBlockToConsensus( issuedMetric.Inc() t.nonVerifieds.Remove(blkID) t.nonVerifiedCache.Evict(blkID) - t.metrics.numNonVerifieds.Set(float64(t.nonVerifieds.Len())) t.metrics.issuerStake.Observe(float64(t.Validators.GetWeight(t.Ctx.SubnetID, nodeID))) t.Ctx.Log.Verbo("adding block to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), zap.Uint64("height", blkHeight), ) - return true, t.Consensus.Add(ctx, &memoryBlock{ + return true, t.Consensus.Add(&memoryBlock{ Block: blk, - metrics: &t.metrics, + metrics: t.metrics, tree: t.nonVerifieds, }) } + +// getProcessingAncestor finds [initialVote]'s most recent ancestor that is +// processing in consensus. If no ancestor could be found, false is returned. +// +// Note: If [initialVote] is processing, then [initialVote] will be returned. +func (t *Transitive) getProcessingAncestor(ctx context.Context, initialVote ids.ID) (ids.ID, bool) { + // If [bubbledVote] != [initialVote], it is guaranteed that [bubbledVote] is + // in processing. Otherwise, we attempt to iterate through any blocks we + // have at our disposal as a best-effort mechanism to find a valid ancestor. 
+ bubbledVote := t.nonVerifieds.GetAncestor(initialVote) + for { + if t.Consensus.Processing(bubbledVote) { + t.Ctx.Log.Verbo("applying vote", + zap.Stringer("initialVoteID", initialVote), + zap.Stringer("bubbledVoteID", bubbledVote), + ) + if bubbledVote != initialVote { + t.numProcessingAncestorFetchesSucceeded.Inc() + } else { + t.numProcessingAncestorFetchesUnneeded.Inc() + } + return bubbledVote, true + } + + blk, err := t.getBlock(ctx, bubbledVote) + // If we cannot retrieve the block, drop [vote] + if err != nil { + t.Ctx.Log.Debug("dropping vote", + zap.String("reason", "ancestor couldn't be fetched"), + zap.Stringer("initialVoteID", initialVote), + zap.Stringer("bubbledVoteID", bubbledVote), + zap.Error(err), + ) + t.numProcessingAncestorFetchesFailed.Inc() + return ids.Empty, false + } + + if t.isDecided(blk) { + t.Ctx.Log.Debug("dropping vote", + zap.String("reason", "bubbled vote already decided"), + zap.Stringer("initialVoteID", initialVote), + zap.Stringer("bubbledVoteID", bubbledVote), + zap.Uint64("height", blk.Height()), + ) + t.numProcessingAncestorFetchesDropped.Inc() + return ids.Empty, false + } + + bubbledVote = blk.Parent() + } +} + +// shouldIssueBlock returns true if the provided block should be enqueued for +// issuance. If the block is already decided, already enqueued, or has already +// been issued, this function will return false. +func (t *Transitive) shouldIssueBlock(blk snowman.Block) bool { + if t.isDecided(blk) { + return false + } + + blkID := blk.ID() + _, isPending := t.pending[blkID] + return !isPending && // If the block is already pending, don't issue it again. + !t.Consensus.Processing(blkID) // If the block was previously issued, don't issue it again. +} + +// canDependOn reports true if it is guaranteed for the provided block ID to +// eventually either be fulfilled or abandoned. 
+func (t *Transitive) canDependOn(blkID ids.ID) bool { + _, isPending := t.pending[blkID] + return isPending || t.blkReqs.HasValue(blkID) +} + +// canIssueChildOn reports true if it is valid for a child of parentID to be +// verified and added to consensus. +func (t *Transitive) canIssueChildOn(parentID ids.ID) bool { + lastAcceptedID, _ := t.Consensus.LastAccepted() + return parentID == lastAcceptedID || t.Consensus.Processing(parentID) +} + +// isDecided reports true if the provided block's height implies that the block +// is either Accepted or Rejected. +func (t *Transitive) isDecided(blk snowman.Block) bool { + height := blk.Height() + lastAcceptedID, lastAcceptedHeight := t.Consensus.LastAccepted() + if height <= lastAcceptedHeight { + return true // block is either accepted or rejected + } + + // This is guaranteed not to underflow because the above check ensures + // [height] > 0. + parentHeight := height - 1 + parentID := blk.Parent() + return parentHeight == lastAcceptedHeight && parentID != lastAcceptedID // the parent was rejected +} diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index dcae2e26c9f6..310a472b8009 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -7,322 +7,315 @@ import ( "bytes" "context" "errors" + "fmt" "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/ancestor" 
"github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) var ( - errUnknownBlock = errors.New("unknown block") - errUnknownBytes = errors.New("unknown bytes") - errInvalid = errors.New("invalid") - errUnexpectedCall = errors.New("unexpected call") - errTest = errors.New("non-nil test") - Genesis = ids.GenerateTestID() + errUnknownBlock = errors.New("unknown block") + errUnknownBytes = errors.New("unknown bytes") + errInvalid = errors.New("invalid") + errTest = errors.New("non-nil test") ) -func setup(t *testing.T, engCfg Config) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { - require := require.New(t) +func MakeGetBlockF(blks ...[]*snowmantest.Block) func(context.Context, ids.ID) (snowman.Block, error) { + return func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + for _, blkSet := range blks { + for _, blk := range blkSet { + if blkID == blk.ID() { + return blk, nil + } + } + } + return nil, errUnknownBlock + } +} - vals := validators.NewManager() - engCfg.Validators = vals +func MakeParseBlockF(blks ...[]*snowmantest.Block) func(context.Context, []byte) (snowman.Block, error) { + return func(_ context.Context, blkBytes []byte) (snowman.Block, error) { + for _, blkSet := range blks { + for _, blk := range blkSet { + if bytes.Equal(blkBytes, blk.Bytes()) { + return blk, nil + } + } + } + return nil, errUnknownBlock + } +} - vdr := ids.GenerateTestNodeID() - require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) - require.NoError(engCfg.ConnectedValidators.Connected(context.Background(), vdr, version.CurrentApp)) +func MakeLastAcceptedBlockF(defaultBlk *snowmantest.Block, blks 
...[]*snowmantest.Block) func(context.Context) (ids.ID, error) { + return func(_ context.Context) (ids.ID, error) { + highestHeight := defaultBlk.Height() + highestID := defaultBlk.ID() + for _, blkSet := range blks { + for _, blk := range blkSet { + if blk.Status() == choices.Accepted && blk.Height() > highestHeight { + highestHeight = blk.Height() + highestID = blk.ID() + } + } + } + return highestID, nil + } +} + +func setup(t *testing.T, config Config) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive) { + require := require.New(t) - vals.RegisterCallbackListener(engCfg.Ctx.SubnetID, engCfg.ConnectedValidators) + vdr := ids.GenerateTestNodeID() + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) + require.NoError(config.ConnectedValidators.Connected(context.Background(), vdr, version.CurrentApp)) + config.Validators.RegisterSetCallbackListener(config.Ctx.SubnetID, config.ConnectedValidators) sender := &common.SenderTest{T: t} - engCfg.Sender = sender + config.Sender = sender sender.Default(true) vm := &block.TestVM{} vm.T = t - engCfg.VM = vm + config.VM = vm snowGetHandler, err := getter.New( vm, sender, - engCfg.Ctx.Log, + config.Ctx.Log, time.Second, 2000, - engCfg.Ctx.Registerer, + config.Ctx.Registerer, ) require.NoError(err) - engCfg.AllGetsServer = snowGetHandler + config.AllGetsServer = snowGetHandler vm.Default(true) vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: Genesis, - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } - te, err := newTransitive(engCfg) + te, err := 
New(config) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil - return vdr, vals, sender, vm, te, gBlk -} - -func setupDefaultConfig(t *testing.T) (ids.NodeID, validators.Manager, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { - engCfg := DefaultConfig(t) - return setup(t, engCfg) -} - -func TestEngineShutdown(t *testing.T) { - require := require.New(t) - - _, _, _, vm, transitive, _ := setupDefaultConfig(t) - vmShutdownCalled := false - vm.ShutdownF = func(context.Context) error { - vmShutdownCalled = true - return nil - } - vm.CantShutdown = false - require.NoError(transitive.Shutdown(context.Background())) - require.True(vmShutdownCalled) + return vdr, config.Validators, sender, vm, te } -func TestEngineAdd(t *testing.T) { +func TestEngineDropsAttemptToIssueBlockAfterFailedRequest(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - - require.Equal(ids.Empty, te.Ctx.ChainID) + peerID, _, sender, vm, engine := setup(t, DefaultConfig(t)) - parent := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }} - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: parent.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + parent := snowmantest.BuildChild(snowmantest.Genesis) + child := snowmantest.BuildChild(parent) - asked := new(bool) - reqID := new(uint32) - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - *reqID = requestID - require.False(*asked) - *asked = true - require.Equal(vdr, inVdr) - require.Equal(blk.Parent(), blkID) + var request *common.Request + sender.SendGetF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Nil(request) + request = &common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + 
require.Equal(parent.ID(), blkID) } - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.Equal(blk.Bytes(), b) - return blk, nil + require.Equal(child.Bytes(), b) + return child, nil } - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil - case parent.ID(): - return parent, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } - require.NoError(te.Put(context.Background(), vdr, 0, blk.Bytes())) - - vm.ParseBlockF = nil - - require.True(*asked) - require.Len(te.blocked, 1) + // Attempting to add [child] will cause [parent] to be requested. While the + // request for [parent] is outstanding, [child] will be registered into a + // job blocked on [parent]'s issuance. + require.NoError(engine.Put(context.Background(), peerID, 0, child.Bytes())) + require.NotNil(request) + require.Equal(1, engine.blocked.NumDependencies()) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errUnknownBytes } - require.NoError(te.Put(context.Background(), vdr, *reqID, nil)) - - vm.ParseBlockF = nil - - require.Empty(te.blocked) + // Because this request doesn't provide [parent], the [child] job should be + // cancelled. 
+ require.NoError(engine.Put(context.Background(), request.NodeID, request.RequestID, nil)) + require.Zero(engine.blocked.NumDependencies()) } func TestEngineQuery(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + peerID, _, sender, vm, engine := setup(t, DefaultConfig(t)) - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + parent := snowmantest.BuildChild(snowmantest.Genesis) + child := snowmantest.BuildChild(parent) - chitted := new(bool) + var sendChitsCalled bool sender.SendChitsF = func(_ context.Context, _ ids.NodeID, requestID uint32, preferredID ids.ID, preferredIDByHeight ids.ID, accepted ids.ID) { - require.False(*chitted) - *chitted = true + require.False(sendChitsCalled) + sendChitsCalled = true require.Equal(uint32(15), requestID) - require.Equal(gBlk.ID(), preferredID) - require.Equal(gBlk.ID(), preferredIDByHeight) - require.Equal(gBlk.ID(), accepted) + require.Equal(snowmantest.GenesisID, preferredID) + require.Equal(snowmantest.GenesisID, preferredIDByHeight) + require.Equal(snowmantest.GenesisID, accepted) } - blocked := new(bool) + var getBlockCalled bool vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - *blocked = true + getBlockCalled = true + switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } - asked := new(bool) - getRequestID := new(uint32) - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - require.False(*asked) - *asked = true - *getRequestID = requestID - require.Equal(vdr, inVdr) + var getRequest *common.Request + sender.SendGetF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Nil(getRequest) + getRequest = &common.Request{ + 
NodeID: nodeID, + RequestID: requestID, + } + require.Equal(peerID, nodeID) require.Contains([]ids.ID{ - blk.ID(), - gBlk.ID(), + parent.ID(), + snowmantest.GenesisID, }, blkID) } - require.NoError(te.PullQuery(context.Background(), vdr, 15, blk.ID(), 1)) - require.True(*chitted) - require.True(*blocked) - require.True(*asked) - - queried := new(bool) - queryRequestID := new(uint32) - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { - require.False(*queried) - *queried = true - *queryRequestID = requestID - vdrSet := set.Of(vdr) - require.Equal(vdrSet, inVdrs) - require.Equal(blk.ID(), blockID) + // Handling a pull query for [parent] should result in immediately + // responding with chits for [Genesis] along with a request for [parent]. + require.NoError(engine.PullQuery(context.Background(), peerID, 15, parent.ID(), 1)) + require.True(sendChitsCalled) + require.True(getBlockCalled) + require.NotNil(getRequest) + + var queryRequest *common.Request + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { + require.Nil(queryRequest) + require.Equal(set.Of(peerID), nodeIDs) + queryRequest = &common.Request{ + NodeID: peerID, + RequestID: requestID, + } + require.Equal(parent.ID(), blockID) require.Equal(uint64(1), requestedHeight) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.Equal(blk.Bytes(), b) - return blk, nil + require.Equal(parent.Bytes(), b) + return parent, nil } - require.NoError(te.Put(context.Background(), vdr, *getRequestID, blk.Bytes())) - vm.ParseBlockF = nil - - require.True(*queried) - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk.IDV, - HeightV: 2, - BytesV: []byte{5, 4, 3, 2, 1, 9}, - } + // After receiving [parent], the engine will parse it, 
issue it, and then + // send a pull query. + require.NoError(engine.Put(context.Background(), getRequest.NodeID, getRequest.RequestID, parent.Bytes())) + require.NotNil(queryRequest) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case blk.ID(), blk1.ID(): + case parent.ID(), child.ID(): return nil, errUnknownBlock } require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } + vm.ParseBlockF = nil - *asked = false - sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - require.False(*asked) - *asked = true - *getRequestID = requestID - require.Equal(vdr, inVdr) - require.Equal(blk1.ID(), blkID) + getRequest = nil + sender.SendGetF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Nil(getRequest) + getRequest = &common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + require.Equal(peerID, nodeID) + require.Equal(child.ID(), blkID) } - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID())) - *queried = false - *queryRequestID = 0 - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { - require.False(*queried) - *queried = true - *queryRequestID = requestID - vdrSet := set.Of(vdr) - require.Equal(vdrSet, inVdrs) - require.Equal(blk1.ID(), blockID) + // Handling chits for [child] register a voter job blocking on [child]'s + // issuance and send a request for [child]. 
+ require.NoError(engine.Chits(context.Background(), queryRequest.NodeID, queryRequest.RequestID, child.ID(), child.ID(), child.ID())) + + queryRequest = nil + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { + require.Nil(queryRequest) + require.Equal(set.Of(peerID), nodeIDs) + queryRequest = &common.Request{ + NodeID: peerID, + RequestID: requestID, + } + require.Equal(child.ID(), blockID) require.Equal(uint64(1), requestedHeight) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.Equal(blk1.Bytes(), b) + require.Equal(child.Bytes(), b) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case blk.ID(): - return blk, nil - case blk1.ID(): - return blk1, nil + case parent.ID(): + return parent, nil + case child.ID(): + return child, nil } require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - return blk1, nil + return child, nil } - require.NoError(te.Put(context.Background(), vdr, *getRequestID, blk1.Bytes())) - vm.ParseBlockF = nil - require.Equal(choices.Accepted, blk1.Status()) - require.Empty(te.blocked) - - _ = te.polls.String() // Shouldn't panic - - require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) - require.Empty(te.blocked) + // After receiving [child], the engine will parse it, issue it, and then + // apply the votes received during the poll for [parent]. Applying the votes + // should cause both [parent] and [child] to be accepted. 
+ require.NoError(engine.Put(context.Background(), getRequest.NodeID, getRequest.RequestID, child.Bytes())) + require.Equal(choices.Accepted, parent.Status()) + require.Equal(choices.Accepted, child.Status()) + require.Zero(engine.blocked.NumDependencies()) } func TestEngineMultipleQuery(t *testing.T) { @@ -333,8 +326,7 @@ func TestEngineMultipleQuery(t *testing.T) { K: 3, AlphaPreference: 2, AlphaConfidence: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -364,20 +356,15 @@ func TestEngineMultipleQuery(t *testing.T) { vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), blkID) - return gBlk, nil + require.Equal(snowmantest.GenesisID, blkID) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) @@ -385,15 +372,8 @@ func TestEngineMultipleQuery(t *testing.T) { vm.GetBlockF = nil vm.LastAcceptedF = nil - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + blk0 := snowmantest.BuildChild(snowmantest.Genesis) + blk1 := snowmantest.BuildChild(blk0) queried := new(bool) queryRequestID := new(uint32) @@ -409,8 +389,8 @@ func TestEngineMultipleQuery(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ 
-424,20 +404,10 @@ func TestEngineMultipleQuery(t *testing.T) { te.metrics.issued.WithLabelValues(unknownSource), )) - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk0.IDV, - HeightV: 2, - BytesV: []byte{2}, - } - vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk0.ID(): return blk0, nil case blk1.ID(): @@ -491,40 +461,24 @@ func TestEngineMultipleQuery(t *testing.T) { require.NoError(te.Chits(context.Background(), vdr2, *queryRequestID, blk0.ID(), blk0.ID(), blk0.ID())) require.Equal(choices.Accepted, blk1.Status()) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineBlockedIssue(t *testing.T) { require := require.New(t) - _, _, sender, vm, te, gBlk := setupDefaultConfig(t) + _, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(false) - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk0.IDV, - HeightV: 2, - BytesV: []byte{2}, - } + blk0 := snowmantest.BuildChild(snowmantest.Genesis) + blk1 := snowmantest.BuildChild(blk0) sender.SendGetF = func(context.Context, ids.NodeID, uint32, ids.ID) {} vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk0.ID(): return blk0, nil default: @@ -540,7 +494,6 @@ func TestEngineBlockedIssue(t *testing.T) { te.metrics.issued.WithLabelValues(unknownSource), )) - blk0.StatusV = choices.Processing 
require.NoError(te.issue( context.Background(), te.Ctx.NodeID, @@ -552,87 +505,40 @@ func TestEngineBlockedIssue(t *testing.T) { require.Equal(blk1.ID(), te.Consensus.Preference()) } -func TestEngineAbandonResponse(t *testing.T) { - require := require.New(t) - - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - - sender.Default(false) - - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == gBlk.ID(): - return gBlk, nil - case blkID == blk.ID(): - return nil, errUnknownBlock - } - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - - require.NoError(te.issue( - context.Background(), - te.Ctx.NodeID, - blk, - false, - te.metrics.issued.WithLabelValues(unknownSource), - )) - require.NoError(te.QueryFailed(context.Background(), vdr, 1)) - - require.Empty(te.blocked) -} - -func TestEngineFetchBlock(t *testing.T) { +func TestEngineRespondsToGetRequest(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(false) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), id) - return gBlk, nil + require.Equal(snowmantest.GenesisID, id) + return snowmantest.Genesis, nil } - added := new(bool) - sender.SendPutF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blk []byte) { - require.Equal(vdr, inVdr) + var sentPut bool + sender.SendPutF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, blk []byte) { + require.False(sentPut) + sentPut = true + + require.Equal(vdr, nodeID) require.Equal(uint32(123), requestID) - require.Equal(gBlk.Bytes(), blk) - *added = true + require.Equal(snowmantest.GenesisBytes, blk) } - 
require.NoError(te.Get(context.Background(), vdr, 123, gBlk.ID())) - - require.True(*added) + require.NoError(te.Get(context.Background(), vdr, 123, snowmantest.GenesisID)) + require.True(sentPut) } func TestEnginePushQuery(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, blk.Bytes()) { @@ -643,8 +549,8 @@ func TestEnginePushQuery(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk.ID(): return blk, nil default: @@ -658,9 +564,9 @@ func TestEnginePushQuery(t *testing.T) { *chitted = true require.Equal(vdr, inVdr) require.Equal(uint32(20), requestID) - require.Equal(gBlk.ID(), preferredID) - require.Equal(gBlk.ID(), preferredIDByHeight) - require.Equal(gBlk.ID(), acceptedID) + require.Equal(snowmantest.GenesisID, preferredID) + require.Equal(snowmantest.GenesisID, preferredIDByHeight) + require.Equal(snowmantest.GenesisID, acceptedID) } queried := new(bool) @@ -682,24 +588,16 @@ func TestEnginePushQuery(t *testing.T) { func TestEngineBuildBlock(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + blk := 
snowmantest.BuildChild(snowmantest.Genesis) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -727,7 +625,7 @@ func TestEngineBuildBlock(t *testing.T) { func TestEngineRepoll(t *testing.T) { require := require.New(t) - vdr, _, sender, _, te, _ := setupDefaultConfig(t) + vdr, _, sender, _, te := setup(t, DefaultConfig(t)) sender.Default(true) @@ -752,8 +650,7 @@ func TestVoteCanceling(t *testing.T) { K: 3, AlphaPreference: 2, AlphaConfidence: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -783,35 +680,22 @@ func TestVoteCanceling(t *testing.T) { vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), id) - return gBlk, nil + require.Equal(snowmantest.GenesisID, id) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) queried := new(bool) queryRequestID := new(uint32) @@ -857,40 +741,27 @@ func TestEngineNoQuery(t *testing.T) { engCfg.Sender = sender sender.Default(true) - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: 
choices.Accepted, - }} - vm := &block.TestVM{} vm.T = t vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID == gBlk.ID() { - return gBlk, nil + if blkID == snowmantest.GenesisID { + return snowmantest.Genesis, nil } return nil, errUnknownBlock } engCfg.VM = vm - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) require.NoError(te.issue( context.Background(), @@ -910,27 +781,22 @@ func TestEngineNoRepollQuery(t *testing.T) { engCfg.Sender = sender sender.Default(true) - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm := &block.TestVM{} vm.T = t vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID == gBlk.ID() { - return gBlk, nil + if blkID == snowmantest.GenesisID { + return snowmantest.Genesis, nil } return nil, errUnknownBlock } engCfg.VM = vm - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) @@ -941,7 +807,7 @@ func TestEngineNoRepollQuery(t *testing.T) { func TestEngineAbandonQuery(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, _ := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) @@ -971,24 +837,16 @@ func TestEngineAbandonQuery(t *testing.T) { func TestEngineAbandonChit(t *testing.T) { require := 
require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk.ID(): return nil, errUnknownBlock } @@ -1021,35 +879,27 @@ func TestEngineAbandonChit(t *testing.T) { // Register a voter dependency on an unknown block. require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID)) - require.Len(te.blocked, 1) + require.Equal(1, te.blocked.NumDependencies()) sender.CantSendPullQuery = false require.NoError(te.GetFailed(context.Background(), vdr, reqID)) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk.ID(): return nil, errUnknownBlock } @@ -1082,61 +932,36 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { // Register a voter dependency on an unknown block. 
require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID)) - require.Len(te.blocked, 1) + require.Equal(1, te.blocked.NumDependencies()) sender.CantSendPullQuery = false - gBlkBytes := gBlk.Bytes() vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - require.Equal(gBlkBytes, b) - return gBlk, nil + require.Equal(snowmantest.GenesisBytes, b) + return snowmantest.Genesis, nil } // Respond with an unexpected block and verify that the request is correctly // cleared. - require.NoError(te.Put(context.Background(), vdr, reqID, gBlkBytes)) - require.Empty(te.blocked) + require.NoError(te.Put(context.Background(), vdr, reqID, snowmantest.GenesisBytes)) + require.Zero(te.blocked.NumDependencies()) } func TestEngineBlockingChitRequest(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - missingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - parentBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: missingBlk.IDV, - HeightV: 2, - BytesV: []byte{2}, - } - blockingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: parentBlk.IDV, - HeightV: 3, - BytesV: []byte{3}, - } + missingBlk := snowmantest.BuildChild(snowmantest.Genesis) + parentBlk := snowmantest.BuildChild(missingBlk) + blockingBlk := snowmantest.BuildChild(parentBlk) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blockingBlk.ID(): return blockingBlk, nil default: @@ 
-1163,11 +988,10 @@ func TestEngineBlockingChitRequest(t *testing.T) { require.NoError(te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes(), 0)) - require.Len(te.blocked, 2) + require.Equal(2, te.blocked.NumDependencies()) sender.CantSendPullQuery = false - missingBlk.StatusV = choices.Processing require.NoError(te.issue( context.Background(), te.Ctx.NodeID, @@ -1176,48 +1000,27 @@ func TestEngineBlockingChitRequest(t *testing.T) { te.metrics.issued.WithLabelValues(unknownSource), )) - require.Empty(te.blocked) + require.Zero(te.blocked.NumDependencies()) } func TestEngineBlockingChitResponse(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + config := DefaultConfig(t) + + peerID, _, sender, vm, te := setup(t, config) sender.Default(true) - issuedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - missingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{2}, - } - blockingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: missingBlk.IDV, - HeightV: 2, - BytesV: []byte{3}, - } + issuedBlk := snowmantest.BuildChild(snowmantest.Genesis) + + missingBlk := snowmantest.BuildChild(snowmantest.Genesis) + blockingBlk := snowmantest.BuildChild(missingBlk) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case issuedBlk.ID(): return issuedBlk, nil case blockingBlk.ID(): @@ -1226,66 +1029,125 @@ func TestEngineBlockingChitResponse(t *testing.T) { return nil, errUnknownBlock } } + vm.ParseBlockF = func(_ 
context.Context, blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(snowmantest.GenesisBytes, blkBytes): + return snowmantest.Genesis, nil + case bytes.Equal(issuedBlk.Bytes(), blkBytes): + return issuedBlk, nil + case bytes.Equal(missingBlk.Bytes(), blkBytes): + return missingBlk, nil + case bytes.Equal(blockingBlk.Bytes(), blkBytes): + return blockingBlk, nil + default: + return nil, errUnknownBlock + } + } - require.NoError(te.issue( + var getRequest *common.Request + sender.SendGetF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Nil(getRequest) + getRequest = &common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + require.Equal(missingBlk.ID(), blkID) + } + + // Issuing [blockingBlk] will register an issuer job for [blockingBlk] + // awaiting on [missingBlk]. It will also send a request for [missingBlk]. + require.NoError(te.Put( context.Background(), - te.Ctx.NodeID, - blockingBlk, - false, - te.metrics.issued.WithLabelValues(unknownSource), + peerID, + 0, + blockingBlk.Bytes(), )) - queryRequestID := new(uint32) - sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { - *queryRequestID = requestID - vdrSet := set.Of(vdr) - require.Equal(vdrSet, inVdrs) + var queryRequest *common.Request + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.Nil(queryRequest) + require.Equal(set.Of(peerID), nodeIDs) + queryRequest = &common.Request{ + NodeID: peerID, + RequestID: requestID, + } require.Equal(issuedBlk.ID(), blkID) require.Equal(uint64(1), requestedHeight) } - require.NoError(te.issue( + // Issuing [issuedBlk] will immediately adds [issuedBlk] to consensus, sets + // it as the preferred block, and sends a query for [issuedBlk]. 
+ require.NoError(te.Put( context.Background(), - te.Ctx.NodeID, - issuedBlk, - false, - te.metrics.issued.WithLabelValues(unknownSource), + peerID, + 0, + issuedBlk.Bytes(), )) - sender.SendPushQueryF = nil - sender.CantSendPushQuery = false + sender.SendPullQueryF = nil - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blockingBlk.ID(), issuedBlk.ID(), blockingBlk.ID())) + // In response to the query for [issuedBlk], the peer is responding with, + // the currently pending issuance, [blockingBlk]. The direct conflict of + // [issuedBlk] is [missingBlk]. This registers a voter job dependent on + // [blockingBlk] and [missingBlk]. + require.NoError(te.Chits( + context.Background(), + queryRequest.NodeID, + queryRequest.RequestID, + blockingBlk.ID(), + missingBlk.ID(), + blockingBlk.ID(), + )) + require.Equal(2, te.blocked.NumDependencies()) + + queryRequest = nil + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { + require.Nil(queryRequest) + require.Equal(set.Of(peerID), nodeIDs) + queryRequest = &common.Request{ + NodeID: peerID, + RequestID: requestID, + } + require.Equal(blockingBlk.ID(), blkID) + require.Equal(uint64(1), requestedHeight) + } - require.Len(te.blocked, 2) - sender.CantSendPullQuery = false + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case snowmantest.GenesisID: + return snowmantest.Genesis, nil + case issuedBlk.ID(): + return issuedBlk, nil + case missingBlk.ID(): + return missingBlk, nil + case blockingBlk.ID(): + return blockingBlk, nil + default: + return nil, errUnknownBlock + } + } - missingBlk.StatusV = choices.Processing - require.NoError(te.issue( + // Issuing [missingBlk] will add the block into consensus. However, it will + // not send a query for it as it is not the preferred block. 
+ require.NoError(te.Put( context.Background(), - te.Ctx.NodeID, - missingBlk, - false, - te.metrics.issued.WithLabelValues(unknownSource), + getRequest.NodeID, + getRequest.RequestID, + missingBlk.Bytes(), )) + require.Equal(choices.Accepted, missingBlk.Status()) + require.Equal(choices.Accepted, blockingBlk.Status()) + require.Equal(choices.Rejected, issuedBlk.Status()) } func TestEngineRetryFetch(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - missingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + missingBlk := snowmantest.BuildChild(snowmantest.Genesis) vm.CantGetBlock = false @@ -1320,29 +1182,13 @@ func TestEngineRetryFetch(t *testing.T) { func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - validBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - invalidBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: validBlk.IDV, - HeightV: 2, - VerifyV: errTest, - BytesV: []byte{2}, - } + validBlk := snowmantest.BuildChild(snowmantest.Genesis) + invalidBlk := snowmantest.BuildChild(validBlk) + invalidBlk.VerifyV = errTest invalidBlkID := invalidBlk.ID() @@ -1353,8 +1199,8 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return 
snowmantest.Genesis, nil case validBlk.ID(): return validBlk, nil case invalidBlk.ID(): @@ -1386,14 +1232,14 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { func TestEngineGossip(t *testing.T) { require := require.New(t) - nodeID, _, sender, vm, te, gBlk := setupDefaultConfig(t) + nodeID, _, sender, vm, te := setup(t, DefaultConfig(t)) vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), blkID) - return gBlk, nil + require.Equal(snowmantest.GenesisID, blkID) + return snowmantest.Genesis, nil } var calledSendPullQuery bool @@ -1410,31 +1256,15 @@ func TestEngineGossip(t *testing.T) { func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { require := require.New(t) - vdr, vdrs, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, vdrs, sender, vm, te := setup(t, DefaultConfig(t)) secondVdr := ids.GenerateTestNodeID() require.NoError(vdrs.AddStaker(te.Ctx.SubnetID, secondVdr, nil, ids.Empty, 1)) sender.Default(true) - missingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: missingBlk.IDV, - HeightV: 2, - BytesV: []byte{2}, - } + missingBlk := snowmantest.BuildChild(snowmantest.Genesis) + pendingBlk := snowmantest.BuildChild(missingBlk) parsed := new(bool) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1447,8 +1277,8 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + 
return snowmantest.Genesis, nil case pendingBlk.ID(): if !*parsed { return nil, errUnknownBlock @@ -1481,8 +1311,8 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case missingBlk.ID(): if !*parsed { return nil, errUnknownBlock @@ -1494,8 +1324,6 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } sender.CantSendPullQuery = false - missingBlk.StatusV = choices.Processing - require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) require.Equal(pendingBlk.ID(), te.Consensus.Preference()) @@ -1504,28 +1332,12 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { func TestEnginePushQueryRequestIDConflict(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - missingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: missingBlk.IDV, - HeightV: 2, - BytesV: []byte{2}, - } + missingBlk := snowmantest.BuildChild(snowmantest.Genesis) + pendingBlk := snowmantest.BuildChild(missingBlk) parsed := new(bool) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1538,8 +1350,8 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case pendingBlk.ID(): if !*parsed { return nil, 
errUnknownBlock @@ -1575,8 +1387,8 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case missingBlk.ID(): if !*parsed { return nil, errUnknownBlock @@ -1598,6 +1410,7 @@ func TestEngineAggressivePolling(t *testing.T) { engCfg := DefaultConfig(t) engCfg.Params.ConcurrentRepolls = 2 + engCfg.Params.Beta = 2 vals := validators.NewManager() engCfg.Validators = vals @@ -1617,20 +1430,15 @@ func TestEngineAggressivePolling(t *testing.T) { vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), blkID) - return gBlk, nil + require.Equal(snowmantest.GenesisID, blkID) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) @@ -1638,15 +1446,7 @@ func TestEngineAggressivePolling(t *testing.T) { vm.GetBlockF = nil vm.LastAcceptedF = nil - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + pendingBlk := snowmantest.BuildChild(snowmantest.Genesis) parsed := new(bool) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1659,8 +1459,8 @@ func TestEngineAggressivePolling(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + 
return snowmantest.Genesis, nil case pendingBlk.ID(): if !*parsed { return nil, errUnknownBlock @@ -1689,8 +1489,7 @@ func TestEngineDoubleChit(t *testing.T) { K: 2, AlphaPreference: 2, AlphaConfidence: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -1719,35 +1518,22 @@ func TestEngineDoubleChit(t *testing.T) { vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), id) - return gBlk, nil + require.Equal(snowmantest.GenesisID, id) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) queried := new(bool) queryRequestID := new(uint32) @@ -1770,8 +1556,8 @@ func TestEngineDoubleChit(t *testing.T) { vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk.ID(): return blk, nil } @@ -1818,20 +1604,15 @@ func TestEngineBuildBlockLimit(t *testing.T) { vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: Genesis, - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } 
vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), blkID) - return gBlk, nil + require.Equal(snowmantest.GenesisID, blkID) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) @@ -1839,25 +1620,8 @@ func TestEngineBuildBlockLimit(t *testing.T) { vm.GetBlockF = nil vm.LastAcceptedF = nil - blk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk0.IDV, - HeightV: 2, - BytesV: []byte{2}, - } - blks := []snowman.Block{blk0, blk1} + blks := snowmantest.BuildDescendants(snowmantest.Genesis, 2) + blk0 := blks[0] var ( queried bool @@ -1873,8 +1637,8 @@ func TestEngineBuildBlockLimit(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -1898,8 +1662,8 @@ func TestEngineBuildBlockLimit(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk0.ID(): return blk0, nil default: @@ -1912,410 +1676,59 @@ func TestEngineBuildBlockLimit(t *testing.T) { require.True(queried) } -func TestEngineReceiveNewRejectedBlock(t *testing.T) { +func TestEngineDropRejectedBlockOnReceipt(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + nodeID, _, sender, vm, te := setup(t, DefaultConfig(t)) - acceptedBlk := &snowman.TestBlock{ - TestDecidable: 
choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - rejectedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{2}, - } - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, + // Ignore outbound chits + sender.SendChitsF = func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {} + + acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) + rejectedChain := snowmantest.BuildDescendants(snowmantest.Genesis, 2) + vm.ParseBlockF = MakeParseBlockF( + []*snowmantest.Block{ + snowmantest.Genesis, + acceptedBlk, }, - ParentV: rejectedBlk.IDV, - HeightV: 2, - BytesV: []byte{3}, + rejectedChain, + ) + vm.GetBlockF = MakeGetBlockF([]*snowmantest.Block{ + snowmantest.Genesis, + acceptedBlk, + }) + + // Track outbound queries + var queryRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeID), nodeIDs) + queryRequestIDs = append(queryRequestIDs, requestID) } - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case gBlk.ID(): - return gBlk, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - var ( - asked bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ 
context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - asked = true - reqID = rID - } + // Issue [acceptedBlk] to the engine. This + require.NoError(te.PushQuery(context.Background(), nodeID, 0, acceptedBlk.Bytes(), acceptedBlk.Height())) + require.Len(queryRequestIDs, 1) - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - - require.True(asked) - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - sender.SendPullQueryF = nil - asked = false - - sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, _ ids.ID) { - asked = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - - require.True(asked) - - rejectedBlk.StatusV = choices.Rejected - - require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) + // Vote for [acceptedBlk] and cause it to be accepted. + require.NoError(te.Chits(context.Background(), nodeID, queryRequestIDs[0], acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) + require.Len(queryRequestIDs, 1) // Shouldn't have caused another query + require.Equal(choices.Accepted, acceptedBlk.Status()) + // Attempt to issue rejectedChain[1] to the engine. This should be dropped + // because the engine knows it has rejected it's parent rejectedChain[0]. 
+ require.NoError(te.PushQuery(context.Background(), nodeID, 0, rejectedChain[1].Bytes(), acceptedBlk.Height())) + require.Len(queryRequestIDs, 1) // Shouldn't have caused another query require.Zero(te.blkReqs.Len()) } -func TestEngineRejectionAmplification(t *testing.T) { - require := require.New(t) - - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - - acceptedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - rejectedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Unknown, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{2}, - } - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: rejectedBlk.IDV, - HeightV: 2, - BytesV: []byte{3}, - } - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case gBlk.ID(): - return gBlk, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - var ( - queried bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - queried = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - - require.True(queried) - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case 
gBlk.ID(): - return gBlk, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - require.Zero(te.Consensus.NumProcessing()) - - queried = false - var asked bool - sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) { - queried = true - } - sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, blkID ids.ID) { - asked = true - reqID = rID - - require.Equal(rejectedBlk.ID(), blkID) - } - - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - - require.False(queried) - require.True(asked) - - rejectedBlk.StatusV = choices.Processing - require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - - require.False(queried) -} - -// Test that the node will not issue a block into consensus that it knows will -// be rejected because the parent is rejected. 
-func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) { - require := require.New(t) - - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - - acceptedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - rejectedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Rejected, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{2}, - } - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - RejectV: errUnexpectedCall, - StatusV: choices.Processing, - }, - ParentV: rejectedBlk.IDV, - HeightV: 2, - BytesV: []byte{3}, - } - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case gBlk.ID(): - return gBlk, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - case rejectedBlk.ID(): - return rejectedBlk, nil - default: - return nil, errUnknownBlock - } - } - - var ( - queried bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - queried = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - - require.True(queried) - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - require.Zero(te.Consensus.NumProcessing()) - - require.NoError(te.Put(context.Background(), 
vdr, 0, pendingBlk.Bytes())) - - require.Zero(te.Consensus.NumProcessing()) - - require.Empty(te.pending) -} - -// Test that the node will not issue a block into consensus that it knows will -// be rejected because the parent is failing verification. -func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) { - require := require.New(t) - - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - - acceptedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - rejectedBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - VerifyV: errUnexpectedCall, - BytesV: []byte{2}, - } - pendingBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - RejectV: errUnexpectedCall, - StatusV: choices.Processing, - }, - ParentV: rejectedBlk.IDV, - HeightV: 2, - BytesV: []byte{3}, - } - - vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, acceptedBlk.Bytes()): - return acceptedBlk, nil - case bytes.Equal(b, rejectedBlk.Bytes()): - return rejectedBlk, nil - case bytes.Equal(b, pendingBlk.Bytes()): - return pendingBlk, nil - default: - require.FailNow(errUnknownBlock.Error()) - return nil, errUnknownBlock - } - } - - vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case gBlk.ID(): - return gBlk, nil - default: - return nil, errUnknownBlock - } - } - - var ( - queried bool - reqID uint32 - ) - sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ ids.ID, _ uint64) { - queried = true - reqID = rID - } - - require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - require.True(queried) - - vm.GetBlockF = func(_ 
context.Context, blkID ids.ID) (snowman.Block, error) { - switch blkID { - case gBlk.ID(): - return gBlk, nil - case rejectedBlk.ID(): - return rejectedBlk, nil - case acceptedBlk.ID(): - return acceptedBlk, nil - default: - return nil, errUnknownBlock - } - } - - require.NoError(te.Chits(context.Background(), vdr, reqID, acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID())) - - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - require.Zero(te.Consensus.NumProcessing()) - require.Empty(te.pending) -} - // Test that the node will not gossip a block that isn't preferred. func TestEngineNonPreferredAmplification(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) - preferredBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - nonPreferredBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{2}, - } + preferredBlk := snowmantest.BuildChild(snowmantest.Genesis) + nonPreferredBlk := snowmantest.BuildChild(snowmantest.Genesis) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -2331,8 +1744,8 @@ func TestEngineNonPreferredAmplification(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -2366,31 +1779,13 @@ func TestEngineNonPreferredAmplification(t *testing.T) { func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, 
DefaultConfig(t)) expectedVdrSet := set.Of(vdr) - // [blk1] is a child of [gBlk] and currently passes verification - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - // [blk2] is a child of [blk1] and cannot pass verification until [blk1] - // has been marked as accepted. - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk1.ID(), - HeightV: 2, - BytesV: []byte{2}, - VerifyV: errInvalid, - } + blk1 := snowmantest.BuildChild(snowmantest.Genesis) + // blk2 cannot pass verification until [blk1] has been marked as accepted. + blk2 := snowmantest.BuildChild(blk1) + blk2.VerifyV = errInvalid // The VM should be able to parse [blk1] and [blk2] vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -2405,13 +1800,13 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } } - // for now, this VM should only be able to retrieve [gBlk] from storage + // for now, this VM should only be able to retrieve [Genesis] from storage // this "GetBlockF" will be updated after blocks are verified/accepted // in the following tests vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -2426,13 +1821,15 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { require.Equal(vdr, inVdr) *asked = true } + sender.CantSendChits = false + // This engine receives a Gossip message for [blk2] which was "unknown" in this engine. // The engine thus learns about its ancestor [blk1] and should send a Get request for it. 
// (see above for expected "Get" request) - require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes())) + require.NoError(te.PushQuery(context.Background(), vdr, 0, blk2.Bytes(), 0)) require.True(*asked) - // Prepare to PushQuery [blk1] after our Get request is fulfilled. We should not PushQuery + // Prepare to PullQuery [blk1] after our Get request is fulfilled. We should not PullQuery // [blk2] since it currently fails verification. queried := new(bool) queryRequestID := new(uint32) @@ -2448,8 +1845,8 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } // This engine now handles the response to the "Get" request. This should cause [blk1] to be issued // which will result in attempting to issue [blk2]. However, [blk2] should fail verification and be dropped. - // By issuing [blk1], this node should fire a "PushQuery" request for [blk1]. - // (see above for expected "PushQuery" request) + // By issuing [blk1], this node should fire a "PullQuery" request for [blk1]. 
+ // (see above for expected "PullQuery" request) require.NoError(te.Put(context.Background(), vdr, *reqID, blk1.Bytes())) require.True(*asked) require.True(*queried, "Didn't query the newly issued blk1") @@ -2457,8 +1854,8 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // now [blk1] is verified, vm can return it vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk1.ID(): return blk1, nil default: @@ -2493,8 +1890,8 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { blk2.VerifyV = nil vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk1.ID(): return blk1, nil case blk2.ID(): @@ -2503,8 +1900,9 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { return nil, errUnknownBlock } } + *queried = false - // Prepare to PushQuery [blk2] after receiving a Gossip message with [blk2]. + // Prepare to PullQuery [blk2] after receiving a Gossip message with [blk2]. sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { require.False(*queried) *queried = true @@ -2513,8 +1911,8 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { require.Equal(blk2.ID(), blkID) require.Equal(uint64(2), requestedHeight) } - // Expect that the Engine will send a PushQuery after receiving this Gossip message for [blk2]. - require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes())) + // Expect that the Engine will send a PullQuery after receiving this Gossip message for [blk2]. 
+ require.NoError(te.PushQuery(context.Background(), vdr, 0, blk2.Bytes(), 0)) require.True(*queried) // After a single vote for [blk2], it should be marked as accepted. @@ -2538,42 +1936,14 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) expectedVdrSet := set.Of(vdr) - // [blk1] is a child of [gBlk] and currently passes verification - blk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } - // [blk2] is a child of [blk1] and cannot pass verification until [blk1] - // has been marked as accepted. - blk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk1.ID(), - HeightV: 2, - BytesV: []byte{2}, - VerifyV: errInvalid, - } - // [blk3] is a child of [blk2] and will not attempt to be issued until - // [blk2] has successfully been verified. - blk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blk2.ID(), - HeightV: 3, - BytesV: []byte{3}, - } + blk1 := snowmantest.BuildChild(snowmantest.Genesis) + // blk2 cannot pass verification until [blk1] has been marked as accepted. 
+ blk2 := snowmantest.BuildChild(blk1) + blk2.VerifyV = errInvalid + blk3 := snowmantest.BuildChild(blk2) // The VM should be able to parse [blk1], [blk2], and [blk3] vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -2590,11 +1960,11 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } } - // The VM should be able to retrieve [gBlk] and [blk1] from storage + // The VM should be able to retrieve [Genesis] and [blk1] from storage vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk1.ID(): return blk1, nil default: @@ -2611,14 +1981,16 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { require.Equal(vdr, inVdr) *asked = true } + sender.CantSendChits = false + // Receive Gossip message for [blk3] first and expect the sender to issue a // Get request for its ancestor: [blk2]. - require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk3.Bytes())) + require.NoError(te.PushQuery(context.Background(), vdr, 0, blk3.Bytes(), 0)) require.True(*asked) - // Prepare to PushQuery [blk1] after our request for [blk2] is fulfilled. - // We should not PushQuery [blk2] since it currently fails verification. - // We should not PushQuery [blk3] because [blk2] wasn't issued. + // Prepare to PullQuery [blk1] after our request for [blk2] is fulfilled. + // We should not PullQuery [blk2] since it currently fails verification. + // We should not PullQuery [blk3] because [blk2] wasn't issued. 
queried := new(bool) queryRequestID := new(uint32) sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID, requestedHeight uint64) { @@ -2673,53 +2045,23 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { require := require.New(t) - vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + + vdr, _, sender, vm, te := setup(t, DefaultConfig(t)) sender.Default(true) - grandParentBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.ID(), - HeightV: 1, - BytesV: []byte{1}, - } + grandParentBlk := snowmantest.BuildChild(snowmantest.Genesis) - parentBlkA := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: grandParentBlk.ID(), - HeightV: 2, - VerifyV: errTest, // Reports as invalid - BytesV: []byte{2}, - } + parentBlkA := snowmantest.BuildChild(grandParentBlk) + parentBlkA.VerifyV = errInvalid // Note that [parentBlkB] has the same [ID()] as [parentBlkA]; // it's a different instantiation of the same block. 
- parentBlkB := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: parentBlkA.IDV, - StatusV: choices.Processing, - }, - ParentV: parentBlkA.ParentV, - HeightV: parentBlkA.HeightV, - BytesV: parentBlkA.BytesV, - } + parentBlkB := *parentBlkA + parentBlkB.VerifyV = nil // Child of [parentBlkA]/[parentBlkB] - childBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: parentBlkA.ID(), - HeightV: 3, - BytesV: []byte{3}, - } + childBlk := snowmantest.BuildChild(parentBlkA) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(grandParentBlk.BytesV, b) @@ -2728,8 +2070,8 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case grandParentBlk.IDV: return grandParentBlk, nil default: @@ -2759,17 +2101,17 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkB.BytesV, b) - return parentBlkB, nil + return &parentBlkB, nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case grandParentBlk.IDV: return grandParentBlk, nil case parentBlkB.IDV: - return parentBlkB, nil + return &parentBlkB, nil default: return nil, errUnknownBlock } @@ -2821,8 +2163,7 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 2, - BetaRogue: 2, + Beta: 2, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -2848,34 +2189,21 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { 
vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), id) - return gBlk, nil + require.Equal(snowmantest.GenesisID, id) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte, requestedHeight uint64) { @@ -2895,8 +2223,8 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk.ID(): return blk, nil } @@ -2930,8 +2258,7 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -2956,34 +2283,21 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { vm.CantSetState = false vm.CantSetPreference = false - gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }} - vm.LastAcceptedF = func(context.Context) (ids.ID, error) { - return gBlk.ID(), nil + return 
snowmantest.GenesisID, nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(gBlk.ID(), id) - return gBlk, nil + require.Equal(snowmantest.GenesisID, id) + return snowmantest.Genesis, nil } - te, err := newTransitive(engCfg) + te, err := New(engCfg) require.NoError(err) require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil - blk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: gBlk.IDV, - HeightV: 1, - BytesV: []byte{1}, - } + blk := snowmantest.BuildChild(snowmantest.Genesis) // Issue the block. This shouldn't call the sender, because creating the // poll should fail. @@ -3021,8 +2335,8 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { - case gBlk.ID(): - return gBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case blk.ID(): return blk, nil } @@ -3035,3 +2349,922 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { require.NoError(te.Chits(context.Background(), vdr, queryRequestID, blk.ID(), blk.ID(), blk.ID())) require.Equal(choices.Accepted, blk.Status()) } + +// Full blockchain structure: +// +// G +// / \ +// 0 3 +// | | +// 1 4 +// | +// 2 +// +// K = 3, Alpha = 2, Beta = 1, ConcurrentRepolls = 1 +// +// Initial configuration: +// +// G +// | +// 0 +// | +// 1 +// | +// 2 +// +// The following is a regression test for a bug where the engine would stall. +// +// 1. Poll = 0: Handle a chit for block 1. +// 2. Poll = 0: Handle a chit for block 2. +// 3. Poll = 0: Handle a chit for block 3. This will issue a Get request for block 3. This will block on the issuance of block 3. +// 4. Attempt to issue block 4. This will block on the issuance of block 3. +// 5. Poll = 1: Handle a chit for block 1. +// 6. Poll = 1: Handle a chit for block 2. +// 7. Poll = 1: Handle a chit for block 4. 
This will block on the issuance of block 4. +// 8. Issue block 3. +// Poll = 0 terminates. This will accept blocks 0 and 1. This will also reject block 3. +// Block = 4 will attempt to be delivered, but because it is effectively rejected due to the acceptance of block 1, it will be dropped. +// Poll = 1 should terminate and block 2 should be repolled. +func TestEngineVoteStallRegression(t *testing.T) { + require := require.New(t) + + config := DefaultConfig(t) + config.Params = snowball.Parameters{ + K: 3, + AlphaPreference: 2, + AlphaConfidence: 2, + Beta: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + nodeIDs := []ids.NodeID{nodeID0, nodeID1, nodeID2} + + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID0, nil, ids.Empty, 1)) + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID1, nil, ids.Empty, 1)) + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID2, nil, ids.Empty, 1)) + + sender := &common.SenderTest{ + T: t, + SendChitsF: func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {}, + } + sender.Default(true) + config.Sender = sender + + acceptedChain := snowmantest.BuildDescendants(snowmantest.Genesis, 3) + rejectedChain := snowmantest.BuildDescendants(snowmantest.Genesis, 2) + + vm := &block.TestVM{ + TestVM: common.TestVM{ + T: t, + InitializeF: func( + context.Context, + *snow.Context, + database.Database, + []byte, + []byte, + []byte, + chan<- common.Message, + []*common.Fx, + common.AppSender, + ) error { + return nil + }, + SetStateF: func(context.Context, snow.State) error { + return nil + }, + }, + ParseBlockF: MakeParseBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + rejectedChain, + ), + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + 
), + SetPreferenceF: func(context.Context, ids.ID) error { + return nil + }, + LastAcceptedF: MakeLastAcceptedBlockF( + snowmantest.Genesis, + acceptedChain, + ), + } + vm.Default(true) + config.VM = vm + + engine, err := New(config) + require.NoError(err) + require.NoError(engine.Start(context.Background(), 0)) + + var pollRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeIDs...), polledNodeIDs) + pollRequestIDs = append(pollRequestIDs, requestID) + } + + // Issue block 0. + require.NoError(engine.PushQuery( + context.Background(), + nodeID0, + 0, + acceptedChain[0].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + + // Issue block 1. + require.NoError(engine.PushQuery( + context.Background(), + nodeID0, + 0, + acceptedChain[1].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 2) + + // Issue block 2. + require.NoError(engine.PushQuery( + context.Background(), + nodeID0, + 0, + acceptedChain[2].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 3) + + // Apply votes in poll 0 to the blocks that will be accepted. + require.NoError(engine.Chits( + context.Background(), + nodeID0, + pollRequestIDs[0], + acceptedChain[1].ID(), + acceptedChain[1].ID(), + acceptedChain[1].ID(), + )) + require.NoError(engine.Chits( + context.Background(), + nodeID1, + pollRequestIDs[0], + acceptedChain[2].ID(), + acceptedChain[2].ID(), + acceptedChain[2].ID(), + )) + + // Attempt to apply votes in poll 0 for block 3. This will send a Get + // request for block 3 and register the chits as a dependency on block 3. 
+ var getBlock3Request *common.Request + sender.SendGetF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Nil(getBlock3Request) + require.Equal(nodeID2, nodeID) + getBlock3Request = &common.Request{ + NodeID: nodeID, + RequestID: requestID, + } + require.Equal(rejectedChain[0].ID(), blkID) + } + + require.NoError(engine.Chits( + context.Background(), + nodeID2, + pollRequestIDs[0], + rejectedChain[0].ID(), + rejectedChain[0].ID(), + rejectedChain[0].ID(), + )) + require.NotNil(getBlock3Request) + + // Attempt to issue block 4. This will register a dependency on block 3 for + // the issuance of block 4. + require.NoError(engine.PushQuery( + context.Background(), + nodeID0, + 0, + rejectedChain[1].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 3) + + // Apply votes in poll 1 that will cause blocks 3 and 4 to be rejected once + // poll 0 finishes. + require.NoError(engine.Chits( + context.Background(), + nodeID0, + pollRequestIDs[1], + acceptedChain[1].ID(), + acceptedChain[1].ID(), + acceptedChain[1].ID(), + )) + require.NoError(engine.Chits( + context.Background(), + nodeID1, + pollRequestIDs[1], + acceptedChain[2].ID(), + acceptedChain[2].ID(), + acceptedChain[2].ID(), + )) + require.NoError(engine.Chits( + context.Background(), + nodeID2, + pollRequestIDs[1], + rejectedChain[1].ID(), + rejectedChain[1].ID(), + rejectedChain[1].ID(), + )) + + // Provide block 3. + // This will cause poll 0 to terminate and accept blocks 0 and 1. + // Then the engine will attempt to deliver block 4, but because block 1 is + // accepted, block 4 will be dropped. + // Then poll 1 should terminate because block 4 was dropped. 
+ vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + rejectedChain, + ) + + require.NoError(engine.Put( + context.Background(), + getBlock3Request.NodeID, + getBlock3Request.RequestID, + rejectedChain[0].Bytes(), + )) + require.Equal(choices.Accepted, acceptedChain[0].Status()) + require.Equal(choices.Accepted, acceptedChain[1].Status()) + require.Equal(choices.Processing, acceptedChain[2].Status()) + require.Equal(choices.Rejected, rejectedChain[0].Status()) + + // Then engine should issue as many queries as needed to confirm block 2. + for i := 2; i < len(pollRequestIDs); i++ { + for _, nodeID := range nodeIDs { + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[i], + acceptedChain[2].ID(), + acceptedChain[2].ID(), + acceptedChain[2].ID(), + )) + } + } + require.Equal(choices.Accepted, acceptedChain[0].Status()) + require.Equal(choices.Accepted, acceptedChain[1].Status()) + require.Equal(choices.Accepted, acceptedChain[2].Status()) + require.Equal(choices.Rejected, rejectedChain[0].Status()) +} + +// When a voter is registered with multiple dependencies, the engine must not +// execute the voter until all of the dependencies have been resolved; even if +// one of the dependencies has been abandoned. 
+func TestEngineEarlyTerminateVoterRegression(t *testing.T) { + require := require.New(t) + + config := DefaultConfig(t) + nodeID := ids.GenerateTestNodeID() + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID, nil, ids.Empty, 1)) + + sender := &common.SenderTest{ + T: t, + SendChitsF: func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {}, + } + sender.Default(true) + config.Sender = sender + + chain := snowmantest.BuildDescendants(snowmantest.Genesis, 3) + vm := &block.TestVM{ + TestVM: common.TestVM{ + T: t, + InitializeF: func( + context.Context, + *snow.Context, + database.Database, + []byte, + []byte, + []byte, + chan<- common.Message, + []*common.Fx, + common.AppSender, + ) error { + return nil + }, + SetStateF: func(context.Context, snow.State) error { + return nil + }, + }, + ParseBlockF: MakeParseBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + chain, + ), + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + SetPreferenceF: func(context.Context, ids.ID) error { + return nil + }, + LastAcceptedF: MakeLastAcceptedBlockF( + snowmantest.Genesis, + chain, + ), + } + vm.Default(true) + config.VM = vm + + engine, err := New(config) + require.NoError(err) + require.NoError(engine.Start(context.Background(), 0)) + + var pollRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeID), polledNodeIDs) + pollRequestIDs = append(pollRequestIDs, requestID) + } + + getRequestIDs := make(map[ids.ID]uint32) + sender.SendGetF = func(_ context.Context, requestedNodeID ids.NodeID, requestID uint32, blkID ids.ID) { + require.Equal(nodeID, requestedNodeID) + getRequestIDs[blkID] = requestID + } + + // Issue block 0 to trigger poll 0. 
+ require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + chain[0].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + require.Empty(getRequestIDs) + + // Update GetBlock to return, the newly issued, block 0. This is needed to + // enable the issuance of block 1. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + chain[:1], + ) + + // Vote for block 2 or block 1 in poll 0. This should trigger Get requests + // for both block 2 and block 1. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[0], + chain[2].ID(), + chain[1].ID(), + snowmantest.GenesisID, + )) + require.Len(pollRequestIDs, 1) + require.Contains(getRequestIDs, chain[1].ID()) + require.Contains(getRequestIDs, chain[2].ID()) + + // Mark the request for block 2 as failed. This should not cause the poll to + // be applied as there is still an outstanding request for block 1. + require.NoError(engine.GetFailed( + context.Background(), + nodeID, + getRequestIDs[chain[2].ID()], + )) + require.Len(pollRequestIDs, 1) + + // Issue block 1. This should cause the poll to be applied to both block 0 + // and block 1. + require.NoError(engine.Put( + context.Background(), + nodeID, + getRequestIDs[chain[1].ID()], + chain[1].Bytes(), + )) + // Because Put added a new preferred block to the chain, a new poll will be + // created. + require.Len(pollRequestIDs, 2) + require.Equal(choices.Accepted, chain[0].Status()) + require.Equal(choices.Accepted, chain[1].Status()) + // Block 2 still hasn't been issued, so it's status should remain + // Processing. + require.Equal(choices.Processing, chain[2].Status()) +} + +// Voting for an unissued cached block that fails verification should not +// register any dependencies. +// +// Full blockchain structure: +// +// Genesis +// / \ +// 0 2 +// | | +// 1 3 +// +// We first issue block 2, and then block 3 fails verification. This causes +// block 3 to be added to the invalid blocks cache. 
+// +// We then issue block 0, issue block 1, and accept block 0. +// +// If we then vote for block 3, the vote should be dropped and trigger a repoll +// which could then be used to accept block 1. +func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { + require := require.New(t) + + config := DefaultConfig(t) + nodeID := ids.GenerateTestNodeID() + require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, nodeID, nil, ids.Empty, 1)) + + sender := &common.SenderTest{ + T: t, + SendChitsF: func(context.Context, ids.NodeID, uint32, ids.ID, ids.ID, ids.ID) {}, + } + sender.Default(true) + config.Sender = sender + + var ( + acceptedChain = snowmantest.BuildDescendants(snowmantest.Genesis, 2) + rejectedChain = snowmantest.BuildDescendants(snowmantest.Genesis, 2) + ) + rejectedChain[1].VerifyV = errInvalid + + vm := &block.TestVM{ + TestVM: common.TestVM{ + T: t, + InitializeF: func( + context.Context, + *snow.Context, + database.Database, + []byte, + []byte, + []byte, + chan<- common.Message, + []*common.Fx, + common.AppSender, + ) error { + return nil + }, + SetStateF: func(context.Context, snow.State) error { + return nil + }, + }, + ParseBlockF: MakeParseBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + rejectedChain, + ), + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + SetPreferenceF: func(context.Context, ids.ID) error { + return nil + }, + LastAcceptedF: MakeLastAcceptedBlockF( + snowmantest.Genesis, + acceptedChain, + rejectedChain, + ), + } + vm.Default(true) + config.VM = vm + + engine, err := New(config) + require.NoError(err) + require.NoError(engine.Start(context.Background(), 0)) + + var pollRequestIDs []uint32 + sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { + require.Equal(set.Of(nodeID), polledNodeIDs) + pollRequestIDs = append(pollRequestIDs, requestID) + } + + // Issue rejectedChain[0] to consensus. 
+ require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + rejectedChain[0].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + + // In order to attempt to issue rejectedChain[1], the engine expects the VM + // to be willing to provide rejectedChain[0]. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + rejectedChain[:1], + ) + + // Attempt to issue rejectedChain[1] which should add it to the invalid + // block cache. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + rejectedChain[1].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 1) + + _, wasCached := engine.nonVerifiedCache.Get(rejectedChain[1].ID()) + require.True(wasCached) + + // Issue acceptedChain[0] to consensus. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + acceptedChain[0].Bytes(), + 0, + )) + // Because acceptedChain[0] isn't initially preferred, a new poll won't be + // created. + require.Len(pollRequestIDs, 1) + + // In order to vote for acceptedChain[0], the engine expects the VM to be + // willing to provide it. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain[:1], + rejectedChain[:1], + ) + + // Accept acceptedChain[0] and reject rejectedChain[0]. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[0], + acceptedChain[0].ID(), + acceptedChain[0].ID(), + snowmantest.GenesisID, + )) + // There are no processing blocks, so no new poll should be created. + require.Len(pollRequestIDs, 1) + require.Equal(choices.Accepted, acceptedChain[0].Status()) + require.Equal(choices.Rejected, rejectedChain[0].Status()) + + // Issue acceptedChain[1] to consensus. + require.NoError(engine.PushQuery( + context.Background(), + nodeID, + 0, + acceptedChain[1].Bytes(), + 0, + )) + require.Len(pollRequestIDs, 2) + + // Vote for the transitively rejected rejectedChain[1]. This should cause a + // repoll. 
+ require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[1], + rejectedChain[1].ID(), + rejectedChain[1].ID(), + snowmantest.GenesisID, + )) + require.Len(pollRequestIDs, 3) + + // In order to vote for acceptedChain[1], the engine expects the VM to be + // willing to provide it. + vm.GetBlockF = MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + acceptedChain, + rejectedChain[:1], + ) + + // Accept acceptedChain[1]. + require.NoError(engine.Chits( + context.Background(), + nodeID, + pollRequestIDs[2], + acceptedChain[1].ID(), + acceptedChain[1].ID(), + snowmantest.GenesisID, + )) + require.Len(pollRequestIDs, 3) + require.Equal(choices.Accepted, acceptedChain[1].Status()) +} + +func TestGetProcessingAncestor(t *testing.T) { + var ( + ctx = snowtest.ConsensusContext( + snowtest.Context(t, snowtest.PChainID), + ) + issuedBlock = snowmantest.BuildChild(snowmantest.Genesis) + unissuedBlock = snowmantest.BuildChild(issuedBlock) + ) + + metrics, err := newMetrics(prometheus.NewRegistry()) + require.NoError(t, err) + + c := &snowman.Topological{} + require.NoError(t, c.Initialize( + ctx, + snowball.DefaultParameters, + snowmantest.GenesisID, + 0, + time.Now(), + )) + + require.NoError(t, c.Add(issuedBlock)) + + nonVerifiedAncestors := ancestor.NewTree() + nonVerifiedAncestors.Add(unissuedBlock.ID(), unissuedBlock.Parent()) + + tests := []struct { + name string + engine *Transitive + initialVote ids.ID + expectedAncestor ids.ID + expectedFound bool + }{ + { + name: "drop accepted blockID", + engine: &Transitive{ + Config: Config{ + Ctx: ctx, + VM: &block.TestVM{ + TestVM: common.TestVM{ + T: t, + }, + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + }, + Consensus: c, + }, + metrics: metrics, + nonVerifieds: ancestor.NewTree(), + pending: map[ids.ID]snowman.Block{}, + nonVerifiedCache: &cache.Empty[ids.ID, snowman.Block]{}, + }, + initialVote: snowmantest.GenesisID, + expectedAncestor: ids.Empty, + 
expectedFound: false, + }, + { + name: "return processing blockID", + engine: &Transitive{ + Config: Config{ + Ctx: ctx, + VM: &block.TestVM{ + TestVM: common.TestVM{ + T: t, + }, + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + }, + Consensus: c, + }, + metrics: metrics, + nonVerifieds: ancestor.NewTree(), + pending: map[ids.ID]snowman.Block{}, + nonVerifiedCache: &cache.Empty[ids.ID, snowman.Block]{}, + }, + initialVote: issuedBlock.ID(), + expectedAncestor: issuedBlock.ID(), + expectedFound: true, + }, + { + name: "drop unknown blockID", + engine: &Transitive{ + Config: Config{ + Ctx: ctx, + VM: &block.TestVM{ + TestVM: common.TestVM{ + T: t, + }, + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + }, + Consensus: c, + }, + metrics: metrics, + nonVerifieds: ancestor.NewTree(), + pending: map[ids.ID]snowman.Block{}, + nonVerifiedCache: &cache.Empty[ids.ID, snowman.Block]{}, + }, + initialVote: ids.GenerateTestID(), + expectedAncestor: ids.Empty, + expectedFound: false, + }, + { + name: "apply vote through ancestor tree", + engine: &Transitive{ + Config: Config{ + Ctx: ctx, + VM: &block.TestVM{ + TestVM: common.TestVM{ + T: t, + }, + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + }, + Consensus: c, + }, + metrics: metrics, + nonVerifieds: nonVerifiedAncestors, + pending: map[ids.ID]snowman.Block{}, + nonVerifiedCache: &cache.Empty[ids.ID, snowman.Block]{}, + }, + initialVote: unissuedBlock.ID(), + expectedAncestor: issuedBlock.ID(), + expectedFound: true, + }, + { + name: "apply vote through pending set", + engine: &Transitive{ + Config: Config{ + Ctx: ctx, + VM: &block.TestVM{ + TestVM: common.TestVM{ + T: t, + }, + GetBlockF: MakeGetBlockF( + []*snowmantest.Block{snowmantest.Genesis}, + ), + }, + Consensus: c, + }, + metrics: metrics, + nonVerifieds: ancestor.NewTree(), + pending: map[ids.ID]snowman.Block{ + unissuedBlock.ID(): unissuedBlock, + }, + nonVerifiedCache: 
&cache.Empty[ids.ID, snowman.Block]{}, + }, + initialVote: unissuedBlock.ID(), + expectedAncestor: issuedBlock.ID(), + expectedFound: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + ancestor, found := test.engine.getProcessingAncestor(context.Background(), test.initialVote) + require.Equal(test.expectedAncestor, ancestor) + require.Equal(test.expectedFound, found) + }) + } +} + +// Test the engine's classification for blocks to either be dropped or try +// issuance. +// +// Full blockchain structure: +// +// Genesis +// / \ +// 0 7 +// / \ | +// 1 4 8 +// | | / \ +// 2 5 9 11 +// | | | +// 3 6 10 +// +// Genesis and 0 are accepted. +// 1 is issued. +// 5 and 9 are pending. +// +// Structure known to engine: +// +// Genesis +// / +// 0 +// / +// 1 +// +// 5 9 +func TestShouldIssueBlock(t *testing.T) { + var ( + ctx = snowtest.ConsensusContext( + snowtest.Context(t, snowtest.PChainID), + ) + chain0Through3 = snowmantest.BuildDescendants(snowmantest.Genesis, 4) + chain4Through6 = snowmantest.BuildDescendants(chain0Through3[0], 3) + chain7Through10 = snowmantest.BuildDescendants(snowmantest.Genesis, 4) + chain11Through11 = snowmantest.BuildDescendants(chain7Through10[1], 1) + blocks = join(chain0Through3, chain4Through6, chain7Through10, chain11Through11) + ) + + require.NoError(t, blocks[0].Accept(context.Background())) + + c := &snowman.Topological{} + require.NoError(t, c.Initialize( + ctx, + snowball.DefaultParameters, + blocks[0].ID(), + blocks[0].Height(), + blocks[0].Timestamp(), + )) + require.NoError(t, c.Add(blocks[1])) + + engine := &Transitive{ + Config: Config{ + Consensus: c, + }, + pending: map[ids.ID]snowman.Block{ + blocks[5].ID(): blocks[5], + blocks[9].ID(): blocks[9], + }, + } + + tests := []struct { + name string + block snowman.Block + expectedShouldIssue bool + }{ + { + name: "genesis", + block: snowmantest.Genesis, + expectedShouldIssue: false, + }, + { + name: "last 
accepted", + block: blocks[0], + expectedShouldIssue: false, + }, + { + name: "already processing", + block: blocks[1], + expectedShouldIssue: false, + }, + { + name: "next block to enqueue for issuance on top of a processing block", + block: blocks[2], + expectedShouldIssue: true, + }, + { + name: "block to enqueue for issuance which depends on another block", + block: blocks[3], + expectedShouldIssue: true, + }, + { + name: "next block to enqueue for issuance on top of an accepted block", + block: blocks[4], + expectedShouldIssue: true, + }, + { + name: "already pending block", + block: blocks[5], + expectedShouldIssue: false, + }, + { + name: "block to enqueue on top of a pending block", + block: blocks[6], + expectedShouldIssue: true, + }, + { + name: "block was directly rejected", + block: blocks[7], + expectedShouldIssue: false, + }, + { + name: "block was transitively rejected", + block: blocks[8], + expectedShouldIssue: false, + }, + { + name: "block was transitively rejected but that is not known and was marked as pending", + block: blocks[9], + expectedShouldIssue: false, + }, + { + name: "block was transitively rejected but that is not known and is built on top of pending", + block: blocks[10], + expectedShouldIssue: true, + }, + { + name: "block was transitively rejected but that is not known", + block: blocks[11], + expectedShouldIssue: true, + }, + } + for i, test := range tests { + t.Run(fmt.Sprintf("%d %s", i-1, test.name), func(t *testing.T) { + shouldIssue := engine.shouldIssueBlock(test.block) + require.Equal(t, test.expectedShouldIssue, shouldIssue) + }) + } +} + +// join the provided slices into a single slice. +// +// TODO: Use slices.Concat once the minimum go version is 1.22. +func join[T any](slices ...[]T) []T { + size := 0 + for _, s := range slices { + size += len(s) + } + newSlice := make([]T, 0, size) + for _, s := range slices { + newSlice = append(newSlice, s...) 
+ } + return newSlice +} diff --git a/snow/engine/snowman/voter.go b/snow/engine/snowman/voter.go index 0a029e870ec2..c57a1c733551 100644 --- a/snow/engine/snowman/voter.go +++ b/snow/engine/snowman/voter.go @@ -9,39 +9,25 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/snowman/job" "github.com/ava-labs/avalanchego/utils/bag" - "github.com/ava-labs/avalanchego/utils/set" ) -// Voter records chits received from [vdr] once its dependencies are met. +var _ job.Job[ids.ID] = (*voter)(nil) + +// Voter records chits received from [nodeID] once its dependencies are met. type voter struct { t *Transitive - vdr ids.NodeID + nodeID ids.NodeID requestID uint32 responseOptions []ids.ID - deps set.Set[ids.ID] -} - -func (v *voter) Dependencies() set.Set[ids.ID] { - return v.deps -} - -// Mark that a dependency has been met. -func (v *voter) Fulfill(ctx context.Context, id ids.ID) { - v.deps.Remove(id) - v.Update(ctx) } -// Abandon this attempt to record chits. -func (v *voter) Abandon(ctx context.Context, id ids.ID) { - v.Fulfill(ctx, id) -} - -func (v *voter) Update(ctx context.Context) { - if v.deps.Len() != 0 || v.t.errs.Errored() { - return - } - +// The resolution results from the dependencies of the voter aren't explicitly +// used. The responseOptions are used to determine which block to apply the vote +// to. The dependencies are only used to optimistically delay the application of +// the vote until the blocks have been issued. 
+func (v *voter) Execute(ctx context.Context, _ []ids.ID, _ []ids.ID) error { var ( vote ids.ID shouldVote bool @@ -50,7 +36,7 @@ func (v *voter) Update(ctx context.Context) { for i, voteOption := range v.responseOptions { // To prevent any potential deadlocks with undisclosed dependencies, // votes must be bubbled to the nearest valid block - vote, shouldVote = v.getProcessingAncestor(ctx, voteOption) + vote, shouldVote = v.t.getProcessingAncestor(ctx, voteOption) if shouldVote { voteIndex = i break @@ -60,13 +46,13 @@ func (v *voter) Update(ctx context.Context) { var results []bag.Bag[ids.ID] if shouldVote { v.t.selectedVoteIndex.Observe(float64(voteIndex)) - results = v.t.polls.Vote(v.requestID, v.vdr, vote) + results = v.t.polls.Vote(v.requestID, v.nodeID, vote) } else { - results = v.t.polls.Drop(v.requestID, v.vdr) + results = v.t.polls.Drop(v.requestID, v.nodeID) } if len(results) == 0 { - return + return nil } for _, result := range results { @@ -75,77 +61,20 @@ func (v *voter) Update(ctx context.Context) { zap.Stringer("result", &result), ) if err := v.t.Consensus.RecordPoll(ctx, result); err != nil { - v.t.errs.Add(err) + return err } } - if v.t.errs.Errored() { - return - } - if err := v.t.VM.SetPreference(ctx, v.t.Consensus.Preference()); err != nil { - v.t.errs.Add(err) - return + return err } if v.t.Consensus.NumProcessing() == 0 { v.t.Ctx.Log.Debug("Snowman engine can quiesce") - return + return nil } v.t.Ctx.Log.Debug("Snowman engine can't quiesce") v.t.repoll(ctx) -} - -// getProcessingAncestor finds [initialVote]'s most recent ancestor that is -// processing in consensus. If no ancestor could be found, false is returned. -// -// Note: If [initialVote] is processing, then [initialVote] will be returned. -func (v *voter) getProcessingAncestor(ctx context.Context, initialVote ids.ID) (ids.ID, bool) { - // If [bubbledVote] != [initialVote], it is guaranteed that [bubbledVote] is - // in processing. 
Otherwise, we attempt to iterate through any blocks we - // have at our disposal as a best-effort mechanism to find a valid ancestor. - bubbledVote := v.t.nonVerifieds.GetAncestor(initialVote) - for { - blk, err := v.t.GetBlock(ctx, bubbledVote) - // If we cannot retrieve the block, drop [vote] - if err != nil { - v.t.Ctx.Log.Debug("dropping vote", - zap.String("reason", "ancestor couldn't be fetched"), - zap.Stringer("initialVoteID", initialVote), - zap.Stringer("bubbledVoteID", bubbledVote), - zap.Error(err), - ) - v.t.numProcessingAncestorFetchesFailed.Inc() - return ids.Empty, false - } - - if v.t.Consensus.Decided(blk) { - v.t.Ctx.Log.Debug("dropping vote", - zap.String("reason", "bubbled vote already decided"), - zap.Stringer("initialVoteID", initialVote), - zap.Stringer("bubbledVoteID", bubbledVote), - zap.Stringer("status", blk.Status()), - zap.Uint64("height", blk.Height()), - ) - v.t.numProcessingAncestorFetchesDropped.Inc() - return ids.Empty, false - } - - if v.t.Consensus.Processing(bubbledVote) { - v.t.Ctx.Log.Verbo("applying vote", - zap.Stringer("initialVoteID", initialVote), - zap.Stringer("bubbledVoteID", bubbledVote), - zap.Uint64("height", blk.Height()), - ) - if bubbledVote != initialVote { - v.t.numProcessingAncestorFetchesSucceeded.Inc() - } else { - v.t.numProcessingAncestorFetchesUnneeded.Inc() - } - return bubbledVote, true - } - - bubbledVote = blk.Parent() - } + return nil } diff --git a/snow/event/blockable.go b/snow/event/blockable.go deleted file mode 100644 index 404e95c2aee3..000000000000 --- a/snow/event/blockable.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package event - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -// Blockable defines what an object must implement to be able to block on -// dependent events being completed. 
-type Blockable interface { - // IDs that this object is blocking on - Dependencies() set.Set[ids.ID] - // Notify this object that an event has been fulfilled - Fulfill(context.Context, ids.ID) - // Notify this object that an event has been abandoned - Abandon(context.Context, ids.ID) - // Update the state of this object without changing the status of any events - Update(context.Context) -} diff --git a/snow/event/blocker.go b/snow/event/blocker.go deleted file mode 100644 index 9c15ffb50604..000000000000 --- a/snow/event/blocker.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package event - -import ( - "context" - "fmt" - "strings" - - "github.com/ava-labs/avalanchego/ids" -) - -const ( - minBlockerSize = 16 -) - -// Blocker tracks Blockable events. -// Blocker is used to track events that require their dependencies to be -// fulfilled before them. Once a Blockable event is registered, it will be -// notified once any of its dependencies are fulfilled or abandoned. 
-type Blocker map[ids.ID][]Blockable - -func (b *Blocker) init() { - if *b == nil { - *b = make(map[ids.ID][]Blockable, minBlockerSize) - } -} - -// Returns the number of items that have dependencies waiting on -// them to be fulfilled -func (b *Blocker) Len() int { - return len(*b) -} - -// Fulfill notifies all objects blocking on the event whose ID is that -// the event has happened -func (b *Blocker) Fulfill(ctx context.Context, id ids.ID) { - b.init() - - blocking := (*b)[id] - delete(*b, id) - - for _, pending := range blocking { - pending.Fulfill(ctx, id) - } -} - -// Abandon notifies all objects blocking on the event whose ID is that -// the event has been abandoned -func (b *Blocker) Abandon(ctx context.Context, id ids.ID) { - b.init() - - blocking := (*b)[id] - delete(*b, id) - - for _, pending := range blocking { - pending.Abandon(ctx, id) - } -} - -// Register a new Blockable and its dependencies -func (b *Blocker) Register(ctx context.Context, pending Blockable) { - b.init() - - for pendingID := range pending.Dependencies() { - (*b)[pendingID] = append((*b)[pendingID], pending) - } - - pending.Update(ctx) -} - -// PrefixedString returns the same value as the String function, with all the -// new lines prefixed by [prefix] -func (b *Blocker) PrefixedString(prefix string) string { - b.init() - - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("Blocking on %d IDs:", len(*b))) - for key, value := range *b { - sb.WriteString(fmt.Sprintf("\n%sID[%s]: %d", - prefix, - key, - len(value), - )) - } - return strings.TrimSuffix(sb.String(), "\n") -} - -func (b *Blocker) String() string { - return b.PrefixedString("") -} diff --git a/snow/event/blocker_test.go b/snow/event/blocker_test.go deleted file mode 100644 index d7620bfebe1a..000000000000 --- a/snow/event/blocker_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package event - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -func TestBlocker(t *testing.T) { - require := require.New(t) - - b := Blocker(nil) - - a := newTestBlockable() - - id0 := ids.GenerateTestID() - id1 := ids.GenerateTestID() - id2 := ids.GenerateTestID() - - calledDep := new(bool) - a.dependencies = func() set.Set[ids.ID] { - *calledDep = true - - s := set.Of(id0, id1) - return s - } - calledFill := new(bool) - a.fulfill = func(context.Context, ids.ID) { - *calledFill = true - } - calledAbandon := new(bool) - a.abandon = func(context.Context, ids.ID) { - *calledAbandon = true - } - calledUpdate := new(bool) - a.update = func(context.Context) { - *calledUpdate = true - } - - b.Register(context.Background(), a) - - require.True(*calledDep) - require.False(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Fulfill(context.Background(), id2) - b.Abandon(context.Background(), id2) - - require.True(*calledDep) - require.False(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Fulfill(context.Background(), id0) - - require.True(*calledDep) - require.True(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Abandon(context.Background(), id0) - - require.True(*calledDep) - require.True(*calledFill) - require.False(*calledAbandon) - require.True(*calledUpdate) - - b.Abandon(context.Background(), id1) - - require.True(*calledDep) - require.True(*calledFill) - require.True(*calledAbandon) - require.True(*calledUpdate) -} - -type testBlockable struct { - dependencies func() set.Set[ids.ID] - fulfill func(context.Context, ids.ID) - abandon func(context.Context, ids.ID) - update func(context.Context) -} - -func newTestBlockable() *testBlockable { - return &testBlockable{ - dependencies: func() set.Set[ids.ID] { - return set.Set[ids.ID]{} - }, - fulfill: 
func(context.Context, ids.ID) {}, - abandon: func(context.Context, ids.ID) {}, - update: func(context.Context) {}, - } -} - -func (b *testBlockable) Dependencies() set.Set[ids.ID] { - return b.dependencies() -} - -func (b *testBlockable) Fulfill(ctx context.Context, id ids.ID) { - b.fulfill(ctx, id) -} - -func (b *testBlockable) Abandon(ctx context.Context, id ids.ID) { - b.abandon(ctx, id) -} - -func (b *testBlockable) Update(ctx context.Context) { - b.update(ctx) -} diff --git a/snow/networking/benchlist/benchlist.go b/snow/networking/benchlist/benchlist.go index 08f7e7d8d65e..453395379435 100644 --- a/snow/networking/benchlist/benchlist.go +++ b/snow/networking/benchlist/benchlist.go @@ -9,11 +9,13 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/heap" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -50,8 +52,9 @@ type failureStreak struct { type benchlist struct { lock sync.RWMutex // Context of the chain this is the benchlist for - ctx *snow.ConsensusContext - metrics metrics + ctx *snow.ConsensusContext + + numBenched, weightBenched prometheus.Gauge // Used to notify the timer that it should recalculate when it should fire resetTimer chan struct{} @@ -99,13 +102,22 @@ func NewBenchlist( minimumFailingDuration, duration time.Duration, maxPortion float64, + reg prometheus.Registerer, ) (Benchlist, error) { if maxPortion < 0 || maxPortion >= 1 { return nil, fmt.Errorf("max portion of benched stake must be in [0,1) but got %f", maxPortion) } benchlist := &benchlist{ - ctx: ctx, + ctx: ctx, + numBenched: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "benched_num", + Help: "Number of currently benched validators", + }), + weightBenched: 
prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "benched_weight", + Help: "Weight of currently benched validators", + }), resetTimer: make(chan struct{}, 1), failureStreaks: make(map[ids.NodeID]failureStreak), benchlistSet: set.Set[ids.NodeID]{}, @@ -117,7 +129,12 @@ func NewBenchlist( duration: duration, maxPortion: maxPortion, } - if err := benchlist.metrics.Initialize(ctx.Registerer); err != nil { + + err := utils.Err( + reg.Register(benchlist.numBenched), + reg.Register(benchlist.weightBenched), + ) + if err != nil { return nil, err } @@ -188,7 +205,7 @@ func (b *benchlist) removedExpiredNodes() { b.benchable.Unbenched(b.ctx.ChainID, nodeID) } - b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) + b.numBenched.Set(float64(b.benchedHeap.Len())) benchedStake, err := b.vdrs.SubsetWeight(b.ctx.SubnetID, b.benchlistSet) if err != nil { b.ctx.Log.Error("error calculating benched stake", @@ -197,7 +214,7 @@ func (b *benchlist) removedExpiredNodes() { ) return } - b.metrics.weightBenched.Set(float64(benchedStake)) + b.weightBenched.Set(float64(benchedStake)) } func (b *benchlist) durationToSleep() time.Duration { @@ -230,7 +247,7 @@ func (b *benchlist) RegisterResponse(nodeID ids.NodeID) { delete(b.failureStreaks, nodeID) } -// RegisterResponse notes that a request to [nodeID] timed out +// RegisterFailure notes that a request to [nodeID] timed out func (b *benchlist) RegisterFailure(nodeID ids.NodeID) { b.lock.Lock() defer b.lock.Unlock() @@ -338,6 +355,6 @@ func (b *benchlist) bench(nodeID ids.NodeID) { } // Update metrics - b.metrics.numBenched.Set(float64(b.benchedHeap.Len())) - b.metrics.weightBenched.Set(float64(newBenchedStake)) + b.numBenched.Set(float64(b.benchedHeap.Len())) + b.weightBenched.Set(float64(newBenchedStake)) } diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go index 45568392297e..3a52be818f75 100644 --- a/snow/networking/benchlist/benchlist_test.go +++ 
b/snow/networking/benchlist/benchlist_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" @@ -49,6 +50,7 @@ func TestBenchlistAdd(t *testing.T) { minimumFailingDuration, duration, maxPortion, + prometheus.NewRegistry(), ) require.NoError(err) b := benchIntf.(*benchlist) @@ -173,6 +175,7 @@ func TestBenchlistMaxStake(t *testing.T) { minimumFailingDuration, duration, maxPortion, + prometheus.NewRegistry(), ) require.NoError(err) b := benchIntf.(*benchlist) @@ -295,6 +298,7 @@ func TestBenchlistRemove(t *testing.T) { minimumFailingDuration, duration, maxPortion, + prometheus.NewRegistry(), ) require.NoError(err) b := benchIntf.(*benchlist) diff --git a/snow/networking/benchlist/manager.go b/snow/networking/benchlist/manager.go index e6ac45da4400..e19c54410447 100644 --- a/snow/networking/benchlist/manager.go +++ b/snow/networking/benchlist/manager.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" @@ -39,12 +40,13 @@ type Manager interface { // Config defines the configuration for a benchlist type Config struct { - Benchable Benchable `json:"-"` - Validators validators.Manager `json:"-"` - Threshold int `json:"threshold"` - MinimumFailingDuration time.Duration `json:"minimumFailingDuration"` - Duration time.Duration `json:"duration"` - MaxPortion float64 `json:"maxPortion"` + Benchable Benchable `json:"-"` + Validators validators.Manager `json:"-"` + BenchlistRegisterer metrics.MultiGatherer `json:"-"` + Threshold int `json:"threshold"` + MinimumFailingDuration time.Duration `json:"minimumFailingDuration"` + Duration time.Duration `json:"duration"` + MaxPortion float64 `json:"maxPortion"` } type manager struct { @@ -108,6 +110,14 @@ func (m *manager) RegisterChain(ctx 
*snow.ConsensusContext) error { return nil } + reg, err := metrics.MakeAndRegister( + m.config.BenchlistRegisterer, + ctx.PrimaryAlias, + ) + if err != nil { + return err + } + benchlist, err := NewBenchlist( ctx, m.config.Benchable, @@ -116,6 +126,7 @@ func (m *manager) RegisterChain(ctx *snow.ConsensusContext) error { m.config.MinimumFailingDuration, m.config.Duration, m.config.MaxPortion, + reg, ) if err != nil { return err diff --git a/snow/networking/benchlist/metrics.go b/snow/networking/benchlist/metrics.go deleted file mode 100644 index 25f9e50f7da8..000000000000 --- a/snow/networking/benchlist/metrics.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package benchlist - -import ( - "fmt" - - "github.com/prometheus/client_golang/prometheus" -) - -type metrics struct { - numBenched, weightBenched prometheus.Gauge -} - -func (m *metrics) Initialize(registerer prometheus.Registerer) error { - m.numBenched = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "benchlist", - Name: "benched_num", - Help: "Number of currently benched validators", - }) - if err := registerer.Register(m.numBenched); err != nil { - return fmt.Errorf("failed to register num benched statistics due to %w", err) - } - - m.weightBenched = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "benchlist", - Name: "benched_weight", - Help: "Weight of currently benched validators", - }) - if err := registerer.Register(m.weightBenched); err != nil { - return fmt.Errorf("failed to register weight benched statistics due to %w", err) - } - - return nil -} diff --git a/snow/networking/handler/handler.go b/snow/networking/handler/handler.go index 7a62dd7d928b..1eb42ca0dcdc 100644 --- a/snow/networking/handler/handler.go +++ b/snow/networking/handler/handler.go @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" @@ -30,6 +30,7 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) @@ -123,6 +124,7 @@ type handler struct { // Tracks the peers that are currently connected to this subnet peerTracker commontracker.Peers + p2pTracker *p2p.PeerTracker } // Initialize this consensus handler @@ -137,6 +139,8 @@ func New( subnetConnector validators.SubnetConnector, subnet subnets.Subnet, peerTracker commontracker.Peers, + p2pTracker *p2p.PeerTracker, + reg prometheus.Registerer, ) (Handler, error) { h := &handler{ ctx: ctx, @@ -151,21 +155,36 @@ func New( subnetConnector: subnetConnector, subnet: subnet, peerTracker: peerTracker, + p2pTracker: p2pTracker, } h.asyncMessagePool.SetLimit(threadPoolSize) var err error - h.metrics, err = newMetrics("handler", h.ctx.Registerer) + h.metrics, err = newMetrics(reg) if err != nil { return nil, fmt.Errorf("initializing handler metrics errored with: %w", err) } cpuTracker := resourceTracker.CPUTracker() - h.syncMessageQueue, err = NewMessageQueue(h.ctx, h.validators, cpuTracker, "handler", message.SynchronousOps) + h.syncMessageQueue, err = NewMessageQueue( + h.ctx.Log, + h.ctx.SubnetID, + h.validators, + cpuTracker, + "sync", + reg, + ) if err != nil { return nil, fmt.Errorf("initializing sync message queue errored with: %w", err) } - h.asyncMessageQueue, err = NewMessageQueue(h.ctx, h.validators, cpuTracker, "handler_async", message.AsynchronousOps) + h.asyncMessageQueue, err = NewMessageQueue( + h.ctx.Log, + h.ctx.SubnetID, + h.validators, + cpuTracker, + "async", + reg, + 
) if err != nil { return nil, fmt.Errorf("initializing async message queue errored with: %w", err) } @@ -363,7 +382,7 @@ func (h *handler) dispatchSync(ctx context.Context) { for { // Get the next message we should process. If the handler is shutting // down, we may fail to pop a message. - ctx, msg, ok := h.popUnexpiredMsg(h.syncMessageQueue, h.metrics.expired) + ctx, msg, ok := h.popUnexpiredMsg(h.syncMessageQueue) if !ok { return } @@ -371,9 +390,10 @@ func (h *handler) dispatchSync(ctx context.Context) { // If there is an error handling the message, shut down the chain if err := h.handleSyncMsg(ctx, msg); err != nil { h.StopWithError(ctx, fmt.Errorf( - "%w while processing sync message: %s", + "%w while processing sync message: %s from %s", err, - msg, + msg.Op(), + msg.NodeID(), )) return } @@ -392,7 +412,7 @@ func (h *handler) dispatchAsync(ctx context.Context) { for { // Get the next message we should process. If the handler is shutting // down, we may fail to pop a message. - ctx, msg, ok := h.popUnexpiredMsg(h.asyncMessageQueue, h.metrics.asyncExpired) + ctx, msg, ok := h.popUnexpiredMsg(h.asyncMessageQueue) if !ok { return } @@ -429,7 +449,7 @@ func (h *handler) dispatchChans(ctx context.Context) { h.StopWithError(ctx, fmt.Errorf( "%w while processing chan message: %s", err, - msg, + msg.Op(), )) return } @@ -440,7 +460,7 @@ func (h *handler) dispatchChans(ctx context.Context) { func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { var ( nodeID = msg.NodeID() - op = msg.Op() + op = msg.Op().String() body = msg.Message() startTime = h.clock.Time() // Check if the chain is in normal operation at the start of message @@ -450,13 +470,13 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { if h.ctx.Log.Enabled(logging.Verbo) { h.ctx.Log.Verbo("forwarding sync message to consensus", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), zap.Stringer("message", body), ) } else { 
h.ctx.Log.Debug("forwarding sync message to consensus", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), ) } h.resourceTracker.StartProcessing(nodeID, startTime) @@ -466,24 +486,28 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { h.ctx.Lock.Unlock() var ( - endTime = h.clock.Time() - messageHistograms = h.metrics.messages[op] - processingTime = endTime.Sub(startTime) - msgHandlingTime = endTime.Sub(lockAcquiredTime) + endTime = h.clock.Time() + lockingTime = lockAcquiredTime.Sub(startTime) + handlingTime = endTime.Sub(lockAcquiredTime) ) h.resourceTracker.StopProcessing(nodeID, endTime) - messageHistograms.processingTime.Observe(float64(processingTime)) - messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) + h.metrics.lockingTime.Add(float64(lockingTime)) + labels := prometheus.Labels{ + opLabel: op, + } + h.metrics.messages.With(labels).Inc() + h.metrics.messageHandlingTime.With(labels).Add(float64(handlingTime)) + msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling sync message", - zap.Stringer("messageOp", op), + zap.String("messageOp", op), ) - if processingTime > syncProcessingTimeWarnLimit && isNormalOp { + if lockingTime+handlingTime > syncProcessingTimeWarnLimit && isNormalOp { h.ctx.Log.Warn("handling sync message took longer than expected", - zap.Duration("processingTime", processingTime), - zap.Duration("msgHandlingTime", msgHandlingTime), + zap.Duration("lockingTime", lockingTime), + zap.Duration("handlingTime", handlingTime), zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), zap.Stringer("message", body), ) } @@ -492,23 +516,23 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { // We will attempt to pass the message to the requested type for the state // we are currently in. 
currentState := h.ctx.State.Get() - if msg.EngineType == p2p.EngineType_ENGINE_TYPE_SNOWMAN && - currentState.Type == p2p.EngineType_ENGINE_TYPE_AVALANCHE { + if msg.EngineType == p2ppb.EngineType_ENGINE_TYPE_SNOWMAN && + currentState.Type == p2ppb.EngineType_ENGINE_TYPE_AVALANCHE { // The peer is requesting an engine type that hasn't been initialized // yet. This means we know that this isn't a response, so we can safely // drop the message. h.ctx.Log.Debug("dropping sync message", zap.String("reason", "uninitialized engine type"), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), zap.Stringer("currentEngineType", currentState.Type), zap.Stringer("requestedEngineType", msg.EngineType), ) return nil } - var engineType p2p.EngineType + var engineType p2ppb.EngineType switch msg.EngineType { - case p2p.EngineType_ENGINE_TYPE_AVALANCHE, p2p.EngineType_ENGINE_TYPE_SNOWMAN: + case p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, p2ppb.EngineType_ENGINE_TYPE_SNOWMAN: // The peer is requesting an engine type that has been initialized, so // we should attempt to honor the request. engineType = msg.EngineType @@ -529,7 +553,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { // requested an Avalanche engine handle the message. h.ctx.Log.Debug("dropping sync message", zap.String("reason", "uninitialized engine state"), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), zap.Stringer("currentEngineType", currentState.Type), zap.Stringer("requestedEngineType", msg.EngineType), zap.Stringer("engineState", currentState.State), @@ -543,16 +567,16 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { // response fails. 
switch msg := body.(type) { // State messages should always be sent to the snowman engine - case *p2p.GetStateSummaryFrontier: + case *p2ppb.GetStateSummaryFrontier: return engine.GetStateSummaryFrontier(ctx, nodeID, msg.RequestId) - case *p2p.StateSummaryFrontier: + case *p2ppb.StateSummaryFrontier: return engine.StateSummaryFrontier(ctx, nodeID, msg.RequestId, msg.Summary) case *message.GetStateSummaryFrontierFailed: return engine.GetStateSummaryFrontierFailed(ctx, nodeID, msg.RequestID) - case *p2p.GetAcceptedStateSummary: + case *p2ppb.GetAcceptedStateSummary: return engine.GetAcceptedStateSummary( ctx, nodeID, @@ -560,7 +584,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { set.Of(msg.Heights...), ) - case *p2p.AcceptedStateSummary: + case *p2ppb.AcceptedStateSummary: summaryIDs, err := getIDs(msg.SummaryIds) if err != nil { h.ctx.Log.Debug("message with invalid field", @@ -580,10 +604,10 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { // Bootstrapping messages may be forwarded to either avalanche or snowman // engines, depending on the EngineType field - case *p2p.GetAcceptedFrontier: + case *p2ppb.GetAcceptedFrontier: return engine.GetAcceptedFrontier(ctx, nodeID, msg.RequestId) - case *p2p.AcceptedFrontier: + case *p2ppb.AcceptedFrontier: containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("message with invalid field", @@ -601,7 +625,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *message.GetAcceptedFrontierFailed: return engine.GetAcceptedFrontierFailed(ctx, nodeID, msg.RequestID) - case *p2p.GetAccepted: + case *p2ppb.GetAccepted: containerIDs, err := getIDs(msg.ContainerIds) if err != nil { h.ctx.Log.Debug("message with invalid field", @@ -616,7 +640,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.GetAccepted(ctx, nodeID, msg.RequestId, containerIDs) - case *p2p.Accepted: + case 
*p2ppb.Accepted: containerIDs, err := getIDs(msg.ContainerIds) if err != nil { h.ctx.Log.Debug("message with invalid field", @@ -634,7 +658,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *message.GetAcceptedFailed: return engine.GetAcceptedFailed(ctx, nodeID, msg.RequestID) - case *p2p.GetAncestors: + case *p2ppb.GetAncestors: containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("dropping message with invalid field", @@ -652,10 +676,10 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *message.GetAncestorsFailed: return engine.GetAncestorsFailed(ctx, nodeID, msg.RequestID) - case *p2p.Ancestors: + case *p2ppb.Ancestors: return engine.Ancestors(ctx, nodeID, msg.RequestId, msg.Containers) - case *p2p.Get: + case *p2ppb.Get: containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("dropping message with invalid field", @@ -673,13 +697,13 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { case *message.GetFailed: return engine.GetFailed(ctx, nodeID, msg.RequestID) - case *p2p.Put: + case *p2ppb.Put: return engine.Put(ctx, nodeID, msg.RequestId, msg.Container) - case *p2p.PushQuery: + case *p2ppb.PushQuery: return engine.PushQuery(ctx, nodeID, msg.RequestId, msg.Container, msg.RequestedHeight) - case *p2p.PullQuery: + case *p2ppb.PullQuery: containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("dropping message with invalid field", @@ -694,7 +718,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { return engine.PullQuery(ctx, nodeID, msg.RequestId, containerID, msg.RequestedHeight) - case *p2p.Chits: + case *p2ppb.Chits: preferredID, err := ids.ToID(msg.PreferredId) if err != nil { h.ctx.Log.Debug("message with invalid field", @@ -716,9 +740,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { zap.String("field", "PreferredIDAtHeight"), zap.Error(err), ) - 
// TODO: Require this field to be populated correctly after v1.11.x - // is activated. - preferredIDAtHeight = preferredID + return engine.QueryFailed(ctx, nodeID, msg.RequestId) } acceptedID, err := ids.ToID(msg.AcceptedId) @@ -744,6 +766,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { if err != nil { return err } + h.p2pTracker.Connected(nodeID, msg.NodeVersion) return engine.Connected(ctx, nodeID, msg.NodeVersion) case *message.ConnectedSubnet: @@ -754,6 +777,7 @@ func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { if err != nil { return err } + h.p2pTracker.Disconnected(nodeID) return engine.Disconnected(ctx, nodeID) default: @@ -768,9 +792,10 @@ func (h *handler) handleAsyncMsg(ctx context.Context, msg Message) { h.asyncMessagePool.Go(func() error { if err := h.executeAsyncMsg(ctx, msg); err != nil { h.StopWithError(ctx, fmt.Errorf( - "%w while processing async message: %s", + "%w while processing async message: %s from %s", err, - msg, + msg.Op(), + msg.NodeID(), )) } return nil @@ -781,36 +806,38 @@ func (h *handler) handleAsyncMsg(ctx context.Context, msg Message) { func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { var ( nodeID = msg.NodeID() - op = msg.Op() + op = msg.Op().String() body = msg.Message() startTime = h.clock.Time() ) if h.ctx.Log.Enabled(logging.Verbo) { h.ctx.Log.Verbo("forwarding async message to consensus", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), zap.Stringer("message", body), ) } else { h.ctx.Log.Debug("forwarding async message to consensus", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), + zap.String("messageOp", op), ) } h.resourceTracker.StartProcessing(nodeID, startTime) defer func() { var ( - endTime = h.clock.Time() - messageHistograms = h.metrics.messages[op] - processingTime = endTime.Sub(startTime) + endTime = h.clock.Time() + handlingTime = endTime.Sub(startTime) ) 
h.resourceTracker.StopProcessing(nodeID, endTime) - // There is no lock grabbed here, so both metrics are identical - messageHistograms.processingTime.Observe(float64(processingTime)) - messageHistograms.msgHandlingTime.Observe(float64(processingTime)) + labels := prometheus.Labels{ + opLabel: op, + } + h.metrics.messages.With(labels).Inc() + h.metrics.messageHandlingTime.With(labels).Add(float64(handlingTime)) + msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling async message", - zap.Stringer("messageOp", op), + zap.String("messageOp", op), ) }() @@ -826,7 +853,7 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { } switch m := body.(type) { - case *p2p.AppRequest: + case *p2ppb.AppRequest: return engine.AppRequest( ctx, nodeID, @@ -835,10 +862,10 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { m.AppBytes, ) - case *p2p.AppResponse: + case *p2ppb.AppResponse: return engine.AppResponse(ctx, nodeID, m.RequestId, m.AppBytes) - case *p2p.AppError: + case *p2ppb.AppError: err := &common.AppError{ Code: m.ErrorCode, Message: m.ErrorMessage, @@ -851,7 +878,7 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { err, ) - case *p2p.AppGossip: + case *p2ppb.AppGossip: return engine.AppGossip(ctx, nodeID, m.AppBytes) case *message.CrossChainAppRequest: @@ -895,7 +922,7 @@ func (h *handler) executeAsyncMsg(ctx context.Context, msg Message) error { // Any returned error is treated as fatal func (h *handler) handleChanMsg(msg message.InboundMessage) error { var ( - op = msg.Op() + op = msg.Op().String() body = msg.Message() startTime = h.clock.Time() // Check if the chain is in normal operation at the start of message @@ -904,12 +931,12 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { ) if h.ctx.Log.Enabled(logging.Verbo) { h.ctx.Log.Verbo("forwarding chan message to consensus", - zap.Stringer("messageOp", op), + zap.String("messageOp", op), zap.Stringer("message", 
body), ) } else { h.ctx.Log.Debug("forwarding chan message to consensus", - zap.Stringer("messageOp", op), + zap.String("messageOp", op), ) } h.ctx.Lock.Lock() @@ -918,22 +945,26 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { h.ctx.Lock.Unlock() var ( - endTime = h.clock.Time() - messageHistograms = h.metrics.messages[op] - processingTime = endTime.Sub(startTime) - msgHandlingTime = endTime.Sub(lockAcquiredTime) + endTime = h.clock.Time() + lockingTime = lockAcquiredTime.Sub(startTime) + handlingTime = endTime.Sub(lockAcquiredTime) ) - messageHistograms.processingTime.Observe(float64(processingTime)) - messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) + h.metrics.lockingTime.Add(float64(lockingTime)) + labels := prometheus.Labels{ + opLabel: op, + } + h.metrics.messages.With(labels).Inc() + h.metrics.messageHandlingTime.With(labels).Add(float64(handlingTime)) + msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling chan message", - zap.Stringer("messageOp", op), + zap.String("messageOp", op), ) - if processingTime > syncProcessingTimeWarnLimit && isNormalOp { + if lockingTime+handlingTime > syncProcessingTimeWarnLimit && isNormalOp { h.ctx.Log.Warn("handling chan message took longer than expected", - zap.Duration("processingTime", processingTime), - zap.Duration("msgHandlingTime", msgHandlingTime), - zap.Stringer("messageOp", op), + zap.Duration("lockingTime", lockingTime), + zap.Duration("handlingTime", handlingTime), + zap.String("messageOp", op), zap.Stringer("message", body), ) } @@ -968,10 +999,7 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { } } -func (h *handler) popUnexpiredMsg( - queue MessageQueue, - expired prometheus.Counter, -) (context.Context, Message, bool) { +func (h *handler) popUnexpiredMsg(queue MessageQueue) (context.Context, Message, bool) { for { // Get the next message we should process. If the handler is shutting // down, we may fail to pop a message. 
@@ -982,16 +1010,19 @@ func (h *handler) popUnexpiredMsg( // If this message's deadline has passed, don't process it. if expiration := msg.Expiration(); h.clock.Time().After(expiration) { + op := msg.Op().String() h.ctx.Log.Debug("dropping message", zap.String("reason", "timeout"), zap.Stringer("nodeID", msg.NodeID()), - zap.Stringer("messageOp", msg.Op()), + zap.String("messageOp", op), ) span := trace.SpanFromContext(ctx) span.AddEvent("dropping message", trace.WithAttributes( attribute.String("reason", "timeout"), )) - expired.Inc() + h.metrics.expired.With(prometheus.Labels{ + opLabel: op, + }).Inc() msg.OnFinishedHandling() continue } diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index dbf378c0d366..cb24040643f3 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -16,17 +16,20 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) @@ -53,6 +56,16 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { time.Second, ) require.NoError(err) + + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + 
require.NoError(err) + handlerIntf, err := New( ctx, vdrs, @@ -63,6 +76,8 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) handler := handlerIntf.(*handler) @@ -90,7 +105,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { }, }) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrap is ongoing }) @@ -101,8 +116,8 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { reqID := uint32(1) chainID := ids.ID{} msg := Message{ - InboundMessage: message.InboundGetAcceptedFrontier(chainID, reqID, 0*time.Second, nodeID, p2p.EngineType_ENGINE_TYPE_SNOWMAN), - EngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + InboundMessage: message.InboundGetAcceptedFrontier(chainID, reqID, 0*time.Second, nodeID), + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } handler.Push(context.Background(), msg) @@ -111,8 +126,8 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { reqID++ msg = Message{ - InboundMessage: message.InboundGetAccepted(chainID, reqID, 1*time.Second, nil, nodeID, p2p.EngineType_ENGINE_TYPE_SNOWMAN), - EngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + InboundMessage: message.InboundGetAccepted(chainID, reqID, 1*time.Second, nil, nodeID), + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } handler.Push(context.Background(), msg) @@ -148,6 +163,16 @@ func TestHandlerClosesOnError(t *testing.T) { time.Second, ) require.NoError(err) + + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + handlerIntf, err := New( ctx, vdrs, @@ -158,6 +183,8 @@ func TestHandlerClosesOnError(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), 
commontracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) handler := handlerIntf.(*handler) @@ -196,7 +223,7 @@ func TestHandlerClosesOnError(t *testing.T) { // assume bootstrapping is ongoing so that InboundGetAcceptedFrontier // should normally be handled ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, }) @@ -210,8 +237,8 @@ func TestHandlerClosesOnError(t *testing.T) { reqID := uint32(1) deadline := time.Nanosecond msg := Message{ - InboundMessage: message.InboundGetAcceptedFrontier(ids.ID{}, reqID, deadline, nodeID, 0), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + InboundMessage: message.InboundGetAcceptedFrontier(ids.ID{}, reqID, deadline, nodeID), + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } handler.Push(context.Background(), msg) @@ -239,6 +266,16 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { time.Second, ) require.NoError(err) + + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + handlerIntf, err := New( ctx, vdrs, @@ -249,6 +286,8 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) handler := handlerIntf.(*handler) @@ -274,7 +313,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { }, }) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrap is ongoing }) @@ -288,8 +327,8 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { chainID := ids.Empty reqID := uint32(1) inInboundMessage := Message{ - InboundMessage: message.InternalGetFailed(nodeID, chainID, reqID, 
p2p.EngineType_ENGINE_TYPE_SNOWMAN), - EngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + InboundMessage: message.InternalGetFailed(nodeID, chainID, reqID), + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } handler.Push(context.Background(), inInboundMessage) @@ -318,6 +357,16 @@ func TestHandlerDispatchInternal(t *testing.T) { time.Second, ) require.NoError(err) + + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + handler, err := New( ctx, vdrs, @@ -328,6 +377,8 @@ func TestHandlerDispatchInternal(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -358,7 +409,7 @@ func TestHandlerDispatchInternal(t *testing.T) { }) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, // assumed bootstrap is done }) @@ -393,6 +444,15 @@ func TestHandlerSubnetConnector(t *testing.T) { nodeID := ids.GenerateTestNodeID() subnetID := ids.GenerateTestID() + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + handler, err := New( ctx, vdrs, @@ -403,6 +463,8 @@ func TestHandlerSubnetConnector(t *testing.T) { connector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -426,7 +488,7 @@ func TestHandlerSubnetConnector(t *testing.T) { }, }) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, // assumed bootstrap is done }) @@ -448,7 +510,7 @@ func TestHandlerSubnetConnector(t *testing.T) { subnetInboundMessage := Message{ InboundMessage: message.InternalConnectedSubnet(nodeID, 
subnetID), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } handler.Push(context.Background(), subnetInboundMessage) } @@ -457,8 +519,8 @@ func TestHandlerSubnetConnector(t *testing.T) { func TestDynamicEngineTypeDispatch(t *testing.T) { tests := []struct { name string - currentEngineType p2p.EngineType - requestedEngineType p2p.EngineType + currentEngineType p2ppb.EngineType + requestedEngineType p2ppb.EngineType setup func( h Handler, b common.BootstrapableEngine, @@ -467,8 +529,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { }{ { name: "current - avalanche, requested - unspecified", - currentEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - requestedEngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + currentEngineType: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, + requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { h.SetEngineManager(&EngineManager{ Avalanche: &Engine{ @@ -482,8 +544,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { }, { name: "current - avalanche, requested - avalanche", - currentEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, - requestedEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + currentEngineType: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, + requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { h.SetEngineManager(&EngineManager{ Avalanche: &Engine{ @@ -497,8 +559,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { }, { name: "current - snowman, requested - unspecified", - currentEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - requestedEngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + currentEngineType: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { 
h.SetEngineManager(&EngineManager{ Avalanche: nil, @@ -512,8 +574,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { }, { name: "current - snowman, requested - avalanche", - currentEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - requestedEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + currentEngineType: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_AVALANCHE, setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { h.SetEngineManager(&EngineManager{ Avalanche: &Engine{ @@ -531,8 +593,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { }, { name: "current - snowman, requested - snowman", - currentEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, - requestedEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + currentEngineType: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + requestedEngineType: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { h.SetEngineManager(&EngineManager{ Avalanche: nil, @@ -563,6 +625,16 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { time.Second, ) require.NoError(err) + + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + handler, err := New( ctx, vdrs, @@ -573,6 +645,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ids.EmptyNodeID, subnets.Config{}), commontracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -635,6 +709,15 @@ func TestHandlerStartError(t *testing.T) { ) require.NoError(err) + peerTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + handler, err := New( ctx, validators.NewManager(), @@ -645,6 +728,8 @@ func TestHandlerStartError(t *testing.T) { nil, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + 
peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -652,7 +737,7 @@ func TestHandlerStartError(t *testing.T) { // handler to shutdown. handler.SetEngineManager(&EngineManager{}) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Initializing, }) handler.Start(context.Background(), false) diff --git a/snow/networking/handler/health_test.go b/snow/networking/handler/health_test.go index adeb3430f277..789d3464187e 100644 --- a/snow/networking/handler/health_test.go +++ b/snow/networking/handler/health_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -20,10 +20,13 @@ import ( "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) @@ -61,7 +64,7 @@ func TestHealthCheckSubnet(t *testing.T) { require.NoError(err) peerTracker := commontracker.NewPeers() - vdrs.RegisterCallbackListener(ctx.SubnetID, peerTracker) + vdrs.RegisterSetCallbackListener(ctx.SubnetID, peerTracker) sb := subnets.New( ctx.NodeID, @@ -69,6 +72,16 @@ func TestHealthCheckSubnet(t *testing.T) { ConsensusParameters: test.consensusParams, }, ) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + 
version.CurrentApp, + ) + require.NoError(err) + handlerIntf, err := New( ctx, vdrs, @@ -79,6 +92,8 @@ func TestHealthCheckSubnet(t *testing.T) { validators.UnhandledSubnetConnector, sb, peerTracker, + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -103,7 +118,7 @@ func TestHealthCheckSubnet(t *testing.T) { }) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, // assumed bootstrap is done }) diff --git a/snow/networking/handler/message_queue.go b/snow/networking/handler/message_queue.go index 58e4f2b3b29e..4d632c62d77e 100644 --- a/snow/networking/handler/message_queue.go +++ b/snow/networking/handler/message_queue.go @@ -7,15 +7,16 @@ import ( "context" "sync" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/buffer" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -59,7 +60,8 @@ type messageQueue struct { clock mockable.Clock metrics messageQueueMetrics - ctx *snow.ConsensusContext + log logging.Logger + subnetID ids.ID // Validator set for the chain associated with this vdrs validators.Manager // Tracks CPU utilization of each node @@ -74,21 +76,23 @@ type messageQueue struct { } func NewMessageQueue( - ctx *snow.ConsensusContext, + log logging.Logger, + subnetID ids.ID, vdrs validators.Manager, cpuTracker tracker.Tracker, metricsNamespace string, - ops []message.Op, + reg prometheus.Registerer, ) (MessageQueue, error) { m := &messageQueue{ - ctx: ctx, + log: log, + subnetID: subnetID, vdrs: vdrs, cpuTracker: cpuTracker, cond: sync.NewCond(&sync.Mutex{}), 
nodeToUnprocessedMsgs: make(map[ids.NodeID]int), msgAndCtxs: buffer.NewUnboundedDeque[*msgAndContext](1 /*=initSize*/), } - return m, m.metrics.initialize(metricsNamespace, ctx.Registerer, ops) + return m, m.metrics.initialize(metricsNamespace, reg) } func (m *messageQueue) Push(ctx context.Context, msg Message) { @@ -108,9 +112,10 @@ func (m *messageQueue) Push(ctx context.Context, msg Message) { m.nodeToUnprocessedMsgs[msg.NodeID()]++ // Update metrics + m.metrics.count.With(prometheus.Labels{ + opLabel: msg.Op().String(), + }).Inc() m.metrics.nodesWithMessages.Set(float64(len(m.nodeToUnprocessedMsgs))) - m.metrics.len.Inc() - m.metrics.ops[msg.Op()].Inc() // Signal a waiting thread m.cond.Signal() @@ -136,7 +141,7 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { i := 0 for { if i == n { - m.ctx.Log.Debug("canPop is false for all unprocessed messages", + m.log.Debug("canPop is false for all unprocessed messages", zap.Int("numMessages", n), ) } @@ -154,9 +159,10 @@ func (m *messageQueue) Pop() (context.Context, Message, bool) { if m.nodeToUnprocessedMsgs[nodeID] == 0 { delete(m.nodeToUnprocessedMsgs, nodeID) } + m.metrics.count.With(prometheus.Labels{ + opLabel: msg.Op().String(), + }).Dec() m.metrics.nodesWithMessages.Set(float64(len(m.nodeToUnprocessedMsgs))) - m.metrics.len.Dec() - m.metrics.ops[msg.Op()].Dec() return ctx, msg, true } // [msg.nodeID] is causing excessive CPU usage. @@ -186,8 +192,8 @@ func (m *messageQueue) Shutdown() { m.nodeToUnprocessedMsgs = nil // Update metrics + m.metrics.count.Reset() m.metrics.nodesWithMessages.Set(0) - m.metrics.len.Set(0) // Mark the queue as closed m.closed = true @@ -210,21 +216,21 @@ func (m *messageQueue) canPop(msg message.InboundMessage) bool { // the number of nodes with unprocessed messages. 
baseMaxCPU := 1 / float64(len(m.nodeToUnprocessedMsgs)) nodeID := msg.NodeID() - weight := m.vdrs.GetWeight(m.ctx.SubnetID, nodeID) + weight := m.vdrs.GetWeight(m.subnetID, nodeID) var portionWeight float64 - if totalVdrsWeight, err := m.vdrs.TotalWeight(m.ctx.SubnetID); err != nil { + if totalVdrsWeight, err := m.vdrs.TotalWeight(m.subnetID); err != nil { // The sum of validator weights should never overflow, but if they do, // we treat portionWeight as 0. - m.ctx.Log.Error("failed to get total weight of validators", - zap.Stringer("subnetID", m.ctx.SubnetID), + m.log.Error("failed to get total weight of validators", + zap.Stringer("subnetID", m.subnetID), zap.Error(err), ) } else if totalVdrsWeight == 0 { // The sum of validator weights should never be 0, but handle that case // for completeness here to avoid divide by 0. - m.ctx.Log.Warn("validator set is empty", - zap.Stringer("subnetID", m.ctx.SubnetID), + m.log.Warn("validator set is empty", + zap.Stringer("subnetID", m.subnetID), ) } else { portionWeight = float64(weight) / float64(totalVdrsWeight) diff --git a/snow/networking/handler/message_queue_metrics.go b/snow/networking/handler/message_queue_metrics.go index 74cf4d236656..827edbf5c162 100644 --- a/snow/networking/handler/message_queue_metrics.go +++ b/snow/networking/handler/message_queue_metrics.go @@ -4,18 +4,18 @@ package handler import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" ) +const opLabel = "op" + +var opLabels = []string{opLabel} + type messageQueueMetrics struct { - ops map[message.Op]prometheus.Gauge - len prometheus.Gauge + count *prometheus.GaugeVec nodesWithMessages prometheus.Gauge numExcessiveCPU prometheus.Counter } @@ -23,43 +23,30 @@ type messageQueueMetrics struct { func (m *messageQueueMetrics) initialize( metricsNamespace string, 
metricsRegisterer prometheus.Registerer, - ops []message.Op, ) error { namespace := metric.AppendNamespace(metricsNamespace, "unprocessed_msgs") - m.len = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "len", - Help: "Messages ready to be processed", - }) + m.count = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "messages in the queue", + }, + opLabels, + ) m.nodesWithMessages = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "nodes", - Help: "Nodes from which there are at least 1 message ready to be processed", + Help: "nodes with at least 1 message ready to be processed", }) m.numExcessiveCPU = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "excessive_cpu", - Help: "Times we deferred handling a message from a node because the node was using excessive CPU", + Help: "times a message has been deferred due to excessive CPU usage", }) - errs := wrappers.Errs{} - m.ops = make(map[message.Op]prometheus.Gauge, len(ops)) - - for _, op := range ops { - opStr := op.String() - opMetric := prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: opStr + "_count", - Help: fmt.Sprintf("Number of %s messages in the message queue.", opStr), - }) - m.ops[op] = opMetric - errs.Add(metricsRegisterer.Register(opMetric)) - } - - errs.Add( - metricsRegisterer.Register(m.len), + return utils.Err( + metricsRegisterer.Register(m.count), metricsRegisterer.Register(m.nodesWithMessages), metricsRegisterer.Register(m.numExcessiveCPU), ) - return errs.Err } diff --git a/snow/networking/handler/message_queue_test.go b/snow/networking/handler/message_queue_test.go index 69fbaf531d32..a74ffcfb4469 100644 --- a/snow/networking/handler/message_queue_test.go +++ b/snow/networking/handler/message_queue_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" 
"go.uber.org/mock/gomock" @@ -15,23 +16,27 @@ import ( "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/tracker" - "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" ) -const engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN - func TestQueue(t *testing.T) { ctrl := gomock.NewController(t) require := require.New(t) cpuTracker := tracker.NewMockTracker(ctrl) - snowCtx := snowtest.Context(t, snowtest.CChainID) - ctx := snowtest.ConsensusContext(snowCtx) vdrs := validators.NewManager() vdr1ID, vdr2ID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr1ID, nil, ids.Empty, 1)) - require.NoError(vdrs.AddStaker(ctx.SubnetID, vdr2ID, nil, ids.Empty, 1)) - mIntf, err := NewMessageQueue(ctx, vdrs, cpuTracker, "", message.SynchronousOps) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.AddStaker(constants.PrimaryNetworkID, vdr2ID, nil, ids.Empty, 1)) + mIntf, err := NewMessageQueue( + logging.NoLog{}, + constants.PrimaryNetworkID, + vdrs, + cpuTracker, + "", + prometheus.NewRegistry(), + ) require.NoError(err) u := mIntf.(*messageQueue) currentTime := time.Now() @@ -45,9 +50,8 @@ func TestQueue(t *testing.T) { ids.GenerateTestID(), 0, vdr1ID, - engineType, ), - EngineType: engineType, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, } // Push then pop should work regardless of usage when there are no other @@ -105,9 +109,8 @@ func TestQueue(t *testing.T) { ids.GenerateTestID(), 0, vdr2ID, - engineType, ), - EngineType: engineType, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, } // Push msg2 from vdr2ID @@ -132,12 +135,12 @@ func TestQueue(t *testing.T) { // Non-validators should be able to put messages onto [u] 
nonVdrNodeID1, nonVdrNodeID2 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() msg3 := Message{ - InboundMessage: message.InboundPullQuery(ids.Empty, 0, 0, ids.Empty, 0, nonVdrNodeID1, engineType), - EngineType: engineType, + InboundMessage: message.InboundPullQuery(ids.Empty, 0, 0, ids.Empty, 0, nonVdrNodeID1), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, } msg4 := Message{ - InboundMessage: message.InboundPushQuery(ids.Empty, 0, 0, nil, 0, nonVdrNodeID2, engineType), - EngineType: engineType, + InboundMessage: message.InboundPushQuery(ids.Empty, 0, 0, nil, 0, nonVdrNodeID2), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, } u.Push(context.Background(), msg3) u.Push(context.Background(), msg4) diff --git a/snow/networking/handler/metrics.go b/snow/networking/handler/metrics.go index efb6cf558d1e..f3a21149f26c 100644 --- a/snow/networking/handler/metrics.go +++ b/snow/networking/handler/metrics.go @@ -4,69 +4,50 @@ package handler import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) type metrics struct { - expired prometheus.Counter - asyncExpired prometheus.Counter - messages map[message.Op]*messageProcessing -} - -type messageProcessing struct { - processingTime metric.Averager - msgHandlingTime metric.Averager + expired *prometheus.CounterVec // op + messages *prometheus.CounterVec // op + lockingTime prometheus.Gauge + messageHandlingTime *prometheus.GaugeVec // op } -func newMetrics(namespace string, reg prometheus.Registerer) (*metrics, error) { - errs := wrappers.Errs{} - - expired := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "expired", - Help: "Incoming sync messages dropped because the message deadline expired", - }) - asyncExpired := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, 
- Name: "async_expired", - Help: "Incoming async messages dropped because the message deadline expired", - }) - errs.Add( - reg.Register(expired), - reg.Register(asyncExpired), - ) - - messages := make(map[message.Op]*messageProcessing, len(message.ConsensusOps)) - for _, op := range message.ConsensusOps { - opStr := op.String() - messageProcessing := &messageProcessing{ - processingTime: metric.NewAveragerWithErrs( - namespace, - opStr, - "time (in ns) spent handling a "+opStr, - reg, - &errs, - ), - msgHandlingTime: metric.NewAveragerWithErrs( - namespace, - opStr+"_msg_handling", - fmt.Sprintf("time (in ns) spent handling a %s after grabbing the lock", opStr), - reg, - &errs, - ), - } - messages[op] = messageProcessing +func newMetrics(reg prometheus.Registerer) (*metrics, error) { + m := &metrics{ + expired: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "expired", + Help: "messages dropped because the deadline expired", + }, + opLabels, + ), + messages: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "messages", + Help: "messages handled", + }, + opLabels, + ), + messageHandlingTime: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "message_handling_time", + Help: "time spent handling messages", + }, + opLabels, + ), + lockingTime: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "locking_time", + Help: "time spent acquiring the context lock", + }), } - - return &metrics{ - expired: expired, - asyncExpired: asyncExpired, - messages: messages, - }, errs.Err + return m, utils.Err( + reg.Register(m.expired), + reg.Register(m.messages), + reg.Register(m.messageHandlingTime), + reg.Register(m.lockingTime), + ) } diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index 9c2425883b14..27bf891ab4f9 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -21,7 +21,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/handler" 
"github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -83,7 +83,7 @@ type ChainRouter struct { // Parameters for doing health checks healthConfig HealthConfig // aggregator of requests based on their time - timedRequests linkedhashmap.LinkedHashmap[ids.RequestID, requestEntry] + timedRequests *linked.Hashmap[ids.RequestID, requestEntry] } // Initialize the router. @@ -101,8 +101,7 @@ func (cr *ChainRouter) Initialize( trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, - metricsNamespace string, - metricsRegisterer prometheus.Registerer, + reg prometheus.Registerer, ) error { cr.log = log cr.chainHandlers = make(map[ids.ID]handler.Handler) @@ -112,7 +111,7 @@ func (cr *ChainRouter) Initialize( cr.criticalChains = criticalChains cr.sybilProtectionEnabled = sybilProtectionEnabled cr.onFatal = onFatal - cr.timedRequests = linkedhashmap.New[ids.RequestID, requestEntry]() + cr.timedRequests = linked.NewHashmap[ids.RequestID, requestEntry]() cr.peers = make(map[ids.NodeID]*peer) cr.healthConfig = healthConfig @@ -126,7 +125,7 @@ func (cr *ChainRouter) Initialize( cr.peers[nodeID] = myself // Register metrics - rMetrics, err := newRouterMetrics(metricsNamespace, metricsRegisterer) + rMetrics, err := newRouterMetrics(reg) if err != nil { return err } @@ -270,11 +269,7 @@ func (cr *ChainRouter) HandleInbound(ctx context.Context, msg message.InboundMes } chainCtx := chain.Context() - - // TODO: [requestID] can overflow, which means a timeout on the request - // before the overflow may not be handled properly. 
- if notRequested := message.UnrequestedOps.Contains(op); notRequested || - (op == message.PutOp && requestID == constants.GossipMsgRequestID) { + if message.UnrequestedOps.Contains(op) { if chainCtx.Executing.Get() { cr.log.Debug("dropping message and skipping queue", zap.String("reason", "the chain is currently executing"), diff --git a/snow/networking/router/chain_router_metrics.go b/snow/networking/router/chain_router_metrics.go index bc8f26223586..8855acc5ccdf 100644 --- a/snow/networking/router/chain_router_metrics.go +++ b/snow/networking/router/chain_router_metrics.go @@ -16,27 +16,24 @@ type routerMetrics struct { droppedRequests prometheus.Counter } -func newRouterMetrics(namespace string, registerer prometheus.Registerer) (*routerMetrics, error) { +func newRouterMetrics(registerer prometheus.Registerer) (*routerMetrics, error) { rMetrics := &routerMetrics{} rMetrics.outstandingRequests = prometheus.NewGauge( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "outstanding", - Help: "Number of outstanding requests (all types)", + Name: "outstanding", + Help: "Number of outstanding requests (all types)", }, ) rMetrics.longestRunningRequest = prometheus.NewGauge( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "longest_running", - Help: "Time (in ns) the longest request took", + Name: "longest_running", + Help: "Time (in ns) the longest request took", }, ) rMetrics.droppedRequests = prometheus.NewCounter( prometheus.CounterOpts{ - Namespace: namespace, - Name: "dropped", - Help: "Number of dropped requests (all types)", + Name: "dropped", + Help: "Number of dropped requests (all types)", }, ) diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index e9617d7ae687..19b889cd2d94 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" - 
"github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/benchlist" @@ -33,11 +33,12 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) const ( - engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE + engineType = p2ppb.EngineType_ENGINE_TYPE_AVALANCHE testThreadPoolSize = 2 ) @@ -60,7 +61,7 @@ func TestShutdown(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -79,7 +80,6 @@ func TestShutdown(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -92,6 +92,16 @@ func TestShutdown(t *testing.T) { time.Second, ) require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( chainCtx, vdrs, @@ -102,6 +112,8 @@ func TestShutdown(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(chainCtx.NodeID, subnets.Config{}), commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -188,7 +200,6 @@ func TestShutdownTimesOut(t *testing.T) { vdrs := validators.NewManager() require.NoError(vdrs.AddStaker(ctx.SubnetID, ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() - metrics := prometheus.NewRegistry() // Ensure that the Ancestors request does not timeout tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -199,8 +210,8 @@ func TestShutdownTimesOut(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist, - "", - metrics, + prometheus.NewRegistry(), + 
prometheus.NewRegistry(), ) require.NoError(err) @@ -219,8 +230,7 @@ func TestShutdownTimesOut(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", - metrics, + prometheus.NewRegistry(), )) resourceTracker, err := tracker.NewResourceTracker( @@ -230,6 +240,16 @@ func TestShutdownTimesOut(t *testing.T) { time.Second, ) require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx, vdrs, @@ -240,6 +260,8 @@ func TestShutdownTimesOut(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -304,8 +326,8 @@ func TestShutdownTimesOut(t *testing.T) { go func() { chainID := ids.ID{} msg := handler.Message{ - InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), 0, nodeID, engineType), - EngineType: engineType, + InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), 0, nodeID), + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } h.Push(context.Background(), msg) @@ -337,7 +359,7 @@ func TestRouterTimeout(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -357,7 +379,6 @@ func TestRouterTimeout(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -390,6 +411,15 @@ func TestRouterTimeout(t *testing.T) { ) require.NoError(err) + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx, vdrs, @@ -400,6 +430,8 @@ func TestRouterTimeout(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, 
subnets.Config{}), commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -465,7 +497,7 @@ func TestRouterTimeout(t *testing.T) { return nil } ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrapping is ongoing }) @@ -504,7 +536,7 @@ func TestRouterTimeout(t *testing.T) { ctx.ChainID, requestID, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -523,7 +555,7 @@ func TestRouterTimeout(t *testing.T) { ctx.ChainID, requestID, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -541,9 +573,8 @@ func TestRouterTimeout(t *testing.T) { nodeID, ctx.ChainID, requestID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -561,9 +592,8 @@ func TestRouterTimeout(t *testing.T) { nodeID, ctx.ChainID, requestID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -581,9 +611,9 @@ func TestRouterTimeout(t *testing.T) { nodeID, ctx.ChainID, requestID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -601,9 +631,8 @@ func TestRouterTimeout(t *testing.T) { nodeID, ctx.ChainID, requestID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -621,9 +650,8 @@ func TestRouterTimeout(t *testing.T) { nodeID, ctx.ChainID, requestID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -644,7 +672,7 @@ func TestRouterTimeout(t *testing.T) { common.ErrTimeout.Code, common.ErrTimeout.Message, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + 
p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -666,7 +694,7 @@ func TestRouterTimeout(t *testing.T) { common.ErrTimeout.Code, common.ErrTimeout.Message, ), - p2p.EngineType_ENGINE_TYPE_SNOWMAN, + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, ) } @@ -700,7 +728,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -720,7 +748,6 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -754,7 +781,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { ctx.ChainID, requestID, ), - p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, ) msg := message.InboundStateSummaryFrontier( ctx.ChainID, @@ -764,7 +791,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { ) h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { - require.Equal(p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, msg.EngineType) + require.Equal(p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, msg.EngineType) }) chainRouter.HandleInbound(context.Background(), msg) } @@ -799,8 +826,6 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { } { - engineType := p2p.EngineType(100) - requestID++ msg := message.InboundPushQuery( ctx.ChainID, @@ -809,16 +834,17 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { nil, 0, nodeID, - engineType, ) h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { - require.Equal(engineType, msg.EngineType) + require.Equal(p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, msg.EngineType) }) chainRouter.HandleInbound(context.Background(), msg) } + chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() } func TestRouterClearTimeouts(t *testing.T) { @@ -846,19 +872,19 @@ func 
TestRouterClearTimeouts(t *testing.T) { name: "AcceptedFrontierOp", responseOp: message.AcceptedFrontierOp, responseMsg: message.InboundAcceptedFrontier(ids.Empty, requestID, ids.GenerateTestID(), ids.EmptyNodeID), - timeoutMsg: message.InternalGetAcceptedFrontierFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + timeoutMsg: message.InternalGetAcceptedFrontierFailed(ids.EmptyNodeID, ids.Empty, requestID), }, { name: "Accepted", responseOp: message.AcceptedOp, responseMsg: message.InboundAccepted(ids.Empty, requestID, []ids.ID{ids.GenerateTestID()}, ids.EmptyNodeID), - timeoutMsg: message.InternalGetAcceptedFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + timeoutMsg: message.InternalGetAcceptedFailed(ids.EmptyNodeID, ids.Empty, requestID), }, { name: "Chits", responseOp: message.ChitsOp, responseMsg: message.InboundChits(ids.Empty, requestID, ids.GenerateTestID(), ids.GenerateTestID(), ids.GenerateTestID(), ids.EmptyNodeID), - timeoutMsg: message.InternalQueryFailed(ids.EmptyNodeID, ids.Empty, requestID, engineType), + timeoutMsg: message.InternalQueryFailed(ids.EmptyNodeID, ids.Empty, requestID), }, { name: "AppResponse", @@ -904,7 +930,10 @@ func TestRouterClearTimeouts(t *testing.T) { ) chainRouter.HandleInbound(context.Background(), tt.responseMsg) + + chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() }) } } @@ -923,7 +952,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -943,7 +972,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -965,6 +993,16 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { time.Second, ) require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + 
prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx, vdrs, @@ -975,6 +1013,8 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { validators.UnhandledSubnetConnector, sb, commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -993,7 +1033,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { return nil } ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrapping is ongoing }) @@ -1037,7 +1077,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { dummyContainerID, 0, nID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) chainRouter.HandleInbound(context.Background(), inMsg) @@ -1053,7 +1092,6 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { dummyContainerID, 0, vID, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) wg.Add(1) chainRouter.HandleInbound(context.Background(), inMsg) @@ -1075,7 +1113,7 @@ func TestConnectedSubnet(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "timeoutManager", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -1100,7 +1138,6 @@ func TestConnectedSubnet(t *testing.T) { trackedSubnets, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -1115,15 +1152,15 @@ func TestConnectedSubnet(t *testing.T) { myConnectedMsg := handler.Message{ InboundMessage: message.InternalConnected(myNodeID, version.CurrentApp), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } mySubnetConnectedMsg0 := handler.Message{ InboundMessage: message.InternalConnectedSubnet(myNodeID, subnetID0), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } mySubnetConnectedMsg1 := handler.Message{ InboundMessage: message.InternalConnectedSubnet(myNodeID, subnetID1), - EngineType: 
p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } platformHandler := handler.NewMockHandler(ctrl) @@ -1137,28 +1174,28 @@ func TestConnectedSubnet(t *testing.T) { peerConnectedMsg := handler.Message{ InboundMessage: message.InternalConnected(peerNodeID, version.CurrentApp), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } platformHandler.EXPECT().Push(gomock.Any(), peerConnectedMsg).Times(1) chainRouter.Connected(peerNodeID, version.CurrentApp, constants.PrimaryNetworkID) peerSubnetConnectedMsg0 := handler.Message{ InboundMessage: message.InternalConnectedSubnet(peerNodeID, subnetID0), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } platformHandler.EXPECT().Push(gomock.Any(), peerSubnetConnectedMsg0).Times(1) chainRouter.Connected(peerNodeID, version.CurrentApp, subnetID0) myDisconnectedMsg := handler.Message{ InboundMessage: message.InternalDisconnected(myNodeID), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } platformHandler.EXPECT().Push(gomock.Any(), myDisconnectedMsg).Times(1) chainRouter.Benched(constants.PlatformChainID, myNodeID) peerDisconnectedMsg := handler.Message{ InboundMessage: message.InternalDisconnected(peerNodeID), - EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } platformHandler.EXPECT().Push(gomock.Any(), peerDisconnectedMsg).Times(1) chainRouter.Benched(constants.PlatformChainID, peerNodeID) @@ -1192,7 +1229,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -1212,7 +1249,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", 
prometheus.NewRegistry(), )) defer chainRouter.Shutdown(context.Background()) @@ -1239,6 +1275,15 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { ) require.NoError(err) + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx, vdrs, @@ -1249,6 +1294,8 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { validators.UnhandledSubnetConnector, sb, commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -1305,7 +1352,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { dummyContainerID, 0, nID, - engineType, ) chainRouter.HandleInbound(context.Background(), inMsg) @@ -1321,7 +1367,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { dummyContainerID, 0, allowedID, - engineType, ) wg.Add(1) chainRouter.HandleInbound(context.Background(), inMsg) @@ -1339,7 +1384,6 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { dummyContainerID, 0, vID, - engineType, ) wg.Add(1) chainRouter.HandleInbound(context.Background(), inMsg) @@ -1395,7 +1439,9 @@ func TestAppRequest(t *testing.T) { if tt.inboundMsg == nil || tt.inboundMsg.Op() == message.AppErrorOp { engine.AppRequestFailedF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { defer wg.Done() + chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() require.Equal(ids.EmptyNodeID, nodeID) require.Equal(wantRequestID, requestID) @@ -1407,7 +1453,9 @@ func TestAppRequest(t *testing.T) { } else if tt.inboundMsg.Op() == message.AppResponseOp { engine.AppResponseF = func(_ context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error { defer wg.Done() + chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() require.Equal(ids.EmptyNodeID, nodeID) 
require.Equal(wantRequestID, requestID) @@ -1419,7 +1467,9 @@ func TestAppRequest(t *testing.T) { ctx := context.Background() chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) + chainRouter.lock.Lock() require.Equal(1, chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() if tt.inboundMsg != nil { chainRouter.HandleInbound(ctx, tt.inboundMsg) @@ -1477,7 +1527,9 @@ func TestCrossChainAppRequest(t *testing.T) { if tt.inboundMsg == nil || tt.inboundMsg.Op() == message.CrossChainAppErrorOp { engine.CrossChainAppRequestFailedF = func(_ context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { defer wg.Done() + chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() require.Equal(ids.Empty, chainID) require.Equal(wantRequestID, requestID) @@ -1489,7 +1541,9 @@ func TestCrossChainAppRequest(t *testing.T) { } else if tt.inboundMsg.Op() == message.CrossChainAppResponseOp { engine.CrossChainAppResponseF = func(_ context.Context, chainID ids.ID, requestID uint32, msg []byte) error { defer wg.Done() + chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() require.Equal(ids.Empty, chainID) require.Equal(wantRequestID, requestID) @@ -1501,7 +1555,9 @@ func TestCrossChainAppRequest(t *testing.T) { ctx := context.Background() chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) + chainRouter.lock.Lock() require.Equal(1, chainRouter.timedRequests.Len()) + chainRouter.lock.Unlock() if tt.inboundMsg != nil { chainRouter.HandleInbound(ctx, tt.inboundMsg) @@ -1523,7 +1579,7 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { TimeoutHalflife: 5 * time.Minute, }, benchlist.NewNoBenchlist(), - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(t, err) @@ -1542,7 +1598,6 
@@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { set.Set[ids.ID]{}, nil, HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -1559,6 +1614,16 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { time.Second, ) require.NoError(t, err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(t, err) + h, err := handler.New( ctx, vdrs, @@ -1569,6 +1634,8 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(t, err) @@ -1600,7 +1667,7 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *common.EngineTest) { }, }) ctx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, // assumed bootstrapping is done }) diff --git a/snow/networking/router/mock_router.go b/snow/networking/router/mock_router.go index c9146a777138..548b32110775 100644 --- a/snow/networking/router/mock_router.go +++ b/snow/networking/router/mock_router.go @@ -125,17 +125,17 @@ func (mr *MockRouterMockRecorder) HealthCheck(arg0 any) *gomock.Call { } // Initialize mocks base method. 
-func (m *MockRouter) Initialize(nodeID ids.NodeID, log logging.Logger, timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(int), healthConfig HealthConfig, metricsNamespace string, metricsRegisterer prometheus.Registerer) error { +func (m *MockRouter) Initialize(nodeID ids.NodeID, log logging.Logger, timeouts timeout.Manager, shutdownTimeout time.Duration, criticalChains set.Set[ids.ID], sybilProtectionEnabled bool, trackedSubnets set.Set[ids.ID], onFatal func(int), healthConfig HealthConfig, reg prometheus.Registerer) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) + ret := m.ctrl.Call(m, "Initialize", nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, reg) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. 
-func (mr *MockRouterMockRecorder) Initialize(nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer any) *gomock.Call { +func (mr *MockRouterMockRecorder) Initialize(nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, reg any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, metricsNamespace, metricsRegisterer) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), nodeID, log, timeouts, shutdownTimeout, criticalChains, sybilProtectionEnabled, trackedSubnets, onFatal, healthConfig, reg) } // RegisterRequest mocks base method. diff --git a/snow/networking/router/router.go b/snow/networking/router/router.go index 4df5614c25fb..ef4765cb0965 100644 --- a/snow/networking/router/router.go +++ b/snow/networking/router/router.go @@ -36,8 +36,7 @@ type Router interface { trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, - metricsNamespace string, - metricsRegisterer prometheus.Registerer, + reg prometheus.Registerer, ) error Shutdown(context.Context) AddChain(ctx context.Context, chain handler.Handler) diff --git a/snow/networking/router/traced_router.go b/snow/networking/router/traced_router.go index 4c52bce0827a..cbd2b6ed1205 100644 --- a/snow/networking/router/traced_router.go +++ b/snow/networking/router/traced_router.go @@ -47,8 +47,7 @@ func (r *tracedRouter) Initialize( trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, - metricsNamespace string, - metricsRegisterer prometheus.Registerer, + reg prometheus.Registerer, ) error { return 
r.router.Initialize( nodeID, @@ -60,8 +59,7 @@ func (r *tracedRouter) Initialize( trackedSubnets, onFatal, healthConfig, - metricsNamespace, - metricsRegisterer, + reg, ) } diff --git a/snow/networking/sender/external_sender.go b/snow/networking/sender/external_sender.go index 7d279889e3af..f8f90a2cce48 100644 --- a/snow/networking/sender/external_sender.go +++ b/snow/networking/sender/external_sender.go @@ -6,6 +6,7 @@ package sender import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/set" ) @@ -13,22 +14,10 @@ import ( // ExternalSender sends consensus messages to other validators // Right now this is implemented in the networking package type ExternalSender interface { - // Send a message to a specific set of nodes Send( msg message.OutboundMessage, - nodeIDs set.Set[ids.NodeID], + config common.SendConfig, subnetID ids.ID, allower subnets.Allower, ) set.Set[ids.NodeID] - - // Send a message to a random group of nodes in a subnet. - // Nodes are sampled based on their validator status. 
- Gossip( - msg message.OutboundMessage, - subnetID ids.ID, - numValidatorsToSend int, - numNonValidatorsToSend int, - numPeersToSend int, - allower subnets.Allower, - ) set.Set[ids.NodeID] } diff --git a/snow/networking/sender/mock_external_sender.go b/snow/networking/sender/mock_external_sender.go index 9dc0a50d1af9..420f3b79fc4b 100644 --- a/snow/networking/sender/mock_external_sender.go +++ b/snow/networking/sender/mock_external_sender.go @@ -14,6 +14,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" message "github.com/ava-labs/avalanchego/message" + common "github.com/ava-labs/avalanchego/snow/engine/common" subnets "github.com/ava-labs/avalanchego/subnets" set "github.com/ava-labs/avalanchego/utils/set" gomock "go.uber.org/mock/gomock" @@ -42,30 +43,16 @@ func (m *MockExternalSender) EXPECT() *MockExternalSenderMockRecorder { return m.recorder } -// Gossip mocks base method. -func (m *MockExternalSender) Gossip(msg message.OutboundMessage, subnetID ids.ID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend int, allower subnets.Allower) set.Set[ids.NodeID] { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Gossip", msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) - ret0, _ := ret[0].(set.Set[ids.NodeID]) - return ret0 -} - -// Gossip indicates an expected call of Gossip. -func (mr *MockExternalSenderMockRecorder) Gossip(msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gossip", reflect.TypeOf((*MockExternalSender)(nil).Gossip), msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) -} - // Send mocks base method. 
-func (m *MockExternalSender) Send(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] { +func (m *MockExternalSender) Send(msg message.OutboundMessage, config common.SendConfig, subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", msg, nodeIDs, subnetID, allower) + ret := m.ctrl.Call(m, "Send", msg, config, subnetID, allower) ret0, _ := ret[0].(set.Set[ids.NodeID]) return ret0 } // Send indicates an expected call of Send. -func (mr *MockExternalSenderMockRecorder) Send(msg, nodeIDs, subnetID, allower any) *gomock.Call { +func (mr *MockExternalSenderMockRecorder) Send(msg, config, subnetID, allower any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), msg, nodeIDs, subnetID, allower) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), msg, config, subnetID, allower) } diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go index a7090e5d0b64..e4e36bd3ebbb 100644 --- a/snow/networking/sender/sender.go +++ b/snow/networking/sender/sender.go @@ -5,7 +5,6 @@ package sender import ( "context" - "fmt" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" @@ -18,12 +17,17 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/subnets" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ) -var _ common.Sender = (*sender)(nil) +const opLabel = "op" + +var ( + _ common.Sender = (*sender)(nil) + + opLabels = []string{opLabel} +) // sender is a wrapper around an ExternalSender. 
// Messages to this node are put directly into [router] rather than @@ -38,11 +42,11 @@ type sender struct { router router.Router timeouts timeout.Manager - // Request message type --> Counts how many of that request - // have failed because the node was benched - failedDueToBench map[message.Op]prometheus.Counter - engineType p2p.EngineType - subnet subnets.Subnet + // Counts how many request have failed because the node was benched + failedDueToBench *prometheus.CounterVec // op + + engineType p2p.EngineType + subnet subnets.Subnet } func New( @@ -53,42 +57,25 @@ func New( timeouts timeout.Manager, engineType p2p.EngineType, subnet subnets.Subnet, + reg prometheus.Registerer, ) (common.Sender, error) { s := &sender{ - ctx: ctx, - msgCreator: msgCreator, - sender: externalSender, - router: router, - timeouts: timeouts, - failedDueToBench: make(map[message.Op]prometheus.Counter, len(message.ConsensusRequestOps)), - engineType: engineType, - subnet: subnet, - } - - for _, op := range message.ConsensusRequestOps { - counter := prometheus.NewCounter( + ctx: ctx, + msgCreator: msgCreator, + sender: externalSender, + router: router, + timeouts: timeouts, + failedDueToBench: prometheus.NewCounterVec( prometheus.CounterOpts{ - Name: fmt.Sprintf("%s_failed_benched", op), - Help: fmt.Sprintf("# of times a %s request was not sent because the node was benched", op), + Name: "failed_benched", + Help: "requests dropped because a node was benched", }, - ) - - switch engineType { - case p2p.EngineType_ENGINE_TYPE_SNOWMAN: - if err := ctx.Registerer.Register(counter); err != nil { - return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) - } - case p2p.EngineType_ENGINE_TYPE_AVALANCHE: - if err := ctx.AvalancheRegisterer.Register(counter); err != nil { - return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) - } - default: - return nil, fmt.Errorf("unknown engine type %s", engineType) - } - - s.failedDueToBench[op] = counter + opLabels, + ), + 
engineType: engineType, + subnet: subnet, } - return s, nil + return s, reg.Register(s.failedDueToBench) } func (s *sender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { @@ -146,7 +133,9 @@ func (s *sender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Se if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -208,7 +197,9 @@ func (s *sender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -289,7 +280,9 @@ func (s *sender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Se if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -351,7 +344,9 @@ func (s *sender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -383,7 +378,6 @@ func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[id nodeID, s.ctx.ChainID, requestID, - s.engineType, ) s.router.RegisterRequest( ctx, @@ -393,7 +387,7 @@ func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[id requestID, message.AcceptedFrontierOp, inMsg, - s.engineType, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, ) } @@ -406,7 +400,6 @@ func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[id requestID, deadline, s.ctx.NodeID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -416,7 +409,6 @@ func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[id s.ctx.ChainID, requestID, deadline, - s.engineType, ) // Send the message over the network. 
@@ -424,7 +416,9 @@ func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[id if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -486,7 +480,9 @@ func (s *sender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -518,7 +514,6 @@ func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID nodeID, s.ctx.ChainID, requestID, - s.engineType, ) s.router.RegisterRequest( ctx, @@ -528,7 +523,7 @@ func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID requestID, message.AcceptedOp, inMsg, - s.engineType, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, ) } @@ -542,7 +537,6 @@ func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID deadline, containerIDs, s.ctx.NodeID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -553,7 +547,6 @@ func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID requestID, deadline, containerIDs, - s.engineType, ) // Send the message over the network. @@ -561,7 +554,9 @@ func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -619,7 +614,9 @@ func (s *sender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -656,16 +653,19 @@ func (s *sender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, reques s.engineType, ) - // Sending a GetAncestors to myself always fails. + // Sending a GetAncestors to myself will fail. 
To avoid constantly sending + // myself requests when not connected to any peers, we rely on the timeout + // firing to deliver the GetAncestorsFailed message. if nodeID == s.ctx.NodeID { - go s.router.HandleInbound(ctx, inMsg) return } // [nodeID] may be benched. That is, they've been unresponsive so we don't // even bother sending requests to them. We just have them immediately fail. if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.GetAncestorsOp].Inc() // update metric + s.failedDueToBench.With(prometheus.Labels{ + opLabel: message.GetAncestorsOp.String(), + }).Inc() s.timeouts.RegisterRequestToUnreachableValidator() go s.router.HandleInbound(ctx, inMsg) return @@ -699,7 +699,9 @@ func (s *sender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, reques nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -735,7 +737,9 @@ func (s *sender) SendAncestors(_ context.Context, nodeID ids.NodeID, requestID u nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -759,7 +763,6 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 nodeID, s.ctx.ChainID, requestID, - s.engineType, ) s.router.RegisterRequest( ctx, @@ -769,7 +772,7 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 requestID, message.PutOp, inMsg, - s.engineType, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, ) // Sending a Get to myself always fails. @@ -781,7 +784,9 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 // [nodeID] may be benched. That is, they've been unresponsive so we don't // even bother sending requests to them. We just have them immediately fail. 
if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.GetOp].Inc() // update metric + s.failedDueToBench.With(prometheus.Labels{ + opLabel: message.GetOp.String(), + }).Inc() s.timeouts.RegisterRequestToUnreachableValidator() go s.router.HandleInbound(ctx, inMsg) return @@ -796,7 +801,6 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 requestID, deadline, containerID, - s.engineType, ) // Send the message over the network. @@ -805,7 +809,9 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 nodeIDs := set.Of(nodeID) sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -836,7 +842,7 @@ func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint3 func (s *sender) SendPut(_ context.Context, nodeID ids.NodeID, requestID uint32, container []byte) { // Create the outbound message. - outMsg, err := s.msgCreator.Put(s.ctx.ChainID, requestID, container, s.engineType) + outMsg, err := s.msgCreator.Put(s.ctx.ChainID, requestID, container) if err != nil { s.ctx.Log.Error("failed to build message", zap.Stringer("messageOp", message.PutOp), @@ -852,7 +858,9 @@ func (s *sender) SendPut(_ context.Context, nodeID ids.NodeID, requestID uint32, nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -895,7 +903,6 @@ func (s *sender) SendPushQuery( nodeID, s.ctx.ChainID, requestID, - s.engineType, ) s.router.RegisterRequest( ctx, @@ -905,7 +912,7 @@ func (s *sender) SendPushQuery( requestID, message.ChitsOp, inMsg, - s.engineType, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, ) } @@ -924,7 +931,6 @@ func (s *sender) SendPushQuery( container, requestedHeight, s.ctx.NodeID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -934,7 +940,9 @@ func (s *sender) SendPushQuery( // immediately fail. 
for nodeID := range nodeIDs { if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.PushQueryOp].Inc() // update metric + s.failedDueToBench.With(prometheus.Labels{ + opLabel: message.PushQueryOp.String(), + }).Inc() nodeIDs.Remove(nodeID) s.timeouts.RegisterRequestToUnreachableValidator() @@ -944,7 +952,6 @@ func (s *sender) SendPushQuery( nodeID, s.ctx.ChainID, requestID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -957,7 +964,6 @@ func (s *sender) SendPushQuery( deadline, container, requestedHeight, - s.engineType, ) // Send the message over the network. @@ -966,7 +972,9 @@ func (s *sender) SendPushQuery( if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -1008,7 +1016,6 @@ func (s *sender) SendPushQuery( nodeID, s.ctx.ChainID, requestID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1034,7 +1041,6 @@ func (s *sender) SendPullQuery( nodeID, s.ctx.ChainID, requestID, - s.engineType, ) s.router.RegisterRequest( ctx, @@ -1044,7 +1050,7 @@ func (s *sender) SendPullQuery( requestID, message.ChitsOp, inMsg, - s.engineType, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, ) } @@ -1063,7 +1069,6 @@ func (s *sender) SendPullQuery( containerID, requestedHeight, s.ctx.NodeID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1073,7 +1078,9 @@ func (s *sender) SendPullQuery( // have them immediately fail. for nodeID := range nodeIDs { if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.PullQueryOp].Inc() // update metric + s.failedDueToBench.With(prometheus.Labels{ + opLabel: message.PullQueryOp.String(), + }).Inc() nodeIDs.Remove(nodeID) s.timeouts.RegisterRequestToUnreachableValidator() // Immediately register a failure. 
Do so asynchronously to avoid @@ -1082,7 +1089,6 @@ func (s *sender) SendPullQuery( nodeID, s.ctx.ChainID, requestID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1095,7 +1101,6 @@ func (s *sender) SendPullQuery( deadline, containerID, requestedHeight, - s.engineType, ) // Send the message over the network. @@ -1103,7 +1108,9 @@ func (s *sender) SendPullQuery( if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -1136,7 +1143,6 @@ func (s *sender) SendPullQuery( nodeID, s.ctx.ChainID, requestID, - s.engineType, ) go s.router.HandleInbound(ctx, inMsg) } @@ -1187,7 +1193,9 @@ func (s *sender) SendChits( nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -1319,7 +1327,9 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] // have them immediately fail. for nodeID := range nodeIDs { if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.AppRequestOp].Inc() // update metric + s.failedDueToBench.With(prometheus.Labels{ + opLabel: message.AppRequestOp.String(), + }).Inc() nodeIDs.Remove(nodeID) s.timeouts.RegisterRequestToUnreachableValidator() @@ -1350,7 +1360,9 @@ func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID] if err == nil { sentTo = s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -1433,7 +1445,9 @@ func (s *sender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, request nodeIDs := set.Of(nodeID) sentTo := s.sender.Send( outMsg, - nodeIDs, + common.SendConfig{ + NodeIDs: nodeIDs, + }, s.ctx.SubnetID, s.subnet, ) @@ -1496,7 +1510,9 @@ func (s *sender) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID // Send the message over the network. 
sentTo := s.sender.Send( outMsg, - set.Of(nodeID), + common.SendConfig{ + NodeIDs: set.Of(nodeID), + }, s.ctx.SubnetID, s.subnet, ) @@ -1524,7 +1540,11 @@ func (s *sender) SendAppError(ctx context.Context, nodeID ids.NodeID, requestID return nil } -func (s *sender) SendAppGossipSpecific(_ context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { +func (s *sender) SendAppGossip( + _ context.Context, + config common.SendConfig, + appGossipBytes []byte, +) error { // Create the outbound message. outMsg, err := s.msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) if err != nil { @@ -1537,62 +1557,12 @@ func (s *sender) SendAppGossipSpecific(_ context.Context, nodeIDs set.Set[ids.No return nil } - // Send the message over the network. sentTo := s.sender.Send( outMsg, - nodeIDs, + config, s.ctx.SubnetID, s.subnet, ) - if sentTo.Len() == 0 { - for nodeID := range nodeIDs { - if !sentTo.Contains(nodeID) { - if s.ctx.Log.Enabled(logging.Verbo) { - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("payload", appGossipBytes), - ) - } else { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", s.ctx.ChainID), - ) - } - } - } - } - return nil -} - -func (s *sender) SendAppGossip(_ context.Context, appGossipBytes []byte) error { - // Create the outbound message. 
- outMsg, err := s.msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) - if err != nil { - s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AppGossipOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("payload", appGossipBytes), - zap.Error(err), - ) - return nil - } - - gossipConfig := s.subnet.Config().GossipConfig - validatorSize := int(gossipConfig.AppGossipValidatorSize) - nonValidatorSize := int(gossipConfig.AppGossipNonValidatorSize) - peerSize := int(gossipConfig.AppGossipPeerSize) - - sentTo := s.sender.Gossip( - outMsg, - s.ctx.SubnetID, - validatorSize, - nonValidatorSize, - peerSize, - s.subnet, - ) if sentTo.Len() == 0 { if s.ctx.Log.Enabled(logging.Verbo) { s.ctx.Log.Verbo("failed to send message", @@ -1609,96 +1579,3 @@ func (s *sender) SendAppGossip(_ context.Context, appGossipBytes []byte) error { } return nil } - -func (s *sender) SendGossip(_ context.Context, container []byte) { - // Create the outbound message. - outMsg, err := s.msgCreator.Put( - s.ctx.ChainID, - constants.GossipMsgRequestID, - container, - s.engineType, - ) - if err != nil { - s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("container", container), - zap.Error(err), - ) - return - } - - gossipConfig := s.subnet.Config().GossipConfig - sentTo := s.sender.Gossip( - outMsg, - s.ctx.SubnetID, - int(gossipConfig.AcceptedFrontierValidatorSize), - int(gossipConfig.AcceptedFrontierNonValidatorSize), - int(gossipConfig.AcceptedFrontierPeerSize), - s.subnet, - ) - if sentTo.Len() == 0 { - if s.ctx.Log.Enabled(logging.Verbo) { - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("container", container), - ) - } else { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - ) - } - } -} - -// 
Accept is called after every consensus decision -func (s *sender) Accept(ctx *snow.ConsensusContext, _ ids.ID, container []byte) error { - if ctx.State.Get().State != snow.NormalOp { - // don't gossip during bootstrapping - return nil - } - - // Create the outbound message. - outMsg, err := s.msgCreator.Put( - s.ctx.ChainID, - constants.GossipMsgRequestID, - container, - s.engineType, - ) - if err != nil { - s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("container", container), - zap.Error(err), - ) - return nil - } - - gossipConfig := s.subnet.Config().GossipConfig - sentTo := s.sender.Gossip( - outMsg, - s.ctx.SubnetID, - int(gossipConfig.OnAcceptValidatorSize), - int(gossipConfig.OnAcceptNonValidatorSize), - int(gossipConfig.OnAcceptPeerSize), - s.subnet, - ) - if sentTo.Len() == 0 { - if s.ctx.Log.Enabled(logging.Verbo) { - s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - zap.Binary("container", container), - ) - } else { - s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PutOp), - zap.Stringer("chainID", s.ctx.ChainID), - ) - } - } - return nil -} diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 89db9e457855..34f138f6db21 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/benchlist" @@ -35,20 +35,12 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" + p2ppb 
"github.com/ava-labs/avalanchego/proto/pb/p2p" commontracker "github.com/ava-labs/avalanchego/snow/engine/common/tracker" ) const testThreadPoolSize = 2 -var defaultSubnetConfig = subnets.Config{ - GossipConfig: subnets.GossipConfig{ - AcceptedFrontierPeerSize: 2, - OnAcceptPeerSize: 2, - AppGossipValidatorSize: 2, - AppGossipNonValidatorSize: 2, - }, -} - func TestTimeout(t *testing.T) { require := require.New(t) @@ -66,7 +58,7 @@ func TestTimeout(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -78,7 +70,6 @@ func TestTimeout(t *testing.T) { mc, err := message.NewCreator( logging.NoLog{}, metrics, - "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -94,7 +85,6 @@ func TestTimeout(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -107,8 +97,9 @@ func TestTimeout(t *testing.T) { externalSender, &chainRouter, tm, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - subnets.New(ctx.NodeID, defaultSubnetConfig), + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -120,6 +111,16 @@ func TestTimeout(t *testing.T) { time.Second, ) require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx2, vdrs, @@ -130,6 +131,8 @@ func TestTimeout(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -159,7 +162,7 @@ func TestTimeout(t *testing.T) { }, }) ctx2.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrap is ongoing }) @@ -297,14 +300,14 @@ func 
TestTimeout(t *testing.T) { } // Send messages to disconnected peers - externalSender.SendF = func(message.OutboundMessage, set.Set[ids.NodeID], ids.ID, subnets.Allower) set.Set[ids.NodeID] { + externalSender.SendF = func(message.OutboundMessage, common.SendConfig, ids.ID, subnets.Allower) set.Set[ids.NodeID] { return nil } sendAll() // Send messages to connected peers - externalSender.SendF = func(_ message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { - return nodeIDs + externalSender.SendF = func(_ message.OutboundMessage, config common.SendConfig, _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + return config.NodeIDs } sendAll() @@ -331,7 +334,7 @@ func TestReliableMessages(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -344,7 +347,6 @@ func TestReliableMessages(t *testing.T) { mc, err := message.NewCreator( logging.NoLog{}, metrics, - "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -360,7 +362,6 @@ func TestReliableMessages(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -373,8 +374,9 @@ func TestReliableMessages(t *testing.T) { externalSender, &chainRouter, tm, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - subnets.New(ctx.NodeID, defaultSubnetConfig), + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -386,6 +388,16 @@ func TestReliableMessages(t *testing.T) { time.Second, ) require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx2, vdrs, @@ -396,6 +408,8 @@ func TestReliableMessages(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + 
p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -435,7 +449,7 @@ func TestReliableMessages(t *testing.T) { }, }) ctx2.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrap is ongoing }) @@ -477,7 +491,7 @@ func TestReliableMessagesToMyself(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -490,7 +504,6 @@ func TestReliableMessagesToMyself(t *testing.T) { mc, err := message.NewCreator( logging.NoLog{}, metrics, - "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second, ) @@ -506,7 +519,6 @@ func TestReliableMessagesToMyself(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -519,8 +531,9 @@ func TestReliableMessagesToMyself(t *testing.T) { externalSender, &chainRouter, tm, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - subnets.New(ctx.NodeID, defaultSubnetConfig), + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -532,6 +545,16 @@ func TestReliableMessagesToMyself(t *testing.T) { time.Second, ) require.NoError(err) + + p2pTracker, err := p2p.NewPeerTracker( + logging.NoLog{}, + "", + prometheus.NewRegistry(), + nil, + version.CurrentApp, + ) + require.NoError(err) + h, err := handler.New( ctx2, vdrs, @@ -542,6 +565,8 @@ func TestReliableMessagesToMyself(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), commontracker.NewPeers(), + p2pTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -580,7 +605,7 @@ func TestReliableMessagesToMyself(t *testing.T) { }, }) ctx2.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.Bootstrapping, // assumed bootstrap is ongoing }) @@ 
-614,7 +639,6 @@ func TestSender_Bootstrap_Requests(t *testing.T) { requestID = uint32(1337) heights = []uint64{1, 2, 3} containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN ) snowCtx := snowtest.Context(t, snowtest.PChainID) ctx := snowtest.ConsensusContext(snowCtx) @@ -627,7 +651,6 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setMsgCreatorExpect func(msgCreator *message.MockOutboundMsgBuilder) setExternalSenderExpect func(externalSender *MockExternalSender) sendF func(require *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) - engineType p2p.EngineType } tests := []test{ @@ -641,8 +664,8 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.GetStateSummaryFrontier{}, msg.Message()) - innerMsg := msg.Message().(*p2p.GetStateSummaryFrontier) + require.IsType(&p2ppb.GetStateSummaryFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.GetStateSummaryFrontier) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) @@ -658,8 +681,10 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - // Note [myNodeID] is not in this set - set.Of(successNodeID, failedNodeID), + common.SendConfig{ + // Note [myNodeID] is not in this set + NodeIDs: set.Of(successNodeID, failedNodeID), + }, ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) @@ -682,8 +707,8 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.GetAcceptedStateSummary{}, msg.Message()) - innerMsg := msg.Message().(*p2p.GetAcceptedStateSummary) + 
require.IsType(&p2ppb.GetAcceptedStateSummary{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.GetAcceptedStateSummary) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) @@ -701,8 +726,10 @@ func TestSender_Bootstrap_Requests(t *testing.T) { setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - // Note [myNodeID] is not in this set - set.Of(successNodeID, failedNodeID), + common.SendConfig{ + // Note [myNodeID] is not in this set + NodeIDs: set.Of(successNodeID, failedNodeID), + }, ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) @@ -718,16 +745,14 @@ func TestSender_Bootstrap_Requests(t *testing.T) { nodeID, ctx.ChainID, requestID, - engineType, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.GetAcceptedFrontier{}, msg.Message()) - innerMsg := msg.Message().(*p2p.GetAcceptedFrontier) + require.IsType(&p2ppb.GetAcceptedFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.GetAcceptedFrontier) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) - require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: message.AcceptedFrontierOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { @@ -735,14 +760,15 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ctx.ChainID, requestID, deadline, - engineType, ).Return(nil, nil) }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - // Note [myNodeID] is not in this set - set.Of(successNodeID, failedNodeID), + common.SendConfig{ + // Note [myNodeID] is not in this set + NodeIDs: set.Of(successNodeID, failedNodeID), + }, ctx.SubnetID, // Subnet ID 
gomock.Any(), ).Return(set.Of(successNodeID)) @@ -750,7 +776,6 @@ func TestSender_Bootstrap_Requests(t *testing.T) { sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetAcceptedFrontier(context.Background(), nodeIDs, requestID) }, - engineType: engineType, }, { name: "GetAccepted", @@ -759,16 +784,14 @@ func TestSender_Bootstrap_Requests(t *testing.T) { nodeID, ctx.ChainID, requestID, - engineType, ) }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.GetAccepted{}, msg.Message()) - innerMsg := msg.Message().(*p2p.GetAccepted) + require.IsType(&p2ppb.GetAccepted{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.GetAccepted) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(uint64(deadline), innerMsg.Deadline) - require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: message.AcceptedOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { @@ -777,14 +800,15 @@ func TestSender_Bootstrap_Requests(t *testing.T) { requestID, deadline, containerIDs, - engineType, ).Return(nil, nil) }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( gomock.Any(), // Outbound message - // Note [myNodeID] is not in this set - set.Of(successNodeID, failedNodeID), + common.SendConfig{ + // Note [myNodeID] is not in this set + NodeIDs: set.Of(successNodeID, failedNodeID), + }, ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(set.Of(successNodeID)) @@ -792,7 +816,6 @@ func TestSender_Bootstrap_Requests(t *testing.T) { sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetAccepted(context.Background(), nodeIDs, requestID, containerIDs) }, - engineType: engineType, }, } @@ -821,8 +844,9 @@ func TestSender_Bootstrap_Requests(t *testing.T) { externalSender, router, timeoutManager, - engineType, - 
subnets.New(ctx.NodeID, defaultSubnetConfig), + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -840,7 +864,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message - tt.engineType, + p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, ) } @@ -878,7 +902,6 @@ func TestSender_Bootstrap_Responses(t *testing.T) { requestID = uint32(1337) summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} summary = []byte{1, 2, 3} - engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE ) snowCtx := snowtest.Context(t, snowtest.PChainID) ctx := snowtest.ConsensusContext(snowCtx) @@ -902,17 +925,19 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.StateSummaryFrontier{}, msg.Message()) - innerMsg := msg.Message().(*p2p.StateSummaryFrontier) + require.IsType(&p2ppb.StateSummaryFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.StateSummaryFrontier) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(summary, innerMsg.Summary) }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Of(destinationNodeID), // Node IDs - ctx.SubnetID, // Subnet ID + gomock.Any(), // Outbound message + common.SendConfig{ + NodeIDs: set.Of(destinationNodeID), + }, + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -930,8 +955,8 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.AcceptedStateSummary{}, msg.Message()) - innerMsg := 
msg.Message().(*p2p.AcceptedStateSummary) + require.IsType(&p2ppb.AcceptedStateSummary{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.AcceptedStateSummary) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) for i, summaryID := range summaryIDs { @@ -940,9 +965,11 @@ func TestSender_Bootstrap_Responses(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Of(destinationNodeID), // Node IDs - ctx.SubnetID, // Subnet ID + gomock.Any(), // Outbound message + common.SendConfig{ + NodeIDs: set.Of(destinationNodeID), + }, + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -960,17 +987,19 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.AcceptedFrontier{}, msg.Message()) - innerMsg := msg.Message().(*p2p.AcceptedFrontier) + require.IsType(&p2ppb.AcceptedFrontier{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.AcceptedFrontier) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) require.Equal(summaryIDs[0][:], innerMsg.ContainerId) }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Of(destinationNodeID), // Node IDs - ctx.SubnetID, // Subnet ID + gomock.Any(), // Outbound message + common.SendConfig{ + NodeIDs: set.Of(destinationNodeID), + }, + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -988,8 +1017,8 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil, nil) // Don't care about the message }, assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { - require.IsType(&p2p.Accepted{}, msg.Message()) - innerMsg := msg.Message().(*p2p.Accepted) + 
require.IsType(&p2ppb.Accepted{}, msg.Message()) + innerMsg := msg.Message().(*p2ppb.Accepted) require.Equal(ctx.ChainID[:], innerMsg.ChainId) require.Equal(requestID, innerMsg.RequestId) for i, summaryID := range summaryIDs { @@ -998,9 +1027,11 @@ func TestSender_Bootstrap_Responses(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Of(destinationNodeID), // Node IDs - ctx.SubnetID, // Subnet ID + gomock.Any(), // Outbound message + common.SendConfig{ + NodeIDs: set.Of(destinationNodeID), + }, + ctx.SubnetID, // Subnet ID gomock.Any(), ).Return(nil) }, @@ -1022,19 +1053,15 @@ func TestSender_Bootstrap_Responses(t *testing.T) { router = router.NewMockRouter(ctrl) ) - // Instantiate new registerers to avoid duplicate metrics - // registration - ctx.Registerer = prometheus.NewRegistry() - ctx.AvalancheRegisterer = prometheus.NewRegistry() - sender, err := New( ctx, msgCreator, externalSender, router, timeoutManager, - engineType, - subnets.New(ctx.NodeID, defaultSubnetConfig), + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -1075,7 +1102,7 @@ func TestSender_Single_Request(t *testing.T) { deadline = time.Second requestID = uint32(1337) containerID = ids.GenerateTestID() - engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN + engineType = p2ppb.EngineType_ENGINE_TYPE_SNOWMAN ) snowCtx := snowtest.Context(t, snowtest.PChainID) ctx := snowtest.ConsensusContext(snowCtx) @@ -1083,11 +1110,13 @@ func TestSender_Single_Request(t *testing.T) { type test struct { name string failedMsgF func(nodeID ids.NodeID) message.InboundMessage - assertMsgToMyself func(require *require.Assertions, msg message.InboundMessage) + shouldFailMessageToSelf bool + assertMsg func(require *require.Assertions, msg message.InboundMessage) expectedResponseOp message.Op setMsgCreatorExpect func(msgCreator 
*message.MockOutboundMsgBuilder) setExternalSenderExpect func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) sendF func(require *require.Assertions, sender common.Sender, nodeID ids.NodeID) + expectedEngineType p2ppb.EngineType } tests := []test{ @@ -1101,7 +1130,8 @@ func TestSender_Single_Request(t *testing.T) { engineType, ) }, - assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + shouldFailMessageToSelf: false, + assertMsg: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&message.GetAncestorsFailed{}, msg.Message()) innerMsg := msg.Message().(*message.GetAncestorsFailed) require.Equal(ctx.ChainID, innerMsg.ChainID) @@ -1120,8 +1150,10 @@ func TestSender_Single_Request(t *testing.T) { }, setExternalSenderExpect: func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Of(destinationNodeID), // Node IDs + gomock.Any(), // Outbound message + common.SendConfig{ + NodeIDs: set.Of(destinationNodeID), + }, ctx.SubnetID, gomock.Any(), ).Return(sentTo) @@ -1129,6 +1161,7 @@ func TestSender_Single_Request(t *testing.T) { sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { sender.SendGetAncestors(context.Background(), nodeID, requestID, containerID) }, + expectedEngineType: engineType, }, { name: "Get", @@ -1137,15 +1170,14 @@ func TestSender_Single_Request(t *testing.T) { nodeID, ctx.ChainID, requestID, - engineType, ) }, - assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + shouldFailMessageToSelf: true, + assertMsg: func(require *require.Assertions, msg message.InboundMessage) { require.IsType(&message.GetFailed{}, msg.Message()) innerMsg := msg.Message().(*message.GetFailed) require.Equal(ctx.ChainID, innerMsg.ChainID) require.Equal(requestID, innerMsg.RequestID) - require.Equal(engineType, innerMsg.EngineType) }, expectedResponseOp: 
message.PutOp, setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { @@ -1154,13 +1186,14 @@ func TestSender_Single_Request(t *testing.T) { requestID, deadline, containerID, - engineType, ).Return(nil, nil) }, setExternalSenderExpect: func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) { externalSender.EXPECT().Send( - gomock.Any(), // Outbound message - set.Of(destinationNodeID), // Node IDs + gomock.Any(), // Outbound message + common.SendConfig{ + NodeIDs: set.Of(destinationNodeID), + }, ctx.SubnetID, gomock.Any(), ).Return(sentTo) @@ -1194,7 +1227,8 @@ func TestSender_Single_Request(t *testing.T) { router, timeoutManager, engineType, - subnets.New(ctx.NodeID, defaultSubnetConfig), + subnets.New(ctx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -1213,20 +1247,24 @@ func TestSender_Single_Request(t *testing.T) { requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message - engineType, // Engine Type + tt.expectedEngineType, // Engine Type ) // Note that HandleInbound is called in a separate goroutine // so we need to use a channel to synchronize the test. calledHandleInbound := make(chan struct{}) - router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( - func(_ context.Context, msg message.InboundMessage) { - // Make sure we're sending ourselves - // the expected message. - tt.assertMsgToMyself(require, msg) - close(calledHandleInbound) - }, - ) + if tt.shouldFailMessageToSelf { + router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( + func(_ context.Context, msg message.InboundMessage) { + // Make sure we're sending ourselves + // the expected message. 
+ tt.assertMsg(require, msg) + close(calledHandleInbound) + }, + ) + } else { + close(calledHandleInbound) + } tt.sendF(require, sender, ctx.NodeID) @@ -1249,7 +1287,7 @@ func TestSender_Single_Request(t *testing.T) { requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message - engineType, // Engine Type + tt.expectedEngineType, // Engine Type ) // Note that HandleInbound is called in a separate goroutine @@ -1259,7 +1297,7 @@ func TestSender_Single_Request(t *testing.T) { func(_ context.Context, msg message.InboundMessage) { // Make sure we're sending ourselves // the expected message. - tt.assertMsgToMyself(require, msg) + tt.assertMsg(require, msg) close(calledHandleInbound) }, ) @@ -1285,7 +1323,7 @@ func TestSender_Single_Request(t *testing.T) { requestID, // Request ID tt.expectedResponseOp, // Operation expectedFailedMsg, // Failure Message - engineType, // Engine Type + tt.expectedEngineType, // Engine Type ) // Note that HandleInbound is called in a separate goroutine @@ -1295,7 +1333,7 @@ func TestSender_Single_Request(t *testing.T) { func(_ context.Context, msg message.InboundMessage) { // Make sure we're sending ourselves // the expected message. 
- tt.assertMsgToMyself(require, msg) + tt.assertMsg(require, msg) close(calledHandleInbound) }, ) diff --git a/snow/networking/sender/test_external_sender.go b/snow/networking/sender/test_external_sender.go index ae06187216bf..3d5e688492b9 100644 --- a/snow/networking/sender/test_external_sender.go +++ b/snow/networking/sender/test_external_sender.go @@ -9,39 +9,39 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/set" ) var ( - errSend = errors.New("unexpectedly called Send") - errGossip = errors.New("unexpectedly called Gossip") + _ ExternalSender = (*ExternalSenderTest)(nil) + + errSend = errors.New("unexpectedly called Send") ) // ExternalSenderTest is a test sender type ExternalSenderTest struct { TB testing.TB - CantSend, CantGossip bool + CantSend bool - SendF func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] - GossipF func(msg message.OutboundMessage, subnetID ids.ID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend int, allower subnets.Allower) set.Set[ids.NodeID] + SendF func(msg message.OutboundMessage, config common.SendConfig, subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] } // Default set the default callable value to [cant] func (s *ExternalSenderTest) Default(cant bool) { s.CantSend = cant - s.CantGossip = cant } func (s *ExternalSenderTest) Send( msg message.OutboundMessage, - nodeIDs set.Set[ids.NodeID], + config common.SendConfig, subnetID ids.ID, allower subnets.Allower, ) set.Set[ids.NodeID] { if s.SendF != nil { - return s.SendF(msg, nodeIDs, subnetID, allower) + return s.SendF(msg, config, subnetID, allower) } if s.CantSend { if s.TB != nil { @@ -51,26 +51,3 @@ func (s *ExternalSenderTest) Send( } return nil } - -// Given a msg type, the corresponding mock function is 
called if it was initialized. -// If it wasn't initialized and this function shouldn't be called and testing was -// initialized, then testing will fail. -func (s *ExternalSenderTest) Gossip( - msg message.OutboundMessage, - subnetID ids.ID, - numValidatorsToSend int, - numNonValidatorsToSend int, - numPeersToSend int, - allower subnets.Allower, -) set.Set[ids.NodeID] { - if s.GossipF != nil { - return s.GossipF(msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) - } - if s.CantGossip { - if s.TB != nil { - s.TB.Helper() - s.TB.Fatal(errGossip) - } - } - return nil -} diff --git a/snow/networking/sender/traced_sender.go b/snow/networking/sender/traced_sender.go index a82264ab325b..0e25602c84c3 100644 --- a/snow/networking/sender/traced_sender.go +++ b/snow/networking/sender/traced_sender.go @@ -9,7 +9,6 @@ import ( "go.opentelemetry.io/otel/attribute" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/set" @@ -259,33 +258,23 @@ func (s *tracedSender) SendAppError(ctx context.Context, nodeID ids.NodeID, requ return s.sender.SendAppError(ctx, nodeID, requestID, errorCode, errorMessage) } -func (s *tracedSender) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { - _, span := s.tracer.Start(ctx, "tracedSender.SendAppGossipSpecific", oteltrace.WithAttributes( - attribute.Int("gossipLen", len(appGossipBytes)), - )) - defer span.End() - - return s.sender.SendAppGossipSpecific(ctx, nodeIDs, appGossipBytes) -} - -func (s *tracedSender) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { +func (s *tracedSender) SendAppGossip( + ctx context.Context, + config common.SendConfig, + appGossipBytes []byte, +) error { _, span := s.tracer.Start(ctx, "tracedSender.SendAppGossip", oteltrace.WithAttributes( + 
attribute.Int("numNodeIDs", config.NodeIDs.Len()), + attribute.Int("numValidators", config.Validators), + attribute.Int("numNonValidators", config.NonValidators), + attribute.Int("numPeers", config.Peers), attribute.Int("gossipLen", len(appGossipBytes)), )) defer span.End() - return s.sender.SendAppGossip(ctx, appGossipBytes) -} - -func (s *tracedSender) SendGossip(ctx context.Context, container []byte) { - _, span := s.tracer.Start(ctx, "tracedSender.SendGossip", oteltrace.WithAttributes( - attribute.Int("containerLen", len(container)), - )) - defer span.End() - - s.sender.SendGossip(ctx, container) -} - -func (s *tracedSender) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { - return s.sender.Accept(ctx, containerID, container) + return s.sender.SendAppGossip( + ctx, + config, + appGossipBytes, + ) } diff --git a/snow/networking/timeout/manager.go b/snow/networking/timeout/manager.go index 95a3be25e166..573dbe712bc5 100644 --- a/snow/networking/timeout/manager.go +++ b/snow/networking/timeout/manager.go @@ -71,27 +71,33 @@ type Manager interface { func NewManager( timeoutConfig *timer.AdaptiveTimeoutConfig, benchlistMgr benchlist.Manager, - metricsNamespace string, - metricsRegister prometheus.Registerer, + requestReg prometheus.Registerer, + responseReg prometheus.Registerer, ) (Manager, error) { tm, err := timer.NewAdaptiveTimeoutManager( timeoutConfig, - metricsNamespace, - metricsRegister, + requestReg, ) if err != nil { return nil, fmt.Errorf("couldn't create timeout manager: %w", err) } + + m, err := newTimeoutMetrics(responseReg) + if err != nil { + return nil, fmt.Errorf("couldn't create timeout metrics: %w", err) + } + return &manager{ - benchlistMgr: benchlistMgr, tm: tm, + benchlistMgr: benchlistMgr, + metrics: m, }, nil } type manager struct { tm timer.AdaptiveTimeoutManager benchlistMgr benchlist.Manager - metrics metrics + metrics *timeoutMetrics stopOnce sync.Once } @@ -149,7 +155,7 @@ func (m *manager) 
RegisterResponse( op message.Op, latency time.Duration, ) { - m.metrics.Observe(nodeID, chainID, op, latency) + m.metrics.Observe(chainID, op, latency) m.benchlistMgr.RegisterResponse(chainID, nodeID) m.tm.Remove(requestID) } diff --git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index 49a05f78d8d8..d6109002f615 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -27,7 +27,7 @@ func TestManagerFire(t *testing.T) { TimeoutHalflife: 5 * time.Minute, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(t, err) diff --git a/snow/networking/timeout/metrics.go b/snow/networking/timeout/metrics.go index 0892e5d8794c..3f217d5f7ad7 100644 --- a/snow/networking/timeout/metrics.go +++ b/snow/networking/timeout/metrics.go @@ -4,138 +4,73 @@ package timeout import ( - "fmt" "sync" "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/utils" ) const ( - defaultRequestHelpMsg = "time (in ns) spent waiting for a response to this message" - validatorIDLabel = "validatorID" + chainLabel = "chain" + opLabel = "op" ) -type metrics struct { - lock sync.Mutex - chainToMetrics map[ids.ID]*chainMetrics -} +var opLabels = []string{chainLabel, opLabel} -func (m *metrics) RegisterChain(ctx *snow.ConsensusContext) error { - m.lock.Lock() - defer m.lock.Unlock() +type timeoutMetrics struct { + messages *prometheus.CounterVec // chain + op + messageLatencies *prometheus.GaugeVec // chain + op - if m.chainToMetrics == nil { - m.chainToMetrics = map[ids.ID]*chainMetrics{} - } - if _, exists := m.chainToMetrics[ctx.ChainID]; exists { - return fmt.Errorf("chain %s has already been registered", 
ctx.ChainID) - } - cm, err := newChainMetrics(ctx, false) - if err != nil { - return fmt.Errorf("couldn't create metrics for chain %s: %w", ctx.ChainID, err) - } - m.chainToMetrics[ctx.ChainID] = cm - return nil + lock sync.RWMutex + chainIDToAlias map[ids.ID]string } -// Record that a response of type [op] took [latency] -func (m *metrics) Observe(nodeID ids.NodeID, chainID ids.ID, op message.Op, latency time.Duration) { - m.lock.Lock() - defer m.lock.Unlock() - - cm, exists := m.chainToMetrics[chainID] - if !exists { - // TODO should this log an error? - return +func newTimeoutMetrics(reg prometheus.Registerer) (*timeoutMetrics, error) { + m := &timeoutMetrics{ + messages: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "messages", + Help: "number of responses", + }, + opLabels, + ), + messageLatencies: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "message_latencies", + Help: "message latencies (ns)", + }, + opLabels, + ), + chainIDToAlias: make(map[ids.ID]string), } - cm.observe(nodeID, op, latency) + return m, utils.Err( + reg.Register(m.messages), + reg.Register(m.messageLatencies), + ) } -// chainMetrics contains message response time metrics for a chain -type chainMetrics struct { - ctx *snow.ConsensusContext - - messageLatencies map[message.Op]metric.Averager - - summaryEnabled bool - messageSummaries map[message.Op]*prometheus.SummaryVec -} - -func newChainMetrics(ctx *snow.ConsensusContext, summaryEnabled bool) (*chainMetrics, error) { - cm := &chainMetrics{ - ctx: ctx, - - messageLatencies: make(map[message.Op]metric.Averager, len(message.ConsensusResponseOps)), - - summaryEnabled: summaryEnabled, - messageSummaries: make(map[message.Op]*prometheus.SummaryVec, len(message.ConsensusResponseOps)), - } - - errs := wrappers.Errs{} - for _, op := range message.ConsensusResponseOps { - cm.messageLatencies[op] = metric.NewAveragerWithErrs( - "lat", - op.String(), - defaultRequestHelpMsg, - ctx.Registerer, - &errs, - ) - - if 
!summaryEnabled { - continue - } - - summaryName := fmt.Sprintf("%s_peer", op) - summary := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Namespace: "lat", - Name: summaryName, - Help: defaultRequestHelpMsg, - }, - []string{validatorIDLabel}, - ) - cm.messageSummaries[op] = summary +func (m *timeoutMetrics) RegisterChain(ctx *snow.ConsensusContext) error { + m.lock.Lock() + defer m.lock.Unlock() - if err := ctx.Registerer.Register(summary); err != nil { - errs.Add(fmt.Errorf("failed to register %s statistics: %w", summaryName, err)) - } - } - return cm, errs.Err + m.chainIDToAlias[ctx.ChainID] = ctx.PrimaryAlias + return nil } -func (cm *chainMetrics) observe(nodeID ids.NodeID, op message.Op, latency time.Duration) { - lat := float64(latency) - if msg, exists := cm.messageLatencies[op]; exists { - msg.Observe(lat) - } - - if !cm.summaryEnabled { - return - } +// Record that a response of type [op] took [latency] +func (m *timeoutMetrics) Observe(chainID ids.ID, op message.Op, latency time.Duration) { + m.lock.RLock() + defer m.lock.RUnlock() labels := prometheus.Labels{ - validatorIDLabel: nodeID.String(), - } - - msg, exists := cm.messageSummaries[op] - if !exists { - return - } - - observer, err := msg.GetMetricWith(labels) - if err != nil { - cm.ctx.Log.Warn("failed to get observer with validatorID", - zap.Error(err), - ) - return + chainLabel: m.chainIDToAlias[chainID], + opLabel: op.String(), } - observer.Observe(lat) + m.messages.With(labels).Inc() + m.messageLatencies.With(labels).Add(float64(latency)) } diff --git a/snow/networking/tracker/resource_tracker.go b/snow/networking/tracker/resource_tracker.go index b4b14a7561cf..d8f5da99192f 100644 --- a/snow/networking/tracker/resource_tracker.go +++ b/snow/networking/tracker/resource_tracker.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" 
"github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" ) @@ -200,7 +200,7 @@ type resourceTracker struct { // utilized. This doesn't necessarily result in the meters being sorted // based on their usage. However, in practice the nodes that are not being // utilized will move towards the oldest elements where they can be deleted. - meters linkedhashmap.LinkedHashmap[ids.NodeID, meter.Meter] + meters *linked.Hashmap[ids.NodeID, meter.Meter] metrics *trackerMetrics } @@ -215,10 +215,10 @@ func NewResourceTracker( resources: resources, processingMeter: factory.New(halflife), halflife: halflife, - meters: linkedhashmap.New[ids.NodeID, meter.Meter](), + meters: linked.NewHashmap[ids.NodeID, meter.Meter](), } var err error - t.metrics, err = newCPUTrackerMetrics("resource_tracker", reg) + t.metrics, err = newCPUTrackerMetrics(reg) if err != nil { return nil, fmt.Errorf("initializing resourceTracker metrics errored with: %w", err) } @@ -293,32 +293,27 @@ type trackerMetrics struct { diskSpaceAvailable prometheus.Gauge } -func newCPUTrackerMetrics(namespace string, reg prometheus.Registerer) (*trackerMetrics, error) { +func newCPUTrackerMetrics(reg prometheus.Registerer) (*trackerMetrics, error) { m := &trackerMetrics{ processingTimeMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "processing_time", - Help: "Tracked processing time over all nodes. Value expected to be in [0, number of CPU cores], but can go higher due to IO bound processes and thread multiplexing", + Name: "processing_time", + Help: "Tracked processing time over all nodes. Value expected to be in [0, number of CPU cores], but can go higher due to IO bound processes and thread multiplexing", }), cpuMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "cpu_usage", - Help: "CPU usage tracked by the resource manager. 
Value should be in [0, number of CPU cores]", + Name: "cpu_usage", + Help: "CPU usage tracked by the resource manager. Value should be in [0, number of CPU cores]", }), diskReadsMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "disk_reads", - Help: "Disk reads (bytes/sec) tracked by the resource manager", + Name: "disk_reads", + Help: "Disk reads (bytes/sec) tracked by the resource manager", }), diskWritesMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "disk_writes", - Help: "Disk writes (bytes/sec) tracked by the resource manager", + Name: "disk_writes", + Help: "Disk writes (bytes/sec) tracked by the resource manager", }), diskSpaceAvailable: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "disk_available_space", - Help: "Available space remaining (bytes) on the database volume", + Name: "disk_available_space", + Help: "Available space remaining (bytes) on the database volume", }), } err := utils.Err( diff --git a/snow/snowtest/snowtest.go b/snow/snowtest/snowtest.go index 9879b726955c..3cacc8e873bf 100644 --- a/snow/snowtest/snowtest.go +++ b/snow/snowtest/snowtest.go @@ -39,12 +39,12 @@ func (noOpAcceptor) Accept(*snow.ConsensusContext, ids.ID, []byte) error { func ConsensusContext(ctx *snow.Context) *snow.ConsensusContext { return &snow.ConsensusContext{ - Context: ctx, - Registerer: prometheus.NewRegistry(), - AvalancheRegisterer: prometheus.NewRegistry(), - BlockAcceptor: noOpAcceptor{}, - TxAcceptor: noOpAcceptor{}, - VertexAcceptor: noOpAcceptor{}, + Context: ctx, + PrimaryAlias: ctx.ChainID.String(), + Registerer: prometheus.NewRegistry(), + BlockAcceptor: noOpAcceptor{}, + TxAcceptor: noOpAcceptor{}, + VertexAcceptor: noOpAcceptor{}, } } @@ -90,7 +90,7 @@ func Context(tb testing.TB, chainID ids.ID) *snow.Context { Log: logging.NoLog{}, BCLookup: aliaser, - Metrics: metrics.NewOptionalGatherer(), + Metrics: metrics.NewPrefixGatherer(), ValidatorState: 
validatorState, ChainDataDir: "", diff --git a/snow/validators/gvalidators/validator_state_client.go b/snow/validators/gvalidators/validator_state_client.go index 49fa1e641417..ae09b749d3ca 100644 --- a/snow/validators/gvalidators/validator_state_client.go +++ b/snow/validators/gvalidators/validator_state_client.go @@ -76,11 +76,12 @@ func (c *Client) GetValidatorSet( } var publicKey *bls.PublicKey if len(validator.PublicKey) > 0 { - // This is a performance optimization to avoid the cost of compression - // and key re-verification with PublicKeyFromBytes. We can safely - // assume that the BLS Public Keys are verified before being added - // to the P-Chain and served by the gRPC server. - publicKey = bls.DeserializePublicKey(validator.PublicKey) + // This is a performance optimization to avoid the cost of + // compression and key re-verification with + // PublicKeyFromCompressedBytes. We can safely assume that the BLS + // Public Keys are verified before being added to the P-Chain and + // served by the gRPC server. + publicKey = bls.PublicKeyFromValidUncompressedBytes(validator.PublicKey) if publicKey == nil { return nil, errFailedPublicKeyDeserialize } diff --git a/snow/validators/gvalidators/validator_state_server.go b/snow/validators/gvalidators/validator_state_server.go index 5476dca4db99..0550eba0b9b3 100644 --- a/snow/validators/gvalidators/validator_state_server.go +++ b/snow/validators/gvalidators/validator_state_server.go @@ -71,8 +71,8 @@ func (s *Server) GetValidatorSet(ctx context.Context, req *pb.GetValidatorSetReq } if vdr.PublicKey != nil { // This is a performance optimization to avoid the cost of compression - // from PublicKeyToBytes. - vdrPB.PublicKey = bls.SerializePublicKey(vdr.PublicKey) + // from PublicKeyToCompressedBytes. 
+ vdrPB.PublicKey = bls.PublicKeyToUncompressedBytes(vdr.PublicKey) } resp.Validators[i] = vdrPB i++ diff --git a/snow/validators/gvalidators/validator_state_test.go b/snow/validators/gvalidators/validator_state_test.go index 8895507a4f6c..8944e5c9588a 100644 --- a/snow/validators/gvalidators/validator_state_test.go +++ b/snow/validators/gvalidators/validator_state_test.go @@ -184,8 +184,8 @@ func TestPublicKeyDeserialize(t *testing.T) { require.NoError(err) pk := bls.PublicFromSecretKey(sk) - pkBytes := bls.SerializePublicKey(pk) - pkDe := bls.DeserializePublicKey(pkBytes) + pkBytes := bls.PublicKeyToUncompressedBytes(pk) + pkDe := bls.PublicKeyFromValidUncompressedBytes(pkBytes) require.NotNil(pkDe) require.Equal(pk, pkDe) } diff --git a/snow/validators/logger.go b/snow/validators/logger.go index 40613b76b68d..50224d03f099 100644 --- a/snow/validators/logger.go +++ b/snow/validators/logger.go @@ -45,7 +45,7 @@ func (l *logger) OnValidatorAdded( if l.nodeIDs.Contains(nodeID) { var pkBytes []byte if pk != nil { - pkBytes = bls.PublicKeyToBytes(pk) + pkBytes = bls.PublicKeyToCompressedBytes(pk) } l.log.Info("node added to validator set", zap.Stringer("subnetID", l.subnetID), diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 5844c1e7f185..fcf37934112d 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -24,6 +24,12 @@ var ( ErrMissingValidators = errors.New("missing validators") ) +type ManagerCallbackListener interface { + OnValidatorAdded(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) + OnValidatorRemoved(subnetID ids.ID, nodeID ids.NodeID, weight uint64) + OnValidatorWeightChanged(subnetID ids.ID, nodeID ids.NodeID, oldWeight, newWeight uint64) +} + type SetCallbackListener interface { OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) @@ -88,9 +94,13 @@ type Manager interface { // Map of the 
validators in this subnet GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput - // When a validator's weight changes, or a validator is added/removed, - // this listener is called. - RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) + // When a validator is added, removed, or its weight changes, the listener + // will be notified of the event. + RegisterCallbackListener(listener ManagerCallbackListener) + + // When a validator is added, removed, or its weight changes on [subnetID], + // the listener will be notified of the event. + RegisterSetCallbackListener(subnetID ids.ID, listener SetCallbackListener) } // NewManager returns a new, empty manager @@ -105,7 +115,8 @@ type manager struct { // Key: Subnet ID // Value: The validators that validate the subnet - subnetToVdrs map[ids.ID]*vdrSet + subnetToVdrs map[ids.ID]*vdrSet + callbackListeners []ManagerCallbackListener } func (m *manager) AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { @@ -118,7 +129,7 @@ func (m *manager) AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKe set, exists := m.subnetToVdrs[subnetID] if !exists { - set = newSet() + set = newSet(subnetID, m.callbackListeners) m.subnetToVdrs[subnetID] = set } @@ -264,13 +275,23 @@ func (m *manager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput { return set.Map() } -func (m *manager) RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) { +func (m *manager) RegisterCallbackListener(listener ManagerCallbackListener) { + m.lock.Lock() + defer m.lock.Unlock() + + m.callbackListeners = append(m.callbackListeners, listener) + for _, set := range m.subnetToVdrs { + set.RegisterManagerCallbackListener(listener) + } +} + +func (m *manager) RegisterSetCallbackListener(subnetID ids.ID, listener SetCallbackListener) { m.lock.Lock() defer m.lock.Unlock() set, exists := m.subnetToVdrs[subnetID] if !exists { - set = newSet() + set = 
newSet(subnetID, m.callbackListeners) m.subnetToVdrs[subnetID] = set } diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index 781d2e784e1d..365d7ffdf7d7 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -11,12 +11,44 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" safemath "github.com/ava-labs/avalanchego/utils/math" ) +var _ ManagerCallbackListener = (*managerCallbackListener)(nil) + +type managerCallbackListener struct { + t *testing.T + onAdd func(ids.ID, ids.NodeID, *bls.PublicKey, ids.ID, uint64) + onWeight func(ids.ID, ids.NodeID, uint64, uint64) + onRemoved func(ids.ID, ids.NodeID, uint64) +} + +func (c *managerCallbackListener) OnValidatorAdded(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + if c.onAdd != nil { + c.onAdd(subnetID, nodeID, pk, txID, weight) + } else { + c.t.Fail() + } +} + +func (c *managerCallbackListener) OnValidatorRemoved(subnetID ids.ID, nodeID ids.NodeID, weight uint64) { + if c.onRemoved != nil { + c.onRemoved(subnetID, nodeID, weight) + } else { + c.t.Fail() + } +} + +func (c *managerCallbackListener) OnValidatorWeightChanged(subnetID ids.ID, nodeID ids.NodeID, oldWeight, newWeight uint64) { + if c.onWeight != nil { + c.onWeight(subnetID, nodeID, oldWeight, newWeight) + } else { + c.t.Fail() + } +} + func TestAddZeroWeight(t *testing.T) { require := require.New(t) @@ -363,7 +395,7 @@ func TestSample(t *testing.T) { require.Equal([]ids.NodeID{nodeID0}, sampled) _, err = m.Sample(subnetID, 2) - require.ErrorIs(err, sampler.ErrOutOfRange) + require.ErrorIs(err, errInsufficientWeight) nodeID1 := ids.GenerateTestNodeID() require.NoError(m.AddStaker(subnetID, nodeID1, nil, ids.Empty, math.MaxInt64-1)) @@ -411,142 +443,292 @@ func TestString(t *testing.T) { func TestAddCallback(t *testing.T) { 
require := require.New(t) - nodeID0 := ids.BuildTestNodeID([]byte{1}) - sk0, err := bls.NewSecretKey() + expectedSK, err := bls.NewSecretKey() require.NoError(err) - pk0 := bls.PublicFromSecretKey(sk0) - txID0 := ids.GenerateTestID() - weight0 := uint64(1) - m := NewManager() - subnetID := ids.GenerateTestID() - callCount := 0 - m.RegisterCallbackListener(subnetID, &callbackListener{ + var ( + expectedNodeID = ids.GenerateTestNodeID() + expectedPK = bls.PublicFromSecretKey(expectedSK) + expectedTxID = ids.GenerateTestID() + expectedWeight uint64 = 1 + expectedSubnetID0 = ids.GenerateTestID() + expectedSubnetID1 = ids.GenerateTestID() + + m = NewManager() + managerCallCount = 0 + setCallCount = 0 + ) + m.RegisterCallbackListener(&managerCallbackListener{ + t: t, + onAdd: func(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedWeight, weight) + managerCallCount++ + }, + }) + m.RegisterSetCallbackListener(expectedSubnetID0, &setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { - require.Equal(nodeID0, nodeID) - require.Equal(pk0, pk) - require.Equal(txID0, txID) - require.Equal(weight0, weight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedWeight, weight) + setCallCount++ }, }) - require.NoError(m.AddStaker(subnetID, nodeID0, pk0, txID0, weight0)) - // setup another subnetID - subnetID2 := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) - // should not be called for subnetID2 - require.Equal(1, callCount) + require.NoError(m.AddStaker(expectedSubnetID0, expectedNodeID, expectedPK, expectedTxID, expectedWeight)) + require.Equal(1, 
managerCallCount) // should be called for expectedSubnetID0 + require.Equal(1, setCallCount) // should be called for expectedSubnetID0 + + require.NoError(m.AddStaker(expectedSubnetID1, expectedNodeID, expectedPK, expectedTxID, expectedWeight)) + require.Equal(2, managerCallCount) // should be called for expectedSubnetID1 + require.Equal(1, setCallCount) // should not be called for expectedSubnetID1 } func TestAddWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.BuildTestNodeID([]byte{1}) - txID0 := ids.GenerateTestID() - weight0 := uint64(1) - weight1 := uint64(93) - - m := NewManager() - subnetID := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID, nodeID0, nil, txID0, weight0)) + expectedSK, err := bls.NewSecretKey() + require.NoError(err) - callCount := 0 - m.RegisterCallbackListener(subnetID, &callbackListener{ + var ( + expectedNodeID = ids.GenerateTestNodeID() + expectedPK = bls.PublicFromSecretKey(expectedSK) + expectedTxID = ids.GenerateTestID() + expectedOldWeight uint64 = 1 + expectedAddedWeight uint64 = 10 + expectedNewWeight = expectedOldWeight + expectedAddedWeight + expectedSubnetID0 = ids.GenerateTestID() + expectedSubnetID1 = ids.GenerateTestID() + + m = NewManager() + managerAddCallCount = 0 + managerChangeCallCount = 0 + setAddCallCount = 0 + setChangeCallCount = 0 + ) + + require.NoError(m.AddStaker(expectedSubnetID0, expectedNodeID, expectedPK, expectedTxID, expectedOldWeight)) + + m.RegisterCallbackListener(&managerCallbackListener{ + t: t, + onAdd: func(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedOldWeight, weight) + managerAddCallCount++ + }, + onWeight: func(subnetID ids.ID, nodeID ids.NodeID, oldWeight, newWeight uint64) { + 
require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedOldWeight, oldWeight) + require.Equal(expectedNewWeight, newWeight) + managerChangeCallCount++ + }, + }) + m.RegisterSetCallbackListener(expectedSubnetID0, &setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { - require.Equal(nodeID0, nodeID) - require.Nil(pk) - require.Equal(txID0, txID) - require.Equal(weight0, weight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedOldWeight, weight) + setAddCallCount++ }, onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { - require.Equal(nodeID0, nodeID) - require.Equal(weight0, oldWeight) - require.Equal(weight0+weight1, newWeight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedOldWeight, oldWeight) + require.Equal(expectedNewWeight, newWeight) + setChangeCallCount++ }, }) - require.NoError(m.AddWeight(subnetID, nodeID0, weight1)) - // setup another subnetID - subnetID2 := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) - require.NoError(m.AddWeight(subnetID2, nodeID0, weight1)) - // should not be called for subnetID2 - require.Equal(2, callCount) + require.Equal(1, managerAddCallCount) + require.Zero(managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Zero(setChangeCallCount) + + require.NoError(m.AddWeight(expectedSubnetID0, expectedNodeID, expectedAddedWeight)) + require.Equal(1, managerAddCallCount) + require.Equal(1, managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setChangeCallCount) + + require.NoError(m.AddStaker(expectedSubnetID1, expectedNodeID, expectedPK, expectedTxID, expectedOldWeight)) + require.Equal(2, managerAddCallCount) + require.Equal(1, managerChangeCallCount) + require.Equal(1, 
setAddCallCount) + require.Equal(1, setChangeCallCount) + + require.NoError(m.AddWeight(expectedSubnetID1, expectedNodeID, expectedAddedWeight)) + require.Equal(2, managerAddCallCount) + require.Equal(2, managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setChangeCallCount) } func TestRemoveWeightCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.BuildTestNodeID([]byte{1}) - txID0 := ids.GenerateTestID() - weight0 := uint64(93) - weight1 := uint64(92) - - m := NewManager() - subnetID := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID, nodeID0, nil, txID0, weight0)) + expectedSK, err := bls.NewSecretKey() + require.NoError(err) - callCount := 0 - m.RegisterCallbackListener(subnetID, &callbackListener{ + var ( + expectedNodeID = ids.GenerateTestNodeID() + expectedPK = bls.PublicFromSecretKey(expectedSK) + expectedTxID = ids.GenerateTestID() + expectedNewWeight uint64 = 1 + expectedRemovedWeight uint64 = 10 + expectedOldWeight = expectedNewWeight + expectedRemovedWeight + expectedSubnetID0 = ids.GenerateTestID() + expectedSubnetID1 = ids.GenerateTestID() + + m = NewManager() + managerAddCallCount = 0 + managerChangeCallCount = 0 + setAddCallCount = 0 + setChangeCallCount = 0 + ) + + require.NoError(m.AddStaker(expectedSubnetID0, expectedNodeID, expectedPK, expectedTxID, expectedOldWeight)) + + m.RegisterCallbackListener(&managerCallbackListener{ + t: t, + onAdd: func(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedOldWeight, weight) + managerAddCallCount++ + }, + onWeight: func(subnetID ids.ID, nodeID ids.NodeID, oldWeight, newWeight uint64) { + require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + 
require.Equal(expectedOldWeight, oldWeight) + require.Equal(expectedNewWeight, newWeight) + managerChangeCallCount++ + }, + }) + m.RegisterSetCallbackListener(expectedSubnetID0, &setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { - require.Equal(nodeID0, nodeID) - require.Nil(pk) - require.Equal(txID0, txID) - require.Equal(weight0, weight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedOldWeight, weight) + setAddCallCount++ }, onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { - require.Equal(nodeID0, nodeID) - require.Equal(weight0, oldWeight) - require.Equal(weight0-weight1, newWeight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedOldWeight, oldWeight) + require.Equal(expectedNewWeight, newWeight) + setChangeCallCount++ }, }) - require.NoError(m.RemoveWeight(subnetID, nodeID0, weight1)) - // setup another subnetID - subnetID2 := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) - require.NoError(m.RemoveWeight(subnetID2, nodeID0, weight1)) - // should not be called for subnetID2 - require.Equal(2, callCount) + require.Equal(1, managerAddCallCount) + require.Zero(managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Zero(setChangeCallCount) + + require.NoError(m.RemoveWeight(expectedSubnetID0, expectedNodeID, expectedRemovedWeight)) + require.Equal(1, managerAddCallCount) + require.Equal(1, managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setChangeCallCount) + + require.NoError(m.AddStaker(expectedSubnetID1, expectedNodeID, expectedPK, expectedTxID, expectedOldWeight)) + require.Equal(2, managerAddCallCount) + require.Equal(1, managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setChangeCallCount) + + require.NoError(m.RemoveWeight(expectedSubnetID1, 
expectedNodeID, expectedRemovedWeight)) + require.Equal(2, managerAddCallCount) + require.Equal(2, managerChangeCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setChangeCallCount) } -func TestValidatorRemovedCallback(t *testing.T) { +func TestRemoveCallback(t *testing.T) { require := require.New(t) - nodeID0 := ids.BuildTestNodeID([]byte{1}) - txID0 := ids.GenerateTestID() - weight0 := uint64(93) + expectedSK, err := bls.NewSecretKey() + require.NoError(err) - m := NewManager() - subnetID := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID, nodeID0, nil, txID0, weight0)) + var ( + expectedNodeID = ids.GenerateTestNodeID() + expectedPK = bls.PublicFromSecretKey(expectedSK) + expectedTxID = ids.GenerateTestID() + expectedWeight uint64 = 1 + expectedSubnetID0 = ids.GenerateTestID() + expectedSubnetID1 = ids.GenerateTestID() + + m = NewManager() + managerAddCallCount = 0 + managerRemoveCallCount = 0 + setAddCallCount = 0 + setRemoveCallCount = 0 + ) - callCount := 0 - m.RegisterCallbackListener(subnetID, &callbackListener{ + require.NoError(m.AddStaker(expectedSubnetID0, expectedNodeID, expectedPK, expectedTxID, expectedWeight)) + + m.RegisterCallbackListener(&managerCallbackListener{ + t: t, + onAdd: func(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedWeight, weight) + managerAddCallCount++ + }, + onRemoved: func(subnetID ids.ID, nodeID ids.NodeID, weight uint64) { + require.Contains([]ids.ID{expectedSubnetID0, expectedSubnetID1}, subnetID) + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedWeight, weight) + managerRemoveCallCount++ + }, + }) + m.RegisterSetCallbackListener(expectedSubnetID0, &setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, 
weight uint64) { - require.Equal(nodeID0, nodeID) - require.Nil(pk) - require.Equal(txID0, txID) - require.Equal(weight0, weight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedPK, pk) + require.Equal(expectedTxID, txID) + require.Equal(expectedWeight, weight) + setAddCallCount++ }, onRemoved: func(nodeID ids.NodeID, weight uint64) { - require.Equal(nodeID0, nodeID) - require.Equal(weight0, weight) - callCount++ + require.Equal(expectedNodeID, nodeID) + require.Equal(expectedWeight, weight) + setRemoveCallCount++ }, }) - require.NoError(m.RemoveWeight(subnetID, nodeID0, weight0)) - // setup another subnetID - subnetID2 := ids.GenerateTestID() - require.NoError(m.AddStaker(subnetID2, nodeID0, nil, txID0, weight0)) - require.NoError(m.AddWeight(subnetID2, nodeID0, weight0)) - // should not be called for subnetID2 - require.Equal(2, callCount) + require.Equal(1, managerAddCallCount) + require.Zero(managerRemoveCallCount) + require.Equal(1, setAddCallCount) + require.Zero(setRemoveCallCount) + + require.NoError(m.RemoveWeight(expectedSubnetID0, expectedNodeID, expectedWeight)) + require.Equal(1, managerAddCallCount) + require.Equal(1, managerRemoveCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setRemoveCallCount) + + require.NoError(m.AddStaker(expectedSubnetID1, expectedNodeID, expectedPK, expectedTxID, expectedWeight)) + require.Equal(2, managerAddCallCount) + require.Equal(1, managerRemoveCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setRemoveCallCount) + + require.NoError(m.RemoveWeight(expectedSubnetID1, expectedNodeID, expectedWeight)) + require.Equal(2, managerAddCallCount) + require.Equal(2, managerRemoveCallCount) + require.Equal(1, setAddCallCount) + require.Equal(1, setRemoveCallCount) } diff --git a/snow/validators/mock_manager.go b/snow/validators/mock_manager.go deleted file mode 100644 index b622ba11223a..000000000000 --- a/snow/validators/mock_manager.go +++ /dev/null @@ -1,226 +0,0 
@@ -// Code generated by MockGen. DO NOT EDIT. -// Source: snow/validators/manager.go -// -// Generated by this command: -// -// mockgen -source=snow/validators/manager.go -destination=snow/validators/mock_manager.go -package=validators -exclude_interfaces=SetCallbackListener -// - -// Package validators is a generated GoMock package. -package validators - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - bls "github.com/ava-labs/avalanchego/utils/crypto/bls" - set "github.com/ava-labs/avalanchego/utils/set" - gomock "go.uber.org/mock/gomock" -) - -// MockManager is a mock of Manager interface. -type MockManager struct { - ctrl *gomock.Controller - recorder *MockManagerMockRecorder -} - -// MockManagerMockRecorder is the mock recorder for MockManager. -type MockManagerMockRecorder struct { - mock *MockManager -} - -// NewMockManager creates a new mock instance. -func NewMockManager(ctrl *gomock.Controller) *MockManager { - mock := &MockManager{ctrl: ctrl} - mock.recorder = &MockManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManager) EXPECT() *MockManagerMockRecorder { - return m.recorder -} - -// AddStaker mocks base method. -func (m *MockManager) AddStaker(subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddStaker", subnetID, nodeID, pk, txID, weight) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddStaker indicates an expected call of AddStaker. -func (mr *MockManagerMockRecorder) AddStaker(subnetID, nodeID, pk, txID, weight any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStaker", reflect.TypeOf((*MockManager)(nil).AddStaker), subnetID, nodeID, pk, txID, weight) -} - -// AddWeight mocks base method. 
-func (m *MockManager) AddWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddWeight", subnetID, nodeID, weight) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddWeight indicates an expected call of AddWeight. -func (mr *MockManagerMockRecorder) AddWeight(subnetID, nodeID, weight any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWeight", reflect.TypeOf((*MockManager)(nil).AddWeight), subnetID, nodeID, weight) -} - -// Count mocks base method. -func (m *MockManager) Count(subnetID ids.ID) int { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Count", subnetID) - ret0, _ := ret[0].(int) - return ret0 -} - -// Count indicates an expected call of Count. -func (mr *MockManagerMockRecorder) Count(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockManager)(nil).Count), subnetID) -} - -// GetMap mocks base method. -func (m *MockManager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMap", subnetID) - ret0, _ := ret[0].(map[ids.NodeID]*GetValidatorOutput) - return ret0 -} - -// GetMap indicates an expected call of GetMap. -func (mr *MockManagerMockRecorder) GetMap(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMap", reflect.TypeOf((*MockManager)(nil).GetMap), subnetID) -} - -// GetValidator mocks base method. -func (m *MockManager) GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Validator, bool) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidator", subnetID, nodeID) - ret0, _ := ret[0].(*Validator) - ret1, _ := ret[1].(bool) - return ret0, ret1 -} - -// GetValidator indicates an expected call of GetValidator. 
-func (mr *MockManagerMockRecorder) GetValidator(subnetID, nodeID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidator", reflect.TypeOf((*MockManager)(nil).GetValidator), subnetID, nodeID) -} - -// GetValidatorIDs mocks base method. -func (m *MockManager) GetValidatorIDs(subnetID ids.ID) []ids.NodeID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetValidatorIDs", subnetID) - ret0, _ := ret[0].([]ids.NodeID) - return ret0 -} - -// GetValidatorIDs indicates an expected call of GetValidatorIDs. -func (mr *MockManagerMockRecorder) GetValidatorIDs(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorIDs", reflect.TypeOf((*MockManager)(nil).GetValidatorIDs), subnetID) -} - -// GetWeight mocks base method. -func (m *MockManager) GetWeight(subnetID ids.ID, nodeID ids.NodeID) uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetWeight", subnetID, nodeID) - ret0, _ := ret[0].(uint64) - return ret0 -} - -// GetWeight indicates an expected call of GetWeight. -func (mr *MockManagerMockRecorder) GetWeight(subnetID, nodeID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWeight", reflect.TypeOf((*MockManager)(nil).GetWeight), subnetID, nodeID) -} - -// RegisterCallbackListener mocks base method. -func (m *MockManager) RegisterCallbackListener(subnetID ids.ID, listener SetCallbackListener) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RegisterCallbackListener", subnetID, listener) -} - -// RegisterCallbackListener indicates an expected call of RegisterCallbackListener. 
-func (mr *MockManagerMockRecorder) RegisterCallbackListener(subnetID, listener any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCallbackListener", reflect.TypeOf((*MockManager)(nil).RegisterCallbackListener), subnetID, listener) -} - -// RemoveWeight mocks base method. -func (m *MockManager) RemoveWeight(subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveWeight", subnetID, nodeID, weight) - ret0, _ := ret[0].(error) - return ret0 -} - -// RemoveWeight indicates an expected call of RemoveWeight. -func (mr *MockManagerMockRecorder) RemoveWeight(subnetID, nodeID, weight any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWeight", reflect.TypeOf((*MockManager)(nil).RemoveWeight), subnetID, nodeID, weight) -} - -// Sample mocks base method. -func (m *MockManager) Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sample", subnetID, size) - ret0, _ := ret[0].([]ids.NodeID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Sample indicates an expected call of Sample. -func (mr *MockManagerMockRecorder) Sample(subnetID, size any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sample", reflect.TypeOf((*MockManager)(nil).Sample), subnetID, size) -} - -// String mocks base method. -func (m *MockManager) String() string { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "String") - ret0, _ := ret[0].(string) - return ret0 -} - -// String indicates an expected call of String. -func (mr *MockManagerMockRecorder) String() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockManager)(nil).String)) -} - -// SubsetWeight mocks base method. 
-func (m *MockManager) SubsetWeight(subnetID ids.ID, validatorIDs set.Set[ids.NodeID]) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubsetWeight", subnetID, validatorIDs) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SubsetWeight indicates an expected call of SubsetWeight. -func (mr *MockManagerMockRecorder) SubsetWeight(subnetID, validatorIDs any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubsetWeight", reflect.TypeOf((*MockManager)(nil).SubsetWeight), subnetID, validatorIDs) -} - -// TotalWeight mocks base method. -func (m *MockManager) TotalWeight(subnetID ids.ID) (uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TotalWeight", subnetID) - ret0, _ := ret[0].(uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TotalWeight indicates an expected call of TotalWeight. -func (mr *MockManagerMockRecorder) TotalWeight(subnetID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TotalWeight", reflect.TypeOf((*MockManager)(nil).TotalWeight), subnetID) -} diff --git a/snow/validators/set.go b/snow/validators/set.go index 5e7c81a2310e..b0c7e5de9ba6 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "math/big" + "slices" "strings" "sync" @@ -22,18 +23,23 @@ var ( errDuplicateValidator = errors.New("duplicate validator") errMissingValidator = errors.New("missing validator") errTotalWeightNotUint64 = errors.New("total weight is not a uint64") + errInsufficientWeight = errors.New("insufficient weight") ) // newSet returns a new, empty set of validators. 
-func newSet() *vdrSet { +func newSet(subnetID ids.ID, callbackListeners []ManagerCallbackListener) *vdrSet { return &vdrSet{ - vdrs: make(map[ids.NodeID]*Validator), - sampler: sampler.NewWeightedWithoutReplacement(), - totalWeight: new(big.Int), + subnetID: subnetID, + vdrs: make(map[ids.NodeID]*Validator), + totalWeight: new(big.Int), + sampler: sampler.NewWeightedWithoutReplacement(), + managerCallbackListeners: slices.Clone(callbackListeners), } } type vdrSet struct { + subnetID ids.ID + lock sync.RWMutex vdrs map[ids.NodeID]*Validator vdrSlice []*Validator @@ -43,7 +49,8 @@ type vdrSet struct { samplerInitialized bool sampler sampler.WeightedWithoutReplacement - callbackListeners []SetCallbackListener + managerCallbackListeners []ManagerCallbackListener + setCallbackListeners []SetCallbackListener } func (s *vdrSet) Add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { @@ -218,7 +225,7 @@ func (s *vdrSet) HasCallbackRegistered() bool { s.lock.RLock() defer s.lock.RUnlock() - return len(s.callbackListeners) > 0 + return len(s.setCallbackListeners) > 0 } func (s *vdrSet) Map() map[ids.NodeID]*GetValidatorOutput { @@ -251,9 +258,9 @@ func (s *vdrSet) sample(size int) ([]ids.NodeID, error) { s.samplerInitialized = true } - indices, err := s.sampler.Sample(size) - if err != nil { - return nil, err + indices, ok := s.sampler.Sample(size) + if !ok { + return nil, errInsufficientWeight } list := make([]ids.NodeID, size) @@ -305,11 +312,21 @@ func (s *vdrSet) prefixedString(prefix string) string { return sb.String() } +func (s *vdrSet) RegisterManagerCallbackListener(callbackListener ManagerCallbackListener) { + s.lock.Lock() + defer s.lock.Unlock() + + s.managerCallbackListeners = append(s.managerCallbackListeners, callbackListener) + for _, vdr := range s.vdrSlice { + callbackListener.OnValidatorAdded(s.subnetID, vdr.NodeID, vdr.PublicKey, vdr.TxID, vdr.Weight) + } +} + func (s *vdrSet) RegisterCallbackListener(callbackListener 
SetCallbackListener) { s.lock.Lock() defer s.lock.Unlock() - s.callbackListeners = append(s.callbackListeners, callbackListener) + s.setCallbackListeners = append(s.setCallbackListeners, callbackListener) for _, vdr := range s.vdrSlice { callbackListener.OnValidatorAdded(vdr.NodeID, vdr.PublicKey, vdr.TxID, vdr.Weight) } @@ -317,21 +334,30 @@ func (s *vdrSet) RegisterCallbackListener(callbackListener SetCallbackListener) // Assumes [s.lock] is held func (s *vdrSet) callWeightChangeCallbacks(node ids.NodeID, oldWeight, newWeight uint64) { - for _, callbackListener := range s.callbackListeners { + for _, callbackListener := range s.managerCallbackListeners { + callbackListener.OnValidatorWeightChanged(s.subnetID, node, oldWeight, newWeight) + } + for _, callbackListener := range s.setCallbackListeners { callbackListener.OnValidatorWeightChanged(node, oldWeight, newWeight) } } // Assumes [s.lock] is held func (s *vdrSet) callValidatorAddedCallbacks(node ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { - for _, callbackListener := range s.callbackListeners { + for _, callbackListener := range s.managerCallbackListeners { + callbackListener.OnValidatorAdded(s.subnetID, node, pk, txID, weight) + } + for _, callbackListener := range s.setCallbackListeners { callbackListener.OnValidatorAdded(node, pk, txID, weight) } } // Assumes [s.lock] is held func (s *vdrSet) callValidatorRemovedCallbacks(node ids.NodeID, weight uint64) { - for _, callbackListener := range s.callbackListeners { + for _, callbackListener := range s.managerCallbackListeners { + callbackListener.OnValidatorRemoved(s.subnetID, node, weight) + } + for _, callbackListener := range s.setCallbackListeners { callbackListener.OnValidatorRemoved(node, weight) } } diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 4554f930fa37..086e5c0b654a 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -11,16 +11,48 @@ import ( 
"github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" safemath "github.com/ava-labs/avalanchego/utils/math" ) +var _ SetCallbackListener = (*setCallbackListener)(nil) + +type setCallbackListener struct { + t *testing.T + onAdd func(ids.NodeID, *bls.PublicKey, ids.ID, uint64) + onWeight func(ids.NodeID, uint64, uint64) + onRemoved func(ids.NodeID, uint64) +} + +func (c *setCallbackListener) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + if c.onAdd != nil { + c.onAdd(nodeID, pk, txID, weight) + } else { + c.t.Fail() + } +} + +func (c *setCallbackListener) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { + if c.onRemoved != nil { + c.onRemoved(nodeID, weight) + } else { + c.t.Fail() + } +} + +func (c *setCallbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { + if c.onWeight != nil { + c.onWeight(nodeID, oldWeight, newWeight) + } else { + c.t.Fail() + } +} + func TestSetAddDuplicate(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) nodeID := ids.GenerateTestNodeID() require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) @@ -32,7 +64,7 @@ func TestSetAddDuplicate(t *testing.T) { func TestSetAddOverflow(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, math.MaxUint64)) @@ -44,7 +76,7 @@ func TestSetAddOverflow(t *testing.T) { func TestSetAddWeightOverflow(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -60,7 +92,7 @@ func TestSetAddWeightOverflow(t *testing.T) { func TestSetGetWeight(t *testing.T) { require := require.New(t) - s := newSet() + s := 
newSet(ids.Empty, nil) nodeID := ids.GenerateTestNodeID() require.Zero(s.GetWeight(nodeID)) @@ -83,7 +115,7 @@ func TestSetSubsetWeight(t *testing.T) { subset := set.Of(nodeID0, nodeID1) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(nodeID0, nil, ids.Empty, weight0)) require.NoError(s.Add(nodeID1, nil, ids.Empty, weight1)) @@ -98,7 +130,7 @@ func TestSetSubsetWeight(t *testing.T) { func TestSetRemoveWeightMissingValidator(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -109,7 +141,7 @@ func TestSetRemoveWeightMissingValidator(t *testing.T) { func TestSetRemoveWeightUnderflow(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) @@ -127,7 +159,7 @@ func TestSetRemoveWeightUnderflow(t *testing.T) { func TestSetGet(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) nodeID := ids.GenerateTestNodeID() _, ok := s.Get(nodeID) @@ -164,7 +196,7 @@ func TestSetGet(t *testing.T) { func TestSetLen(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) setLen := s.Len() require.Zero(setLen) @@ -195,7 +227,7 @@ func TestSetLen(t *testing.T) { func TestSetMap(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) m := s.Map() require.Empty(m) @@ -278,7 +310,7 @@ func TestSetWeight(t *testing.T) { vdr1 := ids.BuildTestNodeID([]byte{2}) weight1 := uint64(123) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(vdr0, nil, ids.Empty, weight0)) require.NoError(s.Add(vdr1, nil, ids.Empty, weight1)) @@ -292,7 +324,7 @@ func TestSetWeight(t *testing.T) { func TestSetSample(t *testing.T) { require := require.New(t) - s := newSet() + s := newSet(ids.Empty, nil) sampled, err := s.Sample(0) require.NoError(err) @@ -310,7 +342,7 @@ func 
TestSetSample(t *testing.T) { require.Equal([]ids.NodeID{nodeID0}, sampled) _, err = s.Sample(2) - require.ErrorIs(err, sampler.ErrOutOfRange) + require.ErrorIs(err, errInsufficientWeight) nodeID1 := ids.GenerateTestNodeID() require.NoError(s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1)) @@ -337,7 +369,7 @@ func TestSetString(t *testing.T) { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, }) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) require.NoError(s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1)) @@ -349,39 +381,6 @@ func TestSetString(t *testing.T) { require.Equal(expected, result) } -var _ SetCallbackListener = (*callbackListener)(nil) - -type callbackListener struct { - t *testing.T - onAdd func(ids.NodeID, *bls.PublicKey, ids.ID, uint64) - onWeight func(ids.NodeID, uint64, uint64) - onRemoved func(ids.NodeID, uint64) -} - -func (c *callbackListener) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { - if c.onAdd != nil { - c.onAdd(nodeID, pk, txID, weight) - } else { - c.t.Fail() - } -} - -func (c *callbackListener) OnValidatorRemoved(nodeID ids.NodeID, weight uint64) { - if c.onRemoved != nil { - c.onRemoved(nodeID, weight) - } else { - c.t.Fail() - } -} - -func (c *callbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight uint64) { - if c.onWeight != nil { - c.onWeight(nodeID, oldWeight, newWeight) - } else { - c.t.Fail() - } -} - func TestSetAddCallback(t *testing.T) { require := require.New(t) @@ -392,10 +391,10 @@ func TestSetAddCallback(t *testing.T) { txID0 := ids.GenerateTestID() weight0 := uint64(1) - s := newSet() + s := newSet(ids.Empty, nil) callCount := 0 require.False(s.HasCallbackRegistered()) - s.RegisterCallbackListener(&callbackListener{ + s.RegisterCallbackListener(&setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { require.Equal(nodeID0, nodeID) @@ -418,12 
+417,12 @@ func TestSetAddWeightCallback(t *testing.T) { weight0 := uint64(1) weight1 := uint64(93) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 require.False(s.HasCallbackRegistered()) - s.RegisterCallbackListener(&callbackListener{ + s.RegisterCallbackListener(&setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { require.Equal(nodeID0, nodeID) @@ -452,12 +451,12 @@ func TestSetRemoveWeightCallback(t *testing.T) { weight0 := uint64(93) weight1 := uint64(92) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 require.False(s.HasCallbackRegistered()) - s.RegisterCallbackListener(&callbackListener{ + s.RegisterCallbackListener(&setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { require.Equal(nodeID0, nodeID) @@ -485,12 +484,12 @@ func TestSetValidatorRemovedCallback(t *testing.T) { txID0 := ids.GenerateTestID() weight0 := uint64(93) - s := newSet() + s := newSet(ids.Empty, nil) require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 require.False(s.HasCallbackRegistered()) - s.RegisterCallbackListener(&callbackListener{ + s.RegisterCallbackListener(&setCallbackListener{ t: t, onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { require.Equal(nodeID0, nodeID) diff --git a/staking/asn1.go b/staking/asn1.go index afd817a95cd6..6796b900ebe3 100644 --- a/staking/asn1.go +++ b/staking/asn1.go @@ -5,7 +5,6 @@ package staking import ( "crypto" - "crypto/x509" "encoding/asn1" "fmt" @@ -30,12 +29,6 @@ var ( // id-ecPublicKey OBJECT IDENTIFIER ::= { // iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} - - // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L326-L350 - 
signatureAlgorithmVerificationDetails = map[x509.SignatureAlgorithm]x509.PublicKeyAlgorithm{ - x509.SHA256WithRSA: x509.RSA, - x509.ECDSAWithSHA256: x509.ECDSA, - } ) func init() { diff --git a/staking/certificate.go b/staking/certificate.go index b3e1a511f63f..5032c1fad2cb 100644 --- a/staking/certificate.go +++ b/staking/certificate.go @@ -3,26 +3,9 @@ package staking -import ( - "crypto" - "crypto/x509" -) +import "crypto" type Certificate struct { Raw []byte PublicKey crypto.PublicKey - // TODO: Remove after v1.11.x activates. - SignatureAlgorithm x509.SignatureAlgorithm -} - -// CertificateFromX509 converts an x509 certificate into a staking certificate. -// -// Invariant: The provided certificate must be a parseable into a staking -// certificate. -func CertificateFromX509(cert *x509.Certificate) *Certificate { - return &Certificate{ - Raw: cert.Raw, - PublicKey: cert.PublicKey, - SignatureAlgorithm: cert.SignatureAlgorithm, - } } diff --git a/staking/local/README.md b/staking/local/README.md index 7c843a69d9a5..f5cbe241101f 100644 --- a/staking/local/README.md +++ b/staking/local/README.md @@ -1,10 +1,10 @@ # Local Network Staking Keys -This folder contains the staking keys referenced by the local network genesis. +This folder contains the staking keys (TLS and BLS) referenced by the local network genesis. **NOTE:** These keys **are** intended to be public. They **must** only be used for local test networks. -Each staker's Base64 encoded keys are included below for ease of use with the `--staking-tls-key-file-content` and `--staking-tls-cert-file-content` flags. +Each staker's Base64 encoded keys are included below for ease of use with the `--staking-tls-key-file-content`, `--staking-tls-cert-file-content` and `--staking-signer-key-file-content` flags. 
## Staker1 @@ -26,6 +26,26 @@ LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBeW1Fa2NQMXRHS1dC LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TVRWYUdBOHpNREU1TURjeE1ERTIKTVRJeE5Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRREtZU1J3L1cwWXBZSC9NVFFoaUZyUjBtODlsNnlUdXpMcER0anVkci81Um5oSVB2dHFrN1lJR20vbTlsMjkKeHdSNEo1cjdTWkdzKzcweUJldGtiUytoN1B3SjJybVdEd2JyZHlKS3ZWQmhxZjhrU24rVlUyTGVQU0ljSmoxOQozTER5V2hWMUg0bHFOa1VrY0FSNzZGaDlxak12QTJwMHZKNjYrZURMWGxwaC9SWWFwUXg5SGdPai8wQm1BS01yCllDeW81QmhSaWgrT3VnZzhhSzRHOVBRVElBNUcyd1RXVzJRa0h4TS9RcHBGalpkL1h3UWVKMkg2dWJXTUZjNWYKdHRmNkF6cEp2RklEQnUvSkRDS1dpQ3U1bTh0NEdMOHcyT3JJeDhKczE5bEY0WVlFMmVvakNyZXFnUGk2NFMzbwpjcXdLc0RveVNUdzYvNWlLUTVCVVl3VVhYM3o3RVhPcUQ4U01IZWZVS2Vjemo0V3ZBYVpMelIyN3FYbTU1RWdSCllRQUlYNGZobVk3TmZTb3AzV2gwRW82MitKSG9NLzFnK1VnT1hsYm5XcFk5NU1nZDcvZndEU1dMdTRJeEUwL3UKcThWdWZJYmZDNHlyWThxbFRWZkFmZkkxbGRSZHZKalBKQlBpUTBDTnJPbDYwTFZwdHBrR2M5c2hIN3daMmJQMApiRW5ZS1RnTEFmT3pEOFV0NzFPMkFPSWE4MEExR05GbDRZbGUvTVNOSk9jUU9TcGd0V2RSRXpJVW9lbkFqZnV6Ck00T2VUcjRjUmc0K1ZZVEFvOUtIS3JpTjFEdWV3TnpHZDhXaktBVkhtY0lNanFJU0xUbHpNaGRzZG0rT21mUTYKT3Z5WDd2MEdUT0JiaFAwOU5HY3d3NUEwZ0N6WE4xOEZTNW94bnhlNk9HOUQwd0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUFxTDFUV0kxUFRNbTNKYVhraGRUQmU4dHNrNytGc0hBRnpUY0JWQnNCOGRrSk5HaHhiCmRsdTdYSW0rQXlHVW4wajhzaXo4cW9qS2JPK3JFUFYvSW1USDVXN1EzNnJYU2Rndk5VV3BLcktJQzVTOFBVRjUKVDRwSCtscFlJbFFIblRhS011cUgzbk8zSTQwSWhFaFBhYTJ3QXd5MmtEbHo0NmZKY3I2YU16ajZaZzQzSjVVSwpaaWQrQlFzaVdBVWF1NVY3Q3BDN0dNQ3g0WWRPWldXc1QzZEFzdWc5aHZ3VGU4MWtLMUpvVEgwanV3UFRCSDB0CnhVZ1VWSVd5dXdlTTFVd1lGM244SG13c
TZCNDZZbXVqaE1ES1QrM2xncVp0N2VaMVh2aWVMZEJSbFZRV3pPYS8KNlFZVGtycXdQWmlvS0lTdHJ4VkdZams0MHFFQ05vZENTQ0l3UkRnYm5RdWJSV3Jkc2x4aUl5YzVibEpOdU9WKwpqZ3Y1ZDJFZVVwd1VqdnBadUVWN0ZxUEtHUmdpRzBqZmw2UHNtczlnWVVYZCt5M3l0RzlIZW9ETm1MVFNUQkU0Cm5DUVhYOTM1UDIveE91b2s2Q3BpR3BQODlEWDd0OHlpd2s4TEZOblkzcnZ2NTBuVnk4a2VyVmRuZkhUbW9NWjkKL0lCZ29qU0lLb3Y0bG1QS2RnekZmaW16aGJzc1ZDYTRETy9MSWhURjdiUWJIMXV0L09xN25wZE9wTWpMWUlCRQo5bGFndlJWVFZGd1QvdXdyQ2NYSENiMjFiL3B1d1Y5NFNOWFZ3dDdCaGVGVEZCZHR4SnJSNGpqcjJUNW9kTGtYCjZuUWNZOFYyT1Q3S094bjBLVmM2cGwzc2FKVExtTCtILzNDdEFhbzlOdG11VURhcEtJTlJTVk55dmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== ``` +## Signer1 + +### Public Key + +``` +0x900c9b119b5c82d781d4b49be78c3fc7ae65f2b435b7ed9e3a8b9a03e475edff86d8a64827fec8db23a6f236afbf127d +``` + +### Proof of Possession + +``` +0x8bfd6d4d2086b2b8115d8f72f94095fefe5a6c07876b2accf51a811adf520f389e74a3d2152a6d90b521e2be58ffe468043dc5ea68b4c44410eb67f8dc24f13ed4f194000764c0e922cd254a3588a4962b1cb4db7de4bb9cda9d9d4d6b03f3d2 +``` + +### Key Base64 + +``` +QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDE= +``` + ## Staker2 ### NodeID @@ -46,6 +66,26 @@ LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBM1U2RWV0SjJ1amJr 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TVRsYUdBOHpNREU1TURjeE1ERTIKTVRJeE9Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRGRUb1I2MG5hNk51UjlpU0FVTXl6UFhKTk1XVlFiTHlUNS9pWkNpSjNCQjRZV01CaGZ4cEpXSmlXWGNNK3oKbkRncEp1eUNFZWg1RHA2WlkzRmU3azZIaHQ2Rm1GcERqd25qcFFtZGtFS1VnMDBHK0VsUFRwL1VzbXNQTCtKQQpzd1BxQlpXcE1CUzNkc1hRTnVuTU10TUdscmY1UzBsNlhYNHk3a2MvR1R4WWd2ZVdaOUp0Ui9tMktOZXIrd2pnCkJIcUo0clBxbkhCMzBzRFlQWmc5MUN6MUFrOEJiMncySTEwOHpRVmdLSzZlSXFOS1hKSi80cGl6U1pkVTQ5MjAKd014WUJwbmZEQWNobnhlaTlVL3YzUWJUN2VLVUkyZkdyK2hPV1RJV1U4MCtWZU9CdDhhNlA0c1M5QVFoNS82Rwo4cXdtQXFPM1lROWR4TjgyaXUvSDMrTitHR2EvTTByNXJFV3J6d0l1Rmh3S3Z5UWNwUFJCbTJ5UW5CbmhMOUc1CmtONm40T0JNMEtzZ1ozQ1lsSFpTZzRlV2NOZ0J0MVdDRnNRYzd2ZlVGYUpucjhRUDNwRjRWLzRCb2s3d1RPNUgKTjBBMUVZRVZZdVg1M05HbnJLVmUrRmc5K3hNT2dYUFdrVU5xZHZwSTlaYlYzWjBTNTg2NnFGMy92QlpyaGdDcgpLYzVFL3ZNZXhCUmU4S2k0d0txT05WaGk5V0dVY1JIdkZFaWtjKzdWclBqMFlhRzZ6VkxkK3VPQUpOODFmS09QCllvNFg0c1pyTXlQWWwzT2pHdE1oZlY0S3ZDYUxFcjFkdU9rbHFPNmNDdkdROGlBbExWeTNWSnlXNUdKMEQwS3kKaUFpcjRWTmRBSktvMVpnaUdpdkpMV3VsVGZqVWlmQ045bzExNUFpcUp4aXF3d0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUNRT2R3RDdlUkl4QnZiUUhVYyttMFRSekVhMTdCQ2ZjazFZMld3TjNUWlhER1NrUFZFCjB1dWpBOFNMM3FpOC9DVExHUnFJOVUzZ1JaSmYrdEpQQkYvUDAyMVBFbXlhRlRTNGh0eGNEeFR4dVp2MmpDbzkKK1hoVUV5dlJXaXRUbW95MWVzcTNta290VlFIZVRtUXZ3Q3NRSkFoY3RWQS9oUmRKd21NUHMxQjhReE9VSTZCcQpTT0JIYTlDc1hJelZPRnY4RnFFOTFQWkEybnMzMHNLUVlycm5iSDk5YXBmRjVXZ2xMVW95UHd4ZjJlM0FBQ2g3CmJlRWRrNDVpdnZLd2k1Sms4bnI4NUtESFlQbHFrcjBiZDlFaGw4eHBsYU5CZE1QZVJ1ZnFCRGx6dGpjTEozd28KbW5ydDk1Z1FNZVNvTEhZM1VOc0lSamJqNDN6SW11N3E5di9ERDlwcFFw
dTI2YVJEUm1CTmdMWkE5R001WG5iWgpSRmkzVnhMeXFhc0djU3phSHd6NWM3dk9CT2tPZGxxY1F6SVNSdldEeGlOMUhrQUwraGtpUUN1TWNoZ09SQWdNCnd6UG9vYThyZld0TElwT1hNcHd1VkdiLzhyR05MRVBvdm9DSzl6NmMrV1oremtSbzQrM1RRa09NWTY2WGh0N3IKQWhseTNsZXIrVHlnNmE1alhUOTJXS0MvTVhCWUF5MlpRTm95MjA0a05LZXZjSDdSMmNTa3hJVGQzbjVFYWNOeQo1TUF0Q05JazdKd2VMQ2g5ckxyTFVCdCtpNG40NHNQK0xWaGZXSGVtbmdBOENvRjRuNmVRMHBwMGl4WlRlbjBqCjR1TjBHMk5mK0plR01scW9PYkxXZElPZEgvcGJEcHBYR29aYUtLRGQ3K2JBNzRGbGU1VWg3KzFlM0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== ``` +## Signer2 + +### Public Key + +``` +0xa058ff27a4c570664bfa28e34939368539a1340867951943d0f56fa8aac13bc09ff64f341acf8cc0cef74202c2d6f9c0 +``` + +### Proof of Possession + +``` +0xac52195616344127df74d924e11701ca5e0867647ae36171d168fcf95d536e94061659b3edb924fffdb69dd5aa5cb2d703f8920c825c8f7b74dd0112c9c27814790bfcfa3a08e1d9358da1c54e1f6c0b4d9772432f79d7dceaa3a95c3a7e6adc +``` + +### Key Base64 + +``` +QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDI= +``` + ## Staker3 ### NodeID @@ -66,6 +106,26 @@ LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKSndJQkFBS0NBZ0VBdkpsUTA2QjI1RkJk 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpKYUdBOHpNREU1TURjeE1ERTIKTVRJeU1sb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRQzhtVkRUb0hia1VGMmdSZFZmcHlkWkxOS2VRMzhkNkhaRmtVTTNVMWRXTFpGU1pOdmFnTjhobFF2WS90UXUKM0E0MHAxOVdnS2J6V1pyZTN0ZzFBa3c4Snp0ZHo5Z2w0Uk1uMTQySUlPM0Npd0lwdGtFMEpvcGJaaG1HNWZBQwoybi9NWFF0ZmllSTNoemVSMDRMVzRKZ0xLemYzTm44eFpkbEJnSmZCbUw1cVVVbkU3TzdJYkpHR21hNmdTRDNlCndldEU2S1FadE50ZjB4Ukl2MDhkb1pLWXdUbDZJdGtkR0s3NnVmcXEwOThHVndXdkExd1N1bmU0K01GZ3M5TjQKZUZKajZKeXQ4NWZpSy9jd1B4N0tSZGdZZ0J6clpRNEVQc2hSbndXckJUaWVPT2FKdkFBMlJNeE1FWXpLUnJKQQpBc1lJMXp4dE55cUlVYUJUY3htYXorTlhVR1crd0h3SVRpYzBHcC9YUW0yTHdyL2x4SVY2T25BbEwzQ2diU1hpCnJTbm9HK2VIUSt2RHpCQWNSRGtUQWd2L0dVSXpsZnFUMlN0VEswMnVJQmdKWXp2RlRHNHBsSGl0Y2NSZnk4d3gKc2g1Wjh4Rzk5bG1QUVF0THNubFFBVitMaTA2Q2I4Q0g0aFVWb2lXaVZzNVFBYWhxV212NWZwb1gwRXMyNlJ5VQpIWEdiakUyMDJweU1NQTdqVWVyVVZLTWlqT29HWnRjSDZ6QjRwL2RKMFR0VG9Sd09nckE3TkNJOUFZVnRxVlhyClhHL3Vkajh1cjJyMWJUVndJYkhzT2VURVAzZ1kwbUhSV20yRS9iTGp0OXZiWUlSVXhSOHhXbkxrYmVCemlOVHcKZyszNmpkREYrNmd1M2NVei9uYlNuOFlZK1kxampYdU0zbHFGOGlNYUFvYmh1d0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUFlMmtDMEhqS1pVK2RsblUyUmxmQnBCNFFnenpyRkU1TjlBOEYxTWxFNHZWM0F6Q2cxClJWZEhQdm5pWHpkTmhEaWlmbEswbC9jbnJGdjJYMVR6WU1yckE2NzcvdXNIZjJCdzB4am0vaXBIT3Q1Vis0VE4KbVpBSUE0SVBsMDlnUDI4SVpMYzl4U3VxNEZvSGVNOE9UeGh0dE9sSU5ocXBHOVA1ZDZiUGV6VzZaekkzQ2RQUApDRjY5eEs0R0Zsai9OUW5Bb0ZvZ2lkNG9qWVlOVGovY000UFlRVTJLYnJsekx5UHVVay9DZ3dlZlhMTUg4Ny9ICmUza1BEZXY4MFRqdjJQbTVuRDkzN2ZaZmdyRW95b2xLeGlSVmNmWlZNeFI3cWhQaGl6anVlRDBEQWtmUUlzN0wKWVZTeXgvcWpFdjJiQllhaW01UlFha1VlSFIxWHU1WGovazV6cjMzdDk3
OWVkZTUwYnlRcmNXbTRINUp4bkVwRApKeEpuRmZET1U2bzE0U0tHSFNyYW81WjRDM2RJNTVETTg0V0xBU25sTUk1Qks0WHRTM25vdExOekc4ZGZXV2hUCjltMEhjcnkrd1BORGNHcjhNdGoxbG9zLzBiTURxTUhDNGpjRlcxaHJYQ1VVczlSWXpFK04veG9xd0NRU2dOMVAKRTczdVhUeVNXajVvdk1SNVRQRjZQaGNmdExCL096aXFPN0Z2ZXJFQnB2R0dIVUFuVVQ2MUp0am9kalhQYkVkagowVmd5TU9CWTJ5NTNIVFhueDNkeGVGWmtVZFJYL1ZaWXk4dE1LM01UWSs3VUlVNWNXWW5DWkFvNUxOY2MwdWtSClM2V1M5KzZlYVE2WFJqaGZOVWp4OWE3RnpxYXBXZHRUZWRwaXBtQlAxTmphcDNnMjlpVXVWbkxRZWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== ``` +## Signer3 + +### Public Key + +``` +0xa10b6955a85684a0f5c94b8381f04506f1bee60625927d372323f78b3d30196cc56c8618c77eaf429298e74673d832c3 +``` + +### Proof of Possession + +``` +0x8d505f7b53960813f1e007f04702ae1bd524cce036b4695fbf8a16eb50b35cdbdd4eedec2b0ce281f35ae36e3ac29fb40867c94cabe2ba0b462f177dd8c3d293b0586b92d392e8278711fb434fe2601ae1b2e0867cfd128180c936a8010c5552 +``` + +### Key Base64 + +``` +QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDM= +``` + ## Staker4 ### NodeID @@ -86,6 +146,26 @@ LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBMlp3NkF4eE5wNC9O 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpWYUdBOHpNREU1TURjeE1ERTIKTVRJeU5Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRFpuRG9ESEUybmo4MnhEakgwVGI3T1hNcVFESHoremJMaWR0Nk1TSTFYQjN2T0FJRWlQcXJ0ZW5HbnFSYlYKRmNtNUdaeHZ4aDRZUUQ4Q2pLU2sxcWdaSmN6czBEUFNpR1E4RWZsNFBHTzR4bkVibGxnTDNQVVJQV3A3bUVWMwpvaDZmeElDZ1FLVEJsVDY3MUVuRnpCNWx5SldwdW1SenZBMXZ5aEJNc1k4YU8reGRxNUxVRmx0WXpCZHZwZ0xYClZhRHdIWlEyUFFFV3RGMGQwSk8yTjBXRkZER05teDZuOHBLU2VJQVZEc1R3WkNaSytGQ2VlRXlvR2ZYc0lOc2MKMHlDTVFzbGF3a2ZPTXFBOXlCVjNKaTZRbUZZS3lHWXQ2NU1XR05xUEE0WHJJeWxpS3dDQ1h3ejltamFXeU43cgpBeXc5Y1dsTE1PRE5tRE9SV3pHUlo1MjkwTUVBRUlac3FqWUhWaXRSVE0vUm5OSWFkVG9aR08weTV1QWtNMTRjCm1Udm5zSzFDUDkycXRmU2lzcTc1Vy9JOTFkclRob0V0VEs3OFVHT2wvNVExWUJSMDhGK3RTVVdaV3lIZUk2VU8KQlVDR0MyYkN0bXpLTWw3dlUyNWxHNm1iQ1IxSnVRaTZSWXBuZk1qWEgzNmxWNFM3ZlR2U3d3dVIwM2gyRjNIMQplRmtXTkcybGJGclcwZHpEQ1BnM2xYd21GUTY1aFVjUWhjdHpub0J6NUMxbEYyZVcwM3d1Vmd4aW5udVZsSkhqCnkvR3JxbVdzQVNuMVBEdVZzNGs3azZESmZ3eUhBaUEwdXhYckdmeFl2cDdIOGo0KzJZT21XaVdsNXhZZ3JFRGoKdXI1bjhaeDQ2UEhRZXIyQXZxM3NiRUdFZTFNQ3RYSmxqM2RyZDVIaW0zbStOUUlEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUE0MGF4MGRBTXJiV2lrYUo1czZramFHa1BrWXV4SE5KYncwNDdEbzBoancrbmNYc3hjClFESG1XY29ISHBnTVFDeDArdnA4eStvS1o0cG5xTmZHU3VPVG83L2wwNW9RVy9OYld3OW1Id1RpTE1lSTE4L3gKQXkrNUxwT2FzdytvbXFXTGJkYmJXcUwwby9SdnRCZEsycmtjSHpUVnpFQ2dHU294VUZmWkQrY2syb2RwSCthUgpzUVZ1ODZBWlZmY2xOMm1qTXlGU3FNSXRxUmNWdzdycXIzWHk2RmNnUlFQeWtVbnBndUNFZ2NjOWM1NGMxbFE5ClpwZGR0NGV6WTdjVGRrODZvaDd5QThRRmNodnRFOVpiNWRKNVZ1OWJkeTlpZzFreXNjUFRtK1NleWhYUmNoVW8KcWw0SC9jekdCVk1IVVk0MXdZMlZGejdIaXRFQ2NUQUlwUzZRdmN4eGdZ
ZXZHTmpaWnh5WnZFQThTWXBMTVp5YgpvbWs0ZW5EVExkL3hLMXlGN1ZGb2RUREV5cTYzSUFtME5UUVpVVnZJRGZKZXV6dU56NTV1eGdkVXEyUkxwYUplCjBidnJ0OU9ieitmNWoyam9uYjJlMEJ1dWN3U2RUeUZYa1VDeE1XK3BpSVVHa3lyZ3VBaGxjSG9oRExFbzJ1Qi8KaVE0Zm9zR3Fxc2w0N2IrVGV6VDVwU1NibGtnVWppd3o2ZURwTTRsUXB4MjJNeHNIVmx4RkhyY0JObTBUZDkydgpGaXhybWxsYW1BWmJFejF0Qi8vMGJpcEthT09adWhBTkpmcmdOOEJDNnYyYWhsNC9TQnV1dDA5YTBBenl4cXBwCnVDc3lUbmZORWQxVzZjNm5vYXEyNHMrN1c3S0tMSWVrdU5uMU51bm5IcUtxcmlFdUgxeGx4eFBqWUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== ``` +## Signer4 + +### Public Key + +``` +0xaccd61ceb90c61628aa0fa34acab27ecb08f6897e9ccad283578c278c52109f9e10e4f8bc31aa6d7905c4e1623de367e +``` + +### Proof of Possession + +``` +0x910082e2b61fe4895b4a8f754c9e3a93c346156363acf67546a87e4bc1db7bbfa3239daa5292ad9bc30a11f60e59bbd30b375785b71fe45abd154d717b6471c2406df2534297305ae93d6abeb38fc461170fc0b74b8aa4550f30257a264c75b0 +``` + +### Key Base64 + +``` +QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDQ= +``` + ## Staker5 ### NodeID @@ -104,4 +184,24 @@ LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBNEN1YStiM1I3U1JS ``` 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpsYUdBOHpNREU1TURjeE1ERTIKTVRJeU9Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRGdLNXI1dmRIdEpGRWd3N2hHRS9sekthSGN2d3pyMzJhcm1xMGs5dFljaEpYZlQzazFqMWxYdEJBZGNVTjMKZ1NSS2pnekgvdmpibjBlYTNBaURDVWQyTWNrL24wS2NKWjQzUzVJN1pqUDdyYmF2Mjk2YktDWjFIcjdyNWdYWQpGaGsrM2FVc1ZmRFVxQVBCd3lQOEtlVjMxQVJWQS9zK1dQZVdxczY5UVhUZHlKdUJZRTVwcjQwdjFTZitlYlVJCm5aMzd1R1kza2lPMEV4L0pnY29Rc0dKenJXRC96dGJSQ0ZJdnJkTkpaZDBwR3ZNbG1US3A3WHNNUjNjcHZxazcKNzAvL01MQ2R5R1cvMUlBclRTdUQxdmQ3bUJYMUp5VlhLeWNZTjB2SU90Ymd4UE9GdXRVeXFET2VQN281MXE0aQpQUzNkQ1JnZm1uL2hXTHd5K0N0SmUwQkdLc2I0dGswdEt4bzBzZTh2OUpBOG1VdG5tem1NdDRZOWppak9yQ09CCjdYd1dLbUpZRW04TjVVYmN5NmNwMm9MOHZRVnR6ejNQWHJrRnQrM2NGdDFqcmpkcFFZZ0g0anlra1dEZU9qRWYKeTFGQ3d6c05SdWRMVHZMaGZMbjg2L1pUNGNMWjlKSTcvV1cwSVBDOEZjN2xoem5KK2JJUVVlRW5kYUdkZ1ZreAp1RWcwTXhkck1yMGpVMElGb1h5U1JYTlJ6Y0RXWlNoRWpCVHY3dG5GeExtb05VK3VKYi9LcE1INnNSWWkzenM4CjVlY2FNS055RytMRG1CYWhVbEh4NWhLQUg0OU84ODU1K0FNaHNnOTFPTlpKbGRqUVgwb1pySUt6SzVCcHNxZVQKbDRjMll0L2ZBTGlaYWVGazFwQkVzdlZlTU9CQ0l1V0UrYjRVSUVhTEFPaHhmd0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUIrMlZYbnFScWZHN0gyL0swbGd6eFQrWDlyMXUrWURuMEVhVUdBRzcxczcwUW5xYnBuClg3dEJtQ0tMTjZYZ1BMMEhyTjkzM253aVlybWZiOFMzM3paN2t3OEdKRHZhVGFtTE55ZW00LzhxVEJRbW5Sd2UKNnJRN1NZMmw3M0lnODdtUjBXVGkrclRuVFR0YzY2Ky9qTHRGZWFqMFljbDloQlpYSEtpVUxTR2hzYlVid3RregppdU5sQU5ob05LWE5JQUJSSW1VcTZPd1loRVFOMER3SFhqNzl3a3B5RFlqS1p3SHVFWlVrbmM4UGwyb1FQQmtlCm1pbDN0c3J2R1Jrd2hpc25YWDd0cWg2cldLVlpOSmtPNjhoeTdYTzlhVFhqYmNCLzdZMUs4M0lTTkV5R1BzSC8KcHdGeWQvajhPNG1vZHdoN1Vsd3cxL2h3Y3FucWlFRkUzS3p4WDJwTWg3
VnhlQW1YMnQ1ZVhGWk9sUngxbGVjTQpYUmtWdTE5bFlES1FIR1NyR3huZytCRmxTT0I5NmU1a1hJYnVJWEtwUEFBQ29CUS9KWllidEhrczlIOE90TllPClAyam9xbW5ROXdHa0U1Y28xSWkvL2oydHVvQ1JDcEs4Nm1tYlRseU5ZdksrMS9ra0tjc2FpaVdYTnJRc3JJRFoKQkZzMEZ3WDVnMjRPUDUrYnJ4VGxSWkUwMVI2U3Q4bFFqNElVd0FjSXpHOGZGbU1DV2FZYXZyQ1pUZVlhRWl5RgpBMFgyVkEvdlo3eDlENVA5WjVPYWtNaHJNVytoSlRZcnBIMXJtNktSN0IyNmlVMmtKUnhUWDd4UTlscmtzcWZCCjdsWCtxMGloZWVZQTRjSGJHSk5Xd1dnZCtGUXNLL1BUZWl5cjRyZnF1dHV0ZFdBMEl4b0xSYzNYRnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== -``` \ No newline at end of file +``` + +## Signer5 + +### Public Key + +``` +0x8048109c3da13de0700f9f3590c3270bfc42277417f6d0cc84282947e1a1f8b4980fd3e3fe223acf0f56a5838890814a +``` + +### Proof of Possession + +``` +0xb034e0d0ec808b7ec456a6d88bdad7b32854794605a11139d70430d81fb93834a3f81d8969042952daff335fec51018016a7ecb19d38597c5743a4eb3fb945ebe28a4250dd7bcb7a192c98d2fcaf15320a9bee239c66ddf61bb24f87c6e91971 +``` + +### Key Base64 + +``` +QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDU= +``` diff --git a/staking/local/signer1.key b/staking/local/signer1.key new file mode 100644 index 000000000000..92c25fc3dd3e --- /dev/null +++ b/staking/local/signer1.key @@ -0,0 +1 @@ +AvalancheLocalNetworkValidator01 \ No newline at end of file diff --git a/staking/local/signer2.key b/staking/local/signer2.key new file mode 100644 index 000000000000..8a5a43713ae7 --- /dev/null +++ b/staking/local/signer2.key @@ -0,0 +1 @@ +AvalancheLocalNetworkValidator02 \ No newline at end of file diff --git a/staking/local/signer3.key b/staking/local/signer3.key new file mode 100644 index 000000000000..4df9ad937072 --- /dev/null +++ b/staking/local/signer3.key @@ -0,0 +1 @@ +AvalancheLocalNetworkValidator03 \ No newline at end of file diff --git a/staking/local/signer4.key b/staking/local/signer4.key new file mode 100644 index 000000000000..ce17f66f4b91 --- /dev/null +++ b/staking/local/signer4.key @@ -0,0 +1 @@ +AvalancheLocalNetworkValidator04 \ No newline at end of file diff --git 
a/staking/local/signer5.key b/staking/local/signer5.key new file mode 100644 index 000000000000..dca852aa9cb9 --- /dev/null +++ b/staking/local/signer5.key @@ -0,0 +1 @@ +AvalancheLocalNetworkValidator05 \ No newline at end of file diff --git a/staking/parse.go b/staking/parse.go index 4f9a50f05131..28ae02cbc644 100644 --- a/staking/parse.go +++ b/staking/parse.go @@ -8,7 +8,6 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rsa" - "crypto/x509" "encoding/asn1" "errors" "fmt" @@ -53,25 +52,13 @@ var ( ErrUnknownPublicKeyAlgorithm = errors.New("staking: unknown public key algorithm") ) -// ParseCertificate parses a single certificate from the given ASN.1 DER data. -// -// TODO: Remove after v1.11.x activates. -func ParseCertificate(der []byte) (*Certificate, error) { - x509Cert, err := x509.ParseCertificate(der) - if err != nil { - return nil, err - } - stakingCert := CertificateFromX509(x509Cert) - return stakingCert, ValidateCertificate(stakingCert) -} - -// ParseCertificatePermissive parses a single certificate from the given ASN.1. +// ParseCertificate parses a single certificate from the given ASN.1. // // This function does not validate that the certificate is valid to be used // against normal TLS implementations. 
// // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/parser.go#L789-L968 -func ParseCertificatePermissive(bytes []byte) (*Certificate, error) { +func ParseCertificate(bytes []byte) (*Certificate, error) { if len(bytes) > MaxCertificateLen { return nil, ErrCertificateTooLarge } @@ -126,56 +113,55 @@ func ParseCertificatePermissive(bytes []byte) (*Certificate, error) { if !input.ReadASN1BitString(&spk) { return nil, ErrMalformedSubjectPublicKey } - publicKey, signatureAlgorithm, err := parsePublicKey(pkAI, spk) + publicKey, err := parsePublicKey(pkAI, spk) return &Certificate{ - Raw: bytes, - SignatureAlgorithm: signatureAlgorithm, - PublicKey: publicKey, + Raw: bytes, + PublicKey: publicKey, }, err } // Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/parser.go#L215-L306 -func parsePublicKey(oid asn1.ObjectIdentifier, publicKey asn1.BitString) (crypto.PublicKey, x509.SignatureAlgorithm, error) { +func parsePublicKey(oid asn1.ObjectIdentifier, publicKey asn1.BitString) (crypto.PublicKey, error) { der := cryptobyte.String(publicKey.RightAlign()) switch { case oid.Equal(oidPublicKeyRSA): pub := &rsa.PublicKey{N: new(big.Int)} if !der.ReadASN1(&der, cryptobyte_asn1.SEQUENCE) { - return nil, 0, ErrInvalidRSAPublicKey + return nil, ErrInvalidRSAPublicKey } if !der.ReadASN1Integer(pub.N) { - return nil, 0, ErrInvalidRSAModulus + return nil, ErrInvalidRSAModulus } if !der.ReadASN1Integer(&pub.E) { - return nil, 0, ErrInvalidRSAPublicExponent + return nil, ErrInvalidRSAPublicExponent } if pub.N.Sign() <= 0 { - return nil, 0, ErrRSAModulusNotPositive + return nil, ErrRSAModulusNotPositive } if bitLen := pub.N.BitLen(); bitLen != allowedRSALargeModulusLen && bitLen != allowedRSASmallModulusLen { - return nil, 0, fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) + return nil, fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) } if pub.N.Bit(0) == 0 { - return nil, 0, ErrRSAModulusIsEven + return nil, ErrRSAModulusIsEven } 
if pub.E != allowedRSAPublicExponentValue { - return nil, 0, fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) + return nil, fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) } - return pub, x509.SHA256WithRSA, nil + return pub, nil case oid.Equal(oidPublicKeyECDSA): namedCurve := elliptic.P256() x, y := elliptic.Unmarshal(namedCurve, der) if x == nil { - return nil, 0, ErrFailedUnmarshallingEllipticCurvePoint + return nil, ErrFailedUnmarshallingEllipticCurvePoint } return &ecdsa.PublicKey{ Curve: namedCurve, X: x, Y: y, - }, x509.ECDSAWithSHA256, nil + }, nil default: - return nil, 0, ErrUnknownPublicKeyAlgorithm + return nil, ErrUnknownPublicKeyAlgorithm } } diff --git a/staking/parse_test.go b/staking/parse_test.go index 60f6ee8f3240..41704e3b71dc 100644 --- a/staking/parse_test.go +++ b/staking/parse_test.go @@ -11,32 +11,12 @@ import ( _ "embed" ) -var ( - //go:embed large_rsa_key.cert - largeRSAKeyCert []byte - - parsers = []struct { - name string - parse func([]byte) (*Certificate, error) - }{ - { - name: "ParseCertificate", - parse: ParseCertificate, - }, - { - name: "ParseCertificatePermissive", - parse: ParseCertificatePermissive, - }, - } -) +//go:embed large_rsa_key.cert +var largeRSAKeyCert []byte func TestParseCheckLargeCert(t *testing.T) { - for _, parser := range parsers { - t.Run(parser.name, func(t *testing.T) { - _, err := parser.parse(largeRSAKeyCert) - require.ErrorIs(t, err, ErrCertificateTooLarge) - }) - } + _, err := ParseCertificate(largeRSAKeyCert) + require.ErrorIs(t, err, ErrCertificateTooLarge) } func BenchmarkParse(b *testing.B) { @@ -44,46 +24,10 @@ func BenchmarkParse(b *testing.B) { require.NoError(b, err) bytes := tlsCert.Leaf.Raw - for _, parser := range parsers { - b.Run(parser.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - _, err = parser.parse(bytes) - require.NoError(b, err) - } - }) - } -} -func FuzzParseCertificate(f *testing.F) { - tlsCert, err := NewTLSCert() - require.NoError(f, err) - - 
f.Add(tlsCert.Leaf.Raw) - f.Add(largeRSAKeyCert) - f.Fuzz(func(t *testing.T, certBytes []byte) { - require := require.New(t) - - // Verify that any certificate that can be parsed by ParseCertificate - // can also be parsed by ParseCertificatePermissive. - { - strictCert, err := ParseCertificate(certBytes) - if err == nil { - permissiveCert, err := ParseCertificatePermissive(certBytes) - require.NoError(err) - require.Equal(strictCert, permissiveCert) - } - } - - // Verify that any certificate that can't be parsed by - // ParseCertificatePermissive also can't be parsed by ParseCertificate. - { - cert, err := ParseCertificatePermissive(certBytes) - if err == nil { - require.NoError(ValidateCertificate(cert)) - } else { - _, err = ParseCertificate(certBytes) - require.Error(err) //nolint:forbidigo - } - } - }) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err = ParseCertificate(bytes) + require.NoError(b, err) + } } diff --git a/staking/tls.go b/staking/tls.go index fbb5d9e488ae..c63dd84c3dfe 100644 --- a/staking/tls.go +++ b/staking/tls.go @@ -5,8 +5,9 @@ package staking import ( "bytes" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" - "crypto/rsa" "crypto/tls" "crypto/x509" "encoding/pem" @@ -115,9 +116,9 @@ func NewTLSCert() (*tls.Certificate, error) { // Returns the PEM byte representations of both. 
func NewCertAndKeyBytes() ([]byte, []byte, error) { // Create key to sign cert with - key, err := rsa.GenerateKey(rand.Reader, 4096) + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { - return nil, nil, fmt.Errorf("couldn't generate rsa key: %w", err) + return nil, nil, fmt.Errorf("couldn't generate ecdsa key: %w", err) } // Create self-signed staking cert @@ -125,10 +126,10 @@ func NewCertAndKeyBytes() ([]byte, []byte, error) { SerialNumber: big.NewInt(0), NotBefore: time.Date(2000, time.January, 0, 0, 0, 0, 0, time.UTC), NotAfter: time.Now().AddDate(100, 0, 0), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment, + KeyUsage: x509.KeyUsageDigitalSignature, BasicConstraintsValid: true, } - certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &key.PublicKey, key) + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, key.Public(), key) if err != nil { return nil, nil, fmt.Errorf("couldn't create certificate: %w", err) } diff --git a/staking/tls_test.go b/staking/tls_test.go index 6de376c2a538..31762542802d 100644 --- a/staking/tls_test.go +++ b/staking/tls_test.go @@ -29,3 +29,10 @@ func TestMakeKeys(t *testing.T) { require.NoError(cert.Leaf.CheckSignature(cert.Leaf.SignatureAlgorithm, msg, sig)) } + +func BenchmarkNewCertAndKeyBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _, err := NewCertAndKeyBytes() + require.NoError(b, err) + } +} diff --git a/staking/verify.go b/staking/verify.go index dd4255455ff0..450daf4e92f4 100644 --- a/staking/verify.go +++ b/staking/verify.go @@ -6,17 +6,12 @@ package staking import ( "crypto" "crypto/ecdsa" - "crypto/elliptic" "crypto/rsa" - "crypto/x509" "errors" - "fmt" ) var ( ErrUnsupportedAlgorithm = errors.New("staking: cannot verify signature: unsupported algorithm") - ErrPublicKeyAlgoMismatch = errors.New("staking: signature algorithm specified different public key type") - 
ErrInvalidECDSAPublicKey = errors.New("staking: invalid ECDSA public key") ErrECDSAVerificationFailure = errors.New("staking: ECDSA verification failure") ) @@ -45,50 +40,3 @@ func CheckSignature(cert *Certificate, msg []byte, signature []byte) error { return ErrUnsupportedAlgorithm } } - -// ValidateCertificate verifies that this certificate conforms to the required -// staking format assuming that it was already able to be parsed. -// -// TODO: Remove after v1.11.x activates. -func ValidateCertificate(cert *Certificate) error { - if len(cert.Raw) > MaxCertificateLen { - return ErrCertificateTooLarge - } - - pubkeyAlgo, ok := signatureAlgorithmVerificationDetails[cert.SignatureAlgorithm] - if !ok { - return ErrUnsupportedAlgorithm - } - - switch pub := cert.PublicKey.(type) { - case *rsa.PublicKey: - if pubkeyAlgo != x509.RSA { - return signaturePublicKeyAlgoMismatchError(pubkeyAlgo, pub) - } - if bitLen := pub.N.BitLen(); bitLen != allowedRSALargeModulusLen && bitLen != allowedRSASmallModulusLen { - return fmt.Errorf("%w: %d", ErrUnsupportedRSAModulusBitLen, bitLen) - } - if pub.N.Bit(0) == 0 { - return ErrRSAModulusIsEven - } - if pub.E != allowedRSAPublicExponentValue { - return fmt.Errorf("%w: %d", ErrUnsupportedRSAPublicExponent, pub.E) - } - return nil - case *ecdsa.PublicKey: - if pubkeyAlgo != x509.ECDSA { - return signaturePublicKeyAlgoMismatchError(pubkeyAlgo, pub) - } - if pub.Curve != elliptic.P256() { - return ErrInvalidECDSAPublicKey - } - return nil - default: - return ErrUnsupportedAlgorithm - } -} - -// Ref: https://github.com/golang/go/blob/go1.19.12/src/crypto/x509/x509.go#L812-L814 -func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo x509.PublicKeyAlgorithm, pubKey any) error { - return fmt.Errorf("%w: expected an %s public key, but have public key of type %T", ErrPublicKeyAlgoMismatch, expectedPubKeyAlgo, pubKey) -} diff --git a/staking/verify_test.go b/staking/verify_test.go new file mode 100644 index 000000000000..9c8ca5ad98cb --- 
/dev/null +++ b/staking/verify_test.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package staking + +import ( + "crypto" + "crypto/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/hashing" +) + +func BenchmarkSign(b *testing.B) { + tlsCert, err := NewTLSCert() + require.NoError(b, err) + + signer := tlsCert.PrivateKey.(crypto.Signer) + msg := []byte("msg") + hash := hashing.ComputeHash256(msg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := signer.Sign(rand.Reader, hash, crypto.SHA256) + require.NoError(b, err) + } +} + +func BenchmarkVerify(b *testing.B) { + tlsCert, err := NewTLSCert() + require.NoError(b, err) + + signer := tlsCert.PrivateKey.(crypto.Signer) + msg := []byte("msg") + signature, err := signer.Sign( + rand.Reader, + hashing.ComputeHash256(msg), + crypto.SHA256, + ) + require.NoError(b, err) + + certBytes := tlsCert.Leaf.Raw + cert, err := ParseCertificate(certBytes) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := CheckSignature(cert, msg, signature) + require.NoError(b, err) + } +} diff --git a/subnets/config.go b/subnets/config.go index 9a12c550b833..f5bd223b0705 100644 --- a/subnets/config.go +++ b/subnets/config.go @@ -15,21 +15,7 @@ import ( var errAllowedNodesWhenNotValidatorOnly = errors.New("allowedNodes can only be set when ValidatorOnly is true") -type GossipConfig struct { - AcceptedFrontierValidatorSize uint `json:"gossipAcceptedFrontierValidatorSize" yaml:"gossipAcceptedFrontierValidatorSize"` - AcceptedFrontierNonValidatorSize uint `json:"gossipAcceptedFrontierNonValidatorSize" yaml:"gossipAcceptedFrontierNonValidatorSize"` - AcceptedFrontierPeerSize uint `json:"gossipAcceptedFrontierPeerSize" yaml:"gossipAcceptedFrontierPeerSize"` - OnAcceptValidatorSize uint `json:"gossipOnAcceptValidatorSize" yaml:"gossipOnAcceptValidatorSize"` - 
OnAcceptNonValidatorSize uint `json:"gossipOnAcceptNonValidatorSize" yaml:"gossipOnAcceptNonValidatorSize"` - OnAcceptPeerSize uint `json:"gossipOnAcceptPeerSize" yaml:"gossipOnAcceptPeerSize"` - AppGossipValidatorSize uint `json:"appGossipValidatorSize" yaml:"appGossipValidatorSize"` - AppGossipNonValidatorSize uint `json:"appGossipNonValidatorSize" yaml:"appGossipNonValidatorSize"` - AppGossipPeerSize uint `json:"appGossipPeerSize" yaml:"appGossipPeerSize"` -} - type Config struct { - GossipConfig - // ValidatorOnly indicates that this Subnet's Chains are available to only subnet validators. // No chain related messages will go out to non-validators. // Validators will drop messages received from non-validators. diff --git a/subnets/config.md b/subnets/config.md new file mode 100644 index 000000000000..970211710388 --- /dev/null +++ b/subnets/config.md @@ -0,0 +1,110 @@ +--- +tags: [Nodes] +description: Reference for all available Subnet config options and flags. +sidebar_label: Subnet Configs +pagination_label: Subnet Configs +sidebar_position: 2 +--- + +# Subnet Configs + +It is possible to provide parameters for a Subnet. Parameters here apply to all +chains in the specified Subnet. + +AvalancheGo looks for files specified with `{subnetID}.json` under +`--subnet-config-dir` as documented +[here](/nodes/configure/avalanchego-config-flags.md#subnet-configs). + +Here is an example of Subnet config file: + +```json +{ + "validatorOnly": false, + "consensusParameters": { + "k": 25, + "alpha": 18 + } +} +``` + +## Parameters + +### Private Subnet + +#### `validatorOnly` (bool) + +If `true` this node does not expose Subnet blockchain contents to non-validators +via P2P messages. Defaults to `false`. + +Avalanche Subnets are public by default. It means that every node can sync and +listen ongoing transactions/blocks in Subnets, even they're not validating the +listened Subnet. 
+ +Subnet validators can choose not to publish contents of blockchains via this +configuration. If a node sets `validatorOnly` to true, the node exchanges +messages only with this Subnet's validators. Other peers will not be able to +learn contents of this Subnet from this node. + +:::tip + +This is a node-specific configuration. Every validator of this Subnet has to use +this configuration in order to create a full private Subnet. + +::: + +#### `allowedNodes` (string list) + +If `validatorOnly=true` this allows explicitly specified NodeIDs to be allowed +to sync the Subnet regardless of validator status. Defaults to be empty. + +:::tip + +This is a node-specific configuration. Every validator of this Subnet has to use +this configuration in order to properly allow a node in the private Subnet. + +::: + +#### `proposerMinBlockDelay` (duration) + +The minimum delay performed when building snowman++ blocks. Default is set to 1 second. + +As one of the ways to control network congestion, Snowman++ will only build a +block `proposerMinBlockDelay` after the parent block's timestamp. Some +high-performance custom VM may find this too strict. This flag allows tuning the +frequency at which blocks are built. + +### Consensus Parameters + +Subnet configs supports loading new consensus parameters. JSON keys are +different from their matching `CLI` keys. These parameters must be grouped under +`consensusParameters` key. The consensus parameters of a Subnet default to the +same values used for the Primary Network, which are given [CLI Snow Parameters](/nodes/configure/avalanchego-config-flags.md#snow-parameters). 
+ +| CLI Key | JSON Key | +| :------------------------------- | :-------------------- | +| --snow-sample-size | k | +| --snow-quorum-size | alpha | +| --snow-commit-threshold | `beta` | +| --snow-concurrent-repolls | concurrentRepolls | +| --snow-optimal-processing | `optimalProcessing` | +| --snow-max-processing | maxOutstandingItems | +| --snow-max-time-processing | maxItemProcessingTime | +| --snow-avalanche-batch-size | `batchSize` | +| --snow-avalanche-num-parents | `parentSize` | + +### Gossip Configs + +It's possible to define different Gossip configurations for each Subnet without +changing values for Primary Network. JSON keys of these +parameters are different from their matching `CLI` keys. These parameters +default to the same values used for the Primary Network. For more information +see [CLI Gossip Configs](/nodes/configure/avalanchego-config-flags.md#gossiping). + +| CLI Key | JSON Key | +| :------------------------------------------------------ | :------------------------------------- | +| --consensus-accepted-frontier-gossip-validator-size | gossipAcceptedFrontierValidatorSize | +| --consensus-accepted-frontier-gossip-non-validator-size | gossipAcceptedFrontierNonValidatorSize | +| --consensus-accepted-frontier-gossip-peer-size | gossipAcceptedFrontierPeerSize | +| --consensus-on-accept-gossip-validator-size | gossipOnAcceptValidatorSize | +| --consensus-on-accept-gossip-non-validator-size | gossipOnAcceptNonValidatorSize | +| --consensus-on-accept-gossip-peer-size | gossipOnAcceptPeerSize | diff --git a/subnets/config_test.go b/subnets/config_test.go index fdb10c4e072a..c45388e88d1a 100644 --- a/subnets/config_test.go +++ b/subnets/config_test.go @@ -17,8 +17,7 @@ var validParameters = snowball.Parameters{ K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Beta: 1, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, diff --git a/tests/antithesis/README.md b/tests/antithesis/README.md new file mode 
100644 index 000000000000..1fd22370e655 --- /dev/null +++ b/tests/antithesis/README.md @@ -0,0 +1,145 @@ +# Antithesis Testing + +This package supports testing with +[Antithesis](https://antithesis.com/docs/introduction/introduction.html), +a SaaS offering that enables deployment of distributed systems (such +as Avalanche) to a deterministic and simulated environment that +enables discovery and reproduction of anomalous behavior. + +## Package details + +| Filename | Purpose | +|:---------------|:-----------------------------------------------------------------------------------| +| compose.go | Generates Docker Compose project file and initial database for antithesis testing. | +| config.go | Defines common flags for the workload binary. | +| init_db.go | Initializes initial db state for subnet testing. | +| node_health.go | Helper to check node health. | +| avalanchego/ | Defines an antithesis test setup for avalanchego's primary chains. | +| xsvm/ | Defines an antithesis test setup for the xsvm VM. | + +## Instrumentation + +Software running in Antithesis's environment must be +[instrumented](https://antithesis.com/docs/instrumentation/overview.html) +to take full advantage of the supported traceability. Since the +Antithesis Go SDK only supports the amd64/x86_64 architecture as of this +writing, running of instrumented binaries on Macs (arm64) is not possible +without emulation (which would be very slow). To support test development +on Macs, a local build will not be instrumented. + +## Defining a new test setup + +When defining a new test setup - whether in the avalanchego repo or +for a VM in another repo - following the example of an existing test +setup is suggested. 
The following table enumerates the files defining +a test setup: + +| Filename | Purpose | +|:-------------------------------------------------------|:-------------------------------------------------------| +| scripts/build_antithesis_images.sh | Builds the test images to deploy to antithesis | +| scripts/build_antithesis_[test setup]_workload.sh | Builds the workload binary | +| scripts/tests.build_antithesis_images.sh | Validates the build of the test images | +| tests/antithesis/[test setup]/main.go | The entrypoint for the workload binary | +| tests/antithesis/[test setup]/Dockerfile.config | Defines how to build the config image | +| tests/antithesis/[test setup]/Dockerfile.node | Defines how to build the instrumented node image | +| tests/antithesis/[test setup]/Dockerfile.workload | Defines how to build the workload image | +| tests/antithesis/[test setup]/gencomposeconfig/main.go | Generates the compose configuration for the test setup | + +In addition, github workflows are suggested to ensure +`scripts/tests.build_antithesis_images.sh` runs against PRs and +`scripts/build_antithesis_images.sh` runs against pushes. + +### Use of a builder image + +To simplify building instrumented (for running in CI) and +non-instrumented (for running locally) versions of the workload and +node images, a common builder image is used. If on an amd64 host, +`tests/antithesis/avalanchego/Dockerfile.builder-instrumented` is used +to create an instrumented builder. On an arm64 host, +`tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented` is +used to create an uninstrumented builder. In both cases, the builder +image is based on the default golang image and will include the source +code necessary to build the node and workload binaries. The +alternative would require duplicating builder setup for instrumented +and non-instrumented builds for the workload and node images of each +test setup. 
+ +## Troubleshooting a test setup + +### Running a workload directly + +The workload of the 'avalanchego' test setup can be invoked against an +arbitrary network: + +```bash +$ AVAWL_URIS="http://10.0.20.3:9650 http://10.0.20.4:9650" go run ./tests/antithesis/avalanchego +``` + +The workload of a subnet test setup like 'xsvm' additionally requires +a network with a configured chain for the xsvm VM and the ID for that +chain needs to be provided to the workload: + +```bash +$ AVAWL_URIS=... CHAIN_IDS="2S9ypz...AzMj9" go run ./tests/antithesis/xsvm +``` + +### Running a workload with docker-compose + +Running the test script for a given test setup with the `DEBUG` flag +set will avoid cleaning up the the temporary directory where the +docker-compose setup is written to. This will allow manual invocation of +docker-compose to see the log output of the workload. + +```bash +$ DEBUG=1 ./scripts/tests.build_antithesis_images.sh +``` + +After the test script has terminated, the name of the temporary +directory will appear in the output of the script: + +``` +... +using temporary directory /tmp/tmp.E6eHdDr4ln as the docker-compose path" +... +``` + +Running compose from the temporary directory will ensure the workload +output appears on stdout for inspection: + +```bash +$ cd [temporary directory] + +# Start the compose project +$ docker-compose up + +# Cleanup the compose project +$ docker-compose down --volumes +``` + +## Manually triggering an Antithesis test run + +When making changes to a test setup, it may be useful to manually +trigger an Antithesis test run outside of the normal schedule. This +can be performed against master or an arbitrary branch: + + - Navigate to the ['Actions' tab of the avalanchego + repo](https://github.com/ava-labs/avalanchego/actions). + - Select the [Publish Antithesis + Images](https://github.com/ava-labs/avalanchego/actions/workflows/publish_antithesis_images.yml) + workflow on the left. 
+ - Find the 'Run workflow' drop-down on the right and trigger the + workflow against the desired branch. The default value for + `image_tag` (`latest`) is used by scheduled test runs, so consider + supplying a different value to avoid interferring with the results + of the scheduled runs. + - Wait for the publication job to complete successfully so that the + images are available to be tested against. + - Select the [Trigger Antithesis Test Runs](https://github.com/ava-labs/avalanchego/actions/workflows/trigger-antithesis-runs.yml) + workflow on the left. + - Find the 'Run workflow' drop-down on the right and trigger the + workflow against the desired branch. The branch only determines the + CI configuration (the images have already been built), so master is + probably fine. Make sure to supply the same `image_tag` that was + provided to the publishing workflow and consider setting + `recipients` to your own email rather than sending the test report + to everyone on the regular distribution list. diff --git a/tests/antithesis/avalanchego/Dockerfile.builder-instrumented b/tests/antithesis/avalanchego/Dockerfile.builder-instrumented new file mode 100644 index 000000000000..ffc05256e30e --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.builder-instrumented @@ -0,0 +1,46 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +# Antithesis: Getting the Antithesis golang instrumentation library +FROM docker.io/antithesishq/go-instrumentor AS instrumentor + +# ============= Instrumentation Stage ================ +FROM golang:$GO_VERSION-bullseye + +WORKDIR /build +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . 
+ +# Ensure pre-existing builds are not available for inclusion in the final image +RUN [ -d ./build ] && rm -rf ./build/* || true + +# Keep the commit hash to easily verify the exact version that is running +RUN git rev-parse HEAD > ./commit_hash.txt + +# Copy the instrumentor and supporting files to their correct locations +COPY --from=instrumentor /opt/antithesis /opt/antithesis +COPY --from=instrumentor /opt/antithesis/lib /lib + +# Create the destination output directory for the instrumented code +RUN mkdir -p /avalanchego_instrumented + +# Park the .git file in a safe location +RUN mkdir -p /opt/tmp/ +RUN cp -r .git /opt/tmp/ + +# Instrument avalanchego +RUN /opt/antithesis/bin/goinstrumentor \ + -stderrthreshold=INFO \ + -antithesis /opt/antithesis/instrumentation \ + . \ + /avalanchego_instrumented + +WORKDIR /avalanchego_instrumented/customer +RUN go mod download +RUN ln -s /opt/tmp/.git .git diff --git a/tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented b/tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented new file mode 100644 index 000000000000..07d3fe8b882c --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.builder-uninstrumented @@ -0,0 +1,17 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +FROM golang:$GO_VERSION-bullseye + +WORKDIR /build +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . +RUN go mod download + +# Copy the code into the container +COPY . . 
+ +# Ensure pre-existing builds are not available for inclusion in the final image +RUN [ -d ./build ] && rm -rf ./build/* || true diff --git a/tests/antithesis/avalanchego/Dockerfile.config b/tests/antithesis/avalanchego/Dockerfile.config new file mode 100644 index 000000000000..36e0214bb80f --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.config @@ -0,0 +1,5 @@ +FROM scratch AS execution + +# Copy config artifacts from the build path. For simplicity, artifacts +# are built outside of the docker image. +COPY ./build/antithesis/avalanchego/ / diff --git a/tests/antithesis/avalanchego/Dockerfile.node b/tests/antithesis/avalanchego/Dockerfile.node new file mode 100644 index 000000000000..2b19adbb93c1 --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.node @@ -0,0 +1,30 @@ +# TAG should identify the builder image +ARG TAG + +# ============= Compilation Stage ================ +FROM antithesis-avalanchego-builder:$TAG AS builder + +# The workdir is hard-coded since this Dockerfile is only intended for instrumented builds. +WORKDIR /avalanchego_instrumented/customer + +# Build avalanchego with race detection (-r) enabled. 
+RUN ./scripts/build.sh -r + +# ============= Cleanup Stage ================ +FROM debian:11-slim AS execution + +# Copy identifying information into the container +COPY --from=builder /build/commit_hash.txt ./commit_hash.txt + +# Copy the antithesis dependencies into the container +RUN mkdir -p /symbols +COPY --from=builder /avalanchego_instrumented/symbols /symbols +COPY --from=builder /opt/antithesis/lib/libvoidstar.so /usr/lib/libvoidstar.so + +# Use the same path as the uninstrumented node image for consistency +WORKDIR /avalanchego/build + +# Copy the executable into the container +COPY --from=builder /avalanchego_instrumented/customer/build/avalanchego ./avalanchego + +CMD [ "./avalanchego" ] diff --git a/tests/antithesis/avalanchego/Dockerfile.workload b/tests/antithesis/avalanchego/Dockerfile.workload new file mode 100644 index 000000000000..4cd37123773e --- /dev/null +++ b/tests/antithesis/avalanchego/Dockerfile.workload @@ -0,0 +1,28 @@ +# TAG should identify the builder image +ARG TAG + +# NODE_IMAGE needs to identify an existing node image and should include the tag +ARG NODE_IMAGE + +# ============= Compilation Stage ================ +FROM antithesis-avalanchego-builder:$TAG AS builder + +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + +WORKDIR $BUILDER_WORKDIR + +# Build the workload +RUN ./scripts/build_antithesis_avalanchego_workload.sh + +# ============= Cleanup Stage ================ +# Base the workflow on the node image to support bootstrap testing +FROM $NODE_IMAGE AS execution + +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + +# Copy the executable into the container +COPY --from=builder $BUILDER_WORKDIR/build/antithesis-avalanchego-workload ./workload + +CMD [ "./workload" ] diff --git a/tests/antithesis/avalanchego/gencomposeconfig/main.go b/tests/antithesis/avalanchego/gencomposeconfig/main.go new file mode 100644 index 
000000000000..85659abe26ae --- /dev/null +++ b/tests/antithesis/avalanchego/gencomposeconfig/main.go @@ -0,0 +1,37 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "fmt" + "log" + "os" + + "github.com/ava-labs/avalanchego/tests/antithesis" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" +) + +const baseImageName = "antithesis-avalanchego" + +// Creates docker-compose.yml and its associated volumes in the target path. +func main() { + targetPath := os.Getenv("TARGET_PATH") + if len(targetPath) == 0 { + log.Fatal("TARGET_PATH environment variable not set") + } + + imageTag := os.Getenv("IMAGE_TAG") + if len(imageTag) == 0 { + log.Fatal("IMAGE_TAG environment variable not set") + } + + nodeImageName := fmt.Sprintf("%s-node:%s", baseImageName, imageTag) + workloadImageName := fmt.Sprintf("%s-workload:%s", baseImageName, imageTag) + + network := tmpnet.LocalNetworkOrPanic() + err := antithesis.GenerateComposeConfig(network, nodeImageName, workloadImageName, targetPath) + if err != nil { + log.Fatalf("failed to generate config for docker-compose: %s", err) + } +} diff --git a/tests/antithesis/avalanchego/main.go b/tests/antithesis/avalanchego/main.go new file mode 100644 index 000000000000..a7101b6bc3cb --- /dev/null +++ b/tests/antithesis/avalanchego/main.go @@ -0,0 +1,651 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "crypto/rand" + "log" + "math/big" + "os" + "time" + + "github.com/antithesishq/antithesis-sdk-go/assert" + "github.com/antithesishq/antithesis-sdk-go/lifecycle" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/antithesis" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/avm" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + xtxs "github.com/ava-labs/avalanchego/vms/avm/txs" + ptxs "github.com/ava-labs/avalanchego/vms/platformvm/txs" + xbuilder "github.com/ava-labs/avalanchego/wallet/chain/x/builder" +) + +const NumKeys = 5 + +func main() { + c, err := antithesis.NewConfig(os.Args) + if err != nil { + log.Fatalf("invalid config: %s", err) + } + + ctx := context.Background() + if err := antithesis.AwaitHealthyNodes(ctx, c.URIs); err != nil { + log.Fatalf("failed to await healthy nodes: %s", err) + } + + kc := secp256k1fx.NewKeychain(genesis.EWOQKey) + walletSyncStartTime := time.Now() + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: c.URIs[0], + AVAXKeychain: kc, + EthKeychain: kc, + }) + if err != nil { + log.Fatalf("failed to initialize wallet: %s", err) + } + log.Printf("synced wallet in %s", time.Since(walletSyncStartTime)) + + genesisWorkload := &workload{ + id: 0, + wallet: wallet, + addrs: set.Of(genesis.EWOQKey.Address()), + uris: c.URIs, + } + 
+ workloads := make([]*workload, NumKeys) + workloads[0] = genesisWorkload + + var ( + genesisXWallet = wallet.X() + genesisXBuilder = genesisXWallet.Builder() + genesisXContext = genesisXBuilder.Context() + avaxAssetID = genesisXContext.AVAXAssetID + ) + for i := 1; i < NumKeys; i++ { + key, err := secp256k1.NewPrivateKey() + if err != nil { + log.Fatalf("failed to generate key: %s", err) + } + + var ( + addr = key.Address() + baseStartTime = time.Now() + ) + baseTx, err := genesisXWallet.IssueBaseTx([]*avax.TransferableOutput{{ + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 100 * units.KiloAvax, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + }, + }}) + if err != nil { + log.Fatalf("failed to issue initial funding X-chain baseTx: %s", err) + } + log.Printf("issued initial funding X-chain baseTx %s in %s", baseTx.ID(), time.Since(baseStartTime)) + + genesisWorkload.confirmXChainTx(ctx, baseTx) + + uri := c.URIs[i%len(c.URIs)] + kc := secp256k1fx.NewKeychain(key) + walletSyncStartTime := time.Now() + wallet, err := primary.MakeWallet(ctx, &primary.WalletConfig{ + URI: uri, + AVAXKeychain: kc, + EthKeychain: kc, + }) + if err != nil { + log.Fatalf("failed to initialize wallet: %s", err) + } + log.Printf("synced wallet in %s", time.Since(walletSyncStartTime)) + + workloads[i] = &workload{ + id: i, + wallet: wallet, + addrs: set.Of(addr), + uris: c.URIs, + } + } + + lifecycle.SetupComplete(map[string]any{ + "msg": "initialized workers", + "numWorkers": NumKeys, + }) + + for _, w := range workloads[1:] { + go w.run(ctx) + } + genesisWorkload.run(ctx) +} + +type workload struct { + id int + wallet primary.Wallet + addrs set.Set[ids.ShortID] + uris []string +} + +func (w *workload) run(ctx context.Context) { + timer := time.NewTimer(0) + if !timer.Stop() { + <-timer.C + } + + var ( + xWallet = w.wallet.X() + xBuilder = xWallet.Builder() + pWallet = w.wallet.P() + pBuilder = 
pWallet.Builder() + ) + xBalances, err := xBuilder.GetFTBalance() + if err != nil { + log.Fatalf("failed to fetch X-chain balances: %s", err) + } + pBalances, err := pBuilder.GetBalance() + if err != nil { + log.Fatalf("failed to fetch P-chain balances: %s", err) + } + var ( + xContext = xBuilder.Context() + avaxAssetID = xContext.AVAXAssetID + xAVAX = xBalances[avaxAssetID] + pAVAX = pBalances[avaxAssetID] + ) + log.Printf("wallet starting with %d X-chain nAVAX and %d P-chain nAVAX", xAVAX, pAVAX) + assert.Reachable("wallet starting", map[string]any{ + "worker": w.id, + "xBalance": xAVAX, + "pBalance": pAVAX, + }) + + for { + val, err := rand.Int(rand.Reader, big.NewInt(5)) + if err != nil { + log.Fatalf("failed to read randomness: %s", err) + } + + flowID := val.Int64() + log.Printf("wallet %d executing flow %d", w.id, flowID) + switch flowID { + case 0: + w.issueXChainBaseTx(ctx) + case 1: + w.issueXChainCreateAssetTx(ctx) + case 2: + w.issueXChainOperationTx(ctx) + case 3: + w.issueXToPTransfer(ctx) + case 4: + w.issuePToXTransfer(ctx) + } + + val, err = rand.Int(rand.Reader, big.NewInt(int64(time.Second))) + if err != nil { + log.Fatalf("failed to read randomness: %s", err) + } + + timer.Reset(time.Duration(val.Int64())) + select { + case <-ctx.Done(): + return + case <-timer.C: + } + } +} + +func (w *workload) issueXChainBaseTx(ctx context.Context) { + var ( + xWallet = w.wallet.X() + xBuilder = xWallet.Builder() + ) + balances, err := xBuilder.GetFTBalance() + if err != nil { + log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) + return + } + + var ( + xContext = xBuilder.Context() + avaxAssetID = xContext.AVAXAssetID + avaxBalance = balances[avaxAssetID] + baseTxFee = xContext.BaseTxFee + neededBalance = baseTxFee + units.Schmeckle + ) + if avaxBalance < neededBalance { + log.Printf("skipping X-chain tx issuance due to insufficient balance: %d 
< %d", avaxBalance, neededBalance) + return + } + + var ( + owner = w.makeOwner() + baseStartTime = time.Now() + ) + baseTx, err := xWallet.IssueBaseTx( + []*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: units.Schmeckle, + OutputOwners: owner, + }, + }, + }, + ) + if err != nil { + log.Printf("failed to issue X-chain baseTx: %s", err) + return + } + log.Printf("issued new X-chain baseTx %s in %s", baseTx.ID(), time.Since(baseStartTime)) + + w.confirmXChainTx(ctx, baseTx) + w.verifyXChainTxConsumedUTXOs(ctx, baseTx) +} + +func (w *workload) issueXChainCreateAssetTx(ctx context.Context) { + var ( + xWallet = w.wallet.X() + xBuilder = xWallet.Builder() + ) + balances, err := xBuilder.GetFTBalance() + if err != nil { + log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) + return + } + + var ( + xContext = xBuilder.Context() + avaxAssetID = xContext.AVAXAssetID + avaxBalance = balances[avaxAssetID] + neededBalance = xContext.CreateAssetTxFee + ) + if avaxBalance < neededBalance { + log.Printf("skipping X-chain tx issuance due to insufficient balance: %d < %d", avaxBalance, neededBalance) + return + } + + var ( + owner = w.makeOwner() + createAssetStartTime = time.Now() + ) + createAssetTx, err := xWallet.IssueCreateAssetTx( + "HI", + "HI", + 1, + map[uint32][]verify.State{ + 0: { + &secp256k1fx.TransferOutput{ + Amt: units.Schmeckle, + OutputOwners: owner, + }, + }, + }, + ) + if err != nil { + log.Printf("failed to issue X-chain create asset transaction: %s", err) + return + } + log.Printf("created new X-chain asset %s in %s", createAssetTx.ID(), time.Since(createAssetStartTime)) + + w.confirmXChainTx(ctx, createAssetTx) + w.verifyXChainTxConsumedUTXOs(ctx, createAssetTx) +} + +func (w *workload) issueXChainOperationTx(ctx context.Context) { + var ( + xWallet = w.wallet.X() + xBuilder = 
xWallet.Builder() + ) + balances, err := xBuilder.GetFTBalance() + if err != nil { + log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) + return + } + + var ( + xContext = xBuilder.Context() + avaxAssetID = xContext.AVAXAssetID + avaxBalance = balances[avaxAssetID] + createAssetTxFee = xContext.CreateAssetTxFee + baseTxFee = xContext.BaseTxFee + neededBalance = createAssetTxFee + baseTxFee + ) + if avaxBalance < neededBalance { + log.Printf("skipping X-chain tx issuance due to insufficient balance: %d < %d", avaxBalance, neededBalance) + return + } + + var ( + owner = w.makeOwner() + createAssetStartTime = time.Now() + ) + createAssetTx, err := xWallet.IssueCreateAssetTx( + "HI", + "HI", + 1, + map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: owner, + }, + }, + }, + ) + if err != nil { + log.Printf("failed to issue X-chain create asset transaction: %s", err) + return + } + log.Printf("created new X-chain asset %s in %s", createAssetTx.ID(), time.Since(createAssetStartTime)) + + operationStartTime := time.Now() + operationTx, err := xWallet.IssueOperationTxMintProperty( + createAssetTx.ID(), + &owner, + ) + if err != nil { + log.Printf("failed to issue X-chain operation transaction: %s", err) + return + } + log.Printf("issued X-chain operation tx %s in %s", operationTx.ID(), time.Since(operationStartTime)) + + w.confirmXChainTx(ctx, createAssetTx) + w.verifyXChainTxConsumedUTXOs(ctx, createAssetTx) + w.confirmXChainTx(ctx, operationTx) + w.verifyXChainTxConsumedUTXOs(ctx, operationTx) +} + +func (w *workload) issueXToPTransfer(ctx context.Context) { + var ( + xWallet = w.wallet.X() + pWallet = w.wallet.P() + xBuilder = xWallet.Builder() + ) + balances, err := xBuilder.GetFTBalance() + if err != nil { + log.Printf("failed to fetch X-chain balances: %s", err) + assert.Unreachable("failed to fetch X-chain balances", 
map[string]any{ + "worker": w.id, + "err": err, + }) + return + } + + var ( + xContext = xBuilder.Context() + avaxAssetID = xContext.AVAXAssetID + avaxBalance = balances[avaxAssetID] + xBaseTxFee = xContext.BaseTxFee + pBuilder = pWallet.Builder() + pContext = pBuilder.Context() + pBaseTxFee = pContext.BaseTxFee + txFees = xBaseTxFee + pBaseTxFee + neededBalance = txFees + units.Avax + ) + if avaxBalance < neededBalance { + log.Printf("skipping X-chain tx issuance due to insufficient balance: %d < %d", avaxBalance, neededBalance) + return + } + + var ( + owner = w.makeOwner() + exportStartTime = time.Now() + ) + exportTx, err := xWallet.IssueExportTx( + constants.PlatformChainID, + []*avax.TransferableOutput{{ + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: units.Avax, + }, + }}, + ) + if err != nil { + log.Printf("failed to issue X-chain export transaction: %s", err) + return + } + log.Printf("created X-chain export transaction %s in %s", exportTx.ID(), time.Since(exportStartTime)) + + var ( + xChainID = xContext.BlockchainID + importStartTime = time.Now() + ) + importTx, err := pWallet.IssueImportTx( + xChainID, + &owner, + ) + if err != nil { + log.Printf("failed to issue P-chain import transaction: %s", err) + return + } + log.Printf("created P-chain import transaction %s in %s", importTx.ID(), time.Since(importStartTime)) + + w.confirmXChainTx(ctx, exportTx) + w.verifyXChainTxConsumedUTXOs(ctx, exportTx) + w.confirmPChainTx(ctx, importTx) + w.verifyPChainTxConsumedUTXOs(ctx, importTx) +} + +func (w *workload) issuePToXTransfer(ctx context.Context) { + var ( + xWallet = w.wallet.X() + pWallet = w.wallet.P() + xBuilder = xWallet.Builder() + pBuilder = pWallet.Builder() + ) + balances, err := pBuilder.GetBalance() + if err != nil { + log.Printf("failed to fetch P-chain balances: %s", err) + assert.Unreachable("failed to fetch P-chain balances", map[string]any{ + "worker": w.id, + "err": err, + }) + return + } + + var ( + 
xContext = xBuilder.Context() + pContext = pBuilder.Context() + avaxAssetID = pContext.AVAXAssetID + avaxBalance = balances[avaxAssetID] + pBaseTxFee = pContext.BaseTxFee + xBaseTxFee = xContext.BaseTxFee + txFees = pBaseTxFee + xBaseTxFee + neededBalance = txFees + units.Schmeckle + ) + if avaxBalance < neededBalance { + log.Printf("skipping P-chain tx issuance due to insufficient balance: %d < %d", avaxBalance, neededBalance) + return + } + + var ( + xChainID = xContext.BlockchainID + owner = w.makeOwner() + exportStartTime = time.Now() + ) + exportTx, err := pWallet.IssueExportTx( + xChainID, + []*avax.TransferableOutput{{ + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &secp256k1fx.TransferOutput{ + Amt: units.Schmeckle, + }, + }}, + ) + if err != nil { + log.Printf("failed to issue P-chain export transaction: %s", err) + return + } + log.Printf("created P-chain export transaction %s in %s", exportTx.ID(), time.Since(exportStartTime)) + + importStartTime := time.Now() + importTx, err := xWallet.IssueImportTx( + constants.PlatformChainID, + &owner, + ) + if err != nil { + log.Printf("failed to issue X-chain import transaction: %s", err) + return + } + log.Printf("created X-chain import transaction %s in %s", importTx.ID(), time.Since(importStartTime)) + + w.confirmPChainTx(ctx, exportTx) + w.verifyPChainTxConsumedUTXOs(ctx, exportTx) + w.confirmXChainTx(ctx, importTx) + w.verifyXChainTxConsumedUTXOs(ctx, importTx) +} + +func (w *workload) makeOwner() secp256k1fx.OutputOwners { + addr, _ := w.addrs.Peek() + return secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + } +} + +func (w *workload) confirmXChainTx(ctx context.Context, tx *xtxs.Tx) { + txID := tx.ID() + for _, uri := range w.uris { + client := avm.NewClient(uri, "X") + if err := avm.AwaitTxAccepted(client, ctx, txID, 100*time.Millisecond); err != nil { + log.Printf("failed to confirm X-chain transaction %s on %s: %s", txID, uri, err) + return + } + log.Printf("confirmed 
X-chain transaction %s on %s", txID, uri) + } + log.Printf("confirmed X-chain transaction %s on all nodes", txID) +} + +func (w *workload) confirmPChainTx(ctx context.Context, tx *ptxs.Tx) { + txID := tx.ID() + for _, uri := range w.uris { + client := platformvm.NewClient(uri) + if err := platformvm.AwaitTxAccepted(client, ctx, txID, 100*time.Millisecond); err != nil { + log.Printf("failed to determine the status of a P-chain transaction %s on %s: %s", txID, uri, err) + return + } + log.Printf("confirmed P-chain transaction %s on %s", txID, uri) + } + log.Printf("confirmed P-chain transaction %s on all nodes", txID) +} + +func (w *workload) verifyXChainTxConsumedUTXOs(ctx context.Context, tx *xtxs.Tx) { + txID := tx.ID() + chainID := w.wallet.X().Builder().Context().BlockchainID + for _, uri := range w.uris { + client := avm.NewClient(uri, "X") + + utxos := common.NewUTXOs() + err := primary.AddAllUTXOs( + ctx, + utxos, + client, + xbuilder.Parser.Codec(), + chainID, + chainID, + w.addrs.List(), + ) + if err != nil { + log.Printf("failed to fetch X-chain UTXOs on %s: %s", uri, err) + return + } + + inputs := tx.Unsigned.InputIDs() + for input := range inputs { + _, err := utxos.GetUTXO(ctx, chainID, chainID, input) + if err != database.ErrNotFound { + log.Printf("failed to verify that X-chain UTXO %s was deleted on %s after %s", input, uri, txID) + assert.Unreachable("failed to verify that X-chain UTXO was deleted", map[string]any{ + "worker": w.id, + "uri": uri, + "txID": txID, + "utxoID": input, + "err": err, + }) + return + } + } + log.Printf("confirmed all X-chain UTXOs consumed by %s are not present on %s", txID, uri) + } + log.Printf("confirmed all X-chain UTXOs consumed by %s are not present on all nodes", txID) +} + +func (w *workload) verifyPChainTxConsumedUTXOs(ctx context.Context, tx *ptxs.Tx) { + txID := tx.ID() + for _, uri := range w.uris { + client := platformvm.NewClient(uri) + + utxos := common.NewUTXOs() + err := primary.AddAllUTXOs( + ctx, + 
utxos, + client, + ptxs.Codec, + constants.PlatformChainID, + constants.PlatformChainID, + w.addrs.List(), + ) + if err != nil { + log.Printf("failed to fetch P-chain UTXOs on %s: %s", uri, err) + return + } + + inputs := tx.Unsigned.InputIDs() + for input := range inputs { + _, err := utxos.GetUTXO(ctx, constants.PlatformChainID, constants.PlatformChainID, input) + if err != database.ErrNotFound { + log.Printf("failed to verify that P-chain UTXO %s was deleted on %s after %s", input, uri, txID) + assert.Unreachable("failed to verify that P-chain UTXO was deleted", map[string]any{ + "worker": w.id, + "uri": uri, + "txID": txID, + "utxoID": input, + "err": err, + }) + return + } + } + log.Printf("confirmed all P-chain UTXOs consumed by %s are not present on %s", txID, uri) + } + log.Printf("confirmed all P-chain UTXOs consumed by %s are not present on all nodes", txID) +} diff --git a/tests/antithesis/compose.go b/tests/antithesis/compose.go new file mode 100644 index 000000000000..e17e189281bd --- /dev/null +++ b/tests/antithesis/compose.go @@ -0,0 +1,241 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package antithesis + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/types" + "gopkg.in/yaml.v3" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/perms" +) + +const bootstrapIndex = 0 + +// Initialize the given path with the docker-compose configuration (compose file and +// volumes) needed for an Antithesis test setup. 
+func GenerateComposeConfig( + network *tmpnet.Network, + nodeImageName string, + workloadImageName string, + targetPath string, +) error { + // Generate a compose project for the specified network + project, err := newComposeProject(network, nodeImageName, workloadImageName) + if err != nil { + return fmt.Errorf("failed to create compose project: %w", err) + } + + absPath, err := filepath.Abs(targetPath) + if err != nil { + return fmt.Errorf("failed to convert target path to absolute path: %w", err) + } + + if err := os.MkdirAll(absPath, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create target path %q: %w", absPath, err) + } + + // Write the compose file + bytes, err := yaml.Marshal(&project) + if err != nil { + return fmt.Errorf("failed to marshal compose project: %w", err) + } + composePath := filepath.Join(targetPath, "docker-compose.yml") + if err := os.WriteFile(composePath, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write genesis: %w", err) + } + + // Create the volume paths + for _, service := range project.Services { + for _, volume := range service.Volumes { + volumePath := filepath.Join(absPath, volume.Source) + if err := os.MkdirAll(volumePath, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create volume path %q: %w", volumePath, err) + } + } + } + return nil +} + +// Create a new docker compose project for an antithesis test setup +// for the provided network configuration. 
+func newComposeProject(network *tmpnet.Network, nodeImageName string, workloadImageName string) (*types.Project, error) { + networkName := "avalanche-testnet" + baseNetworkAddress := "10.0.20" + + services := make(types.Services, len(network.Nodes)+1) + uris := make([]string, len(network.Nodes)) + var ( + bootstrapIP string + bootstrapIDs string + ) + for i, node := range network.Nodes { + address := fmt.Sprintf("%s.%d", baseNetworkAddress, 3+i) + + tlsKey, err := node.Flags.GetStringVal(config.StakingTLSKeyContentKey) + if err != nil { + return nil, err + } + tlsCert, err := node.Flags.GetStringVal(config.StakingCertContentKey) + if err != nil { + return nil, err + } + signerKey, err := node.Flags.GetStringVal(config.StakingSignerKeyContentKey) + if err != nil { + return nil, err + } + + env := types.Mapping{ + config.NetworkNameKey: constants.LocalName, + config.LogLevelKey: logging.Debug.String(), + config.LogDisplayLevelKey: logging.Trace.String(), + config.HTTPHostKey: "0.0.0.0", + config.PublicIPKey: address, + config.StakingTLSKeyContentKey: tlsKey, + config.StakingCertContentKey: tlsCert, + config.StakingSignerKeyContentKey: signerKey, + } + + // Apply configuration appropriate to a test network + for k, v := range tmpnet.DefaultTestFlags() { + switch value := v.(type) { + case string: + env[k] = value + case bool: + env[k] = strconv.FormatBool(value) + default: + return nil, fmt.Errorf("unable to convert unsupported type %T to string", v) + } + } + + serviceName := getServiceName(i) + + volumes := []types.ServiceVolumeConfig{ + { + Type: types.VolumeTypeBind, + Source: fmt.Sprintf("./volumes/%s/logs", serviceName), + Target: "/root/.avalanchego/logs", + }, + } + + trackSubnets, err := node.Flags.GetStringVal(config.TrackSubnetsKey) + if err != nil { + return nil, err + } + if len(trackSubnets) > 0 { + env[config.TrackSubnetsKey] = trackSubnets + if i == bootstrapIndex { + // DB volume for bootstrap node will need to initialized with the subnet + volumes = 
append(volumes, types.ServiceVolumeConfig{ + Type: types.VolumeTypeBind, + Source: fmt.Sprintf("./volumes/%s/db", serviceName), + Target: "/root/.avalanchego/db", + }) + } + } + + if i == 0 { + bootstrapIP = address + ":9651" + bootstrapIDs = node.NodeID.String() + } else { + env[config.BootstrapIPsKey] = bootstrapIP + env[config.BootstrapIDsKey] = bootstrapIDs + } + + // The env is defined with the keys and then converted to env + // vars because only the keys are available as constants. + env = keyMapToEnvVarMap(env) + + services[i+1] = types.ServiceConfig{ + Name: serviceName, + ContainerName: serviceName, + Hostname: serviceName, + Image: nodeImageName, + Volumes: volumes, + Environment: env.ToMappingWithEquals(), + Networks: map[string]*types.ServiceNetworkConfig{ + networkName: { + Ipv4Address: address, + }, + }, + } + + // Collect URIs for the workload container + uris[i] = fmt.Sprintf("http://%s:9650", address) + } + + workloadEnv := types.Mapping{ + "AVAWL_URIS": strings.Join(uris, " "), + } + chainIDs := []string{} + for _, subnet := range network.Subnets { + for _, chain := range subnet.Chains { + chainIDs = append(chainIDs, chain.ChainID.String()) + } + } + if len(chainIDs) > 0 { + workloadEnv["AVAWL_CHAIN_IDS"] = strings.Join(chainIDs, " ") + } + + workloadName := "workload" + services[0] = types.ServiceConfig{ + Name: workloadName, + ContainerName: workloadName, + Hostname: workloadName, + Image: workloadImageName, + Environment: workloadEnv.ToMappingWithEquals(), + Networks: map[string]*types.ServiceNetworkConfig{ + networkName: { + Ipv4Address: baseNetworkAddress + ".129", + }, + }, + } + + return &types.Project{ + Networks: types.Networks{ + networkName: types.NetworkConfig{ + Driver: "bridge", + Ipam: types.IPAMConfig{ + Config: []*types.IPAMPool{ + { + Subnet: baseNetworkAddress + ".0/24", + }, + }, + }, + }, + }, + Services: services, + }, nil +} + +// Convert a mapping of avalanche config keys to a mapping of env vars +func 
keyMapToEnvVarMap(keyMap types.Mapping) types.Mapping { + envVarMap := make(types.Mapping, len(keyMap)) + for key, val := range keyMap { + // e.g. network-id -> AVAGO_NETWORK_ID + envVar := strings.ToUpper(config.EnvPrefix + "_" + config.DashesToUnderscores.Replace(key)) + envVarMap[envVar] = val + } + return envVarMap +} + +// Retrieve the service name for a node at the given index. Common to +// GenerateComposeConfig and InitDBVolumes to ensure consistency +// between db volumes configuration and volume paths. +func getServiceName(index int) string { + baseName := "avalanche" + if index == 0 { + return baseName + "-bootstrap-node" + } + return fmt.Sprintf("%s-node-%d", baseName, index) +} diff --git a/tests/antithesis/config.go b/tests/antithesis/config.go new file mode 100644 index 000000000000..471b12bb2c17 --- /dev/null +++ b/tests/antithesis/config.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package antithesis + +import ( + "errors" + "fmt" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +const ( + URIsKey = "uris" + ChainIDsKey = "chain-ids" + + FlagsName = "workload" + EnvPrefix = "avawl" +) + +var ( + errNoURIs = errors.New("at least one URI must be provided") + errNoArguments = errors.New("no arguments") +) + +type Config struct { + URIs []string + ChainIDs []string +} + +func NewConfig(arguments []string) (*Config, error) { + v, err := parseFlags(arguments) + if err != nil { + return nil, err + } + + c := &Config{ + URIs: v.GetStringSlice(URIsKey), + ChainIDs: v.GetStringSlice(ChainIDsKey), + } + return c, c.Verify() +} + +func (c *Config) Verify() error { + if len(c.URIs) == 0 { + return errNoURIs + } + return nil +} + +func parseFlags(arguments []string) (*viper.Viper, error) { + if len(arguments) == 0 { + return nil, errNoArguments + } + + fs := 
pflag.NewFlagSet(FlagsName, pflag.ContinueOnError) + fs.StringSlice(URIsKey, []string{primary.LocalAPIURI}, "URIs of nodes that the workload can communicate with") + fs.StringSlice(ChainIDsKey, []string{}, "IDs of chains to target for testing") + if err := fs.Parse(arguments[1:]); err != nil { + return nil, fmt.Errorf("failed parsing CLI flags: %w", err) + } + + v := viper.New() + v.AutomaticEnv() + v.SetEnvKeyReplacer(config.DashesToUnderscores) + v.SetEnvPrefix(EnvPrefix) + if err := v.BindPFlags(fs); err != nil { + return nil, fmt.Errorf("failed binding pflags: %w", err) + } + return v, nil +} diff --git a/tests/antithesis/init_db.go b/tests/antithesis/init_db.go new file mode 100644 index 000000000000..e1d6c5d537a9 --- /dev/null +++ b/tests/antithesis/init_db.go @@ -0,0 +1,60 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package antithesis + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/perms" +) + +// Given a path, compose the expected path of the bootstrap node's docker compose db volume. +func GetBootstrapVolumePath(targetPath string) (string, error) { + absPath, err := filepath.Abs(targetPath) + if err != nil { + return "", fmt.Errorf("failed to convert target path to absolute path: %w", err) + } + return filepath.Join(absPath, "volumes", getServiceName(bootstrapIndex)), nil +} + +// Bootstraps a local process-based network, creates its subnets and chains, and copies +// the resulting db state from one of the nodes to the provided path. The path will be +// created if it does not already exist. 
+func InitBootstrapDB(network *tmpnet.Network, avalancheGoPath string, pluginDir string, destPath string) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + defer cancel() + if err := tmpnet.BootstrapNewNetwork( + ctx, + os.Stdout, + network, + "", + avalancheGoPath, + pluginDir, + ); err != nil { + return fmt.Errorf("failed to bootstrap network: %w", err) + } + // Since the goal is to initialize the DB, we can stop the network after it has been started successfully + if err := network.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop network: %w", err) + } + + // Copy the db state from the bootstrap node to the compose volume path. + sourcePath := filepath.Join(network.Nodes[0].GetDataDir(), "db") + if err := os.MkdirAll(destPath, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create db path %q: %w", destPath, err) + } + // TODO(marun) Replace with os.CopyFS once we upgrade to Go 1.23 + cmd := exec.Command("cp", "-r", sourcePath, destPath) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to copy bootstrap db from %q to %q: %w", sourcePath, destPath, err) + } + + return nil +} diff --git a/tests/antithesis/node_health.go b/tests/antithesis/node_health.go new file mode 100644 index 000000000000..039442398a73 --- /dev/null +++ b/tests/antithesis/node_health.go @@ -0,0 +1,50 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package antithesis + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/ava-labs/avalanchego/api/health" +) + +// Waits for the nodes at the provided URIs to report healthy. 
+func AwaitHealthyNodes(ctx context.Context, uris []string) error { + for _, uri := range uris { + if err := awaitHealthyNode(ctx, uri); err != nil { + return err + } + } + log.Println("all nodes reported healthy") + return nil +} + +func awaitHealthyNode(ctx context.Context, uri string) error { + client := health.NewClient(uri) + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + + log.Printf("awaiting node health at %s", uri) + for { + res, err := client.Health(ctx, nil) + switch { + case err != nil: + log.Printf("node couldn't be reached at %s", uri) + case res.Healthy: + log.Printf("node reported healthy at %s", uri) + return nil + default: + log.Printf("node reported unhealthy at %s", uri) + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return fmt.Errorf("node health check cancelled at %s: %w", uri, ctx.Err()) + } + } +} diff --git a/tests/antithesis/xsvm/Dockerfile.config b/tests/antithesis/xsvm/Dockerfile.config new file mode 100644 index 000000000000..3c1128c6f51b --- /dev/null +++ b/tests/antithesis/xsvm/Dockerfile.config @@ -0,0 +1,5 @@ +FROM scratch AS execution + +# Copy config artifacts from the build path. For simplicity, artifacts +# are built outside of the docker image. 
+COPY ./build/antithesis/xsvm/ / diff --git a/tests/antithesis/xsvm/Dockerfile.node b/tests/antithesis/xsvm/Dockerfile.node new file mode 100644 index 000000000000..67a1aa01fca9 --- /dev/null +++ b/tests/antithesis/xsvm/Dockerfile.node @@ -0,0 +1,31 @@ +# TAG should identify the builder image +ARG TAG + +# AVALANCHEGO_NODE_IMAGE needs to identify an existing avalanchego node image and should include the tag +ARG AVALANCHEGO_NODE_IMAGE + +# ============= Compilation Stage ================ +FROM antithesis-avalanchego-builder:$TAG AS builder + +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + +WORKDIR $BUILDER_WORKDIR + +# Build xsvm VM +RUN ./scripts/build_xsvm.sh + +# ============= Cleanup Stage ================ +FROM $AVALANCHEGO_NODE_IMAGE AS execution + +# The commit hash and antithesis dependencies should be part of the base image. + +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + +# Copy the executable into the container +RUN mkdir -p /root/.avalanchego/plugins +COPY --from=builder $BUILDER_WORKDIR/build/xsvm \ + /root/.avalanchego/plugins/v3m4wPxaHpvGr8qfMeyK6PRW3idZrPHmYcMTt7oXdK47yurVH + +# The node image's entrypoint will be reused. 
diff --git a/tests/antithesis/xsvm/Dockerfile.workload b/tests/antithesis/xsvm/Dockerfile.workload new file mode 100644 index 000000000000..1ca2f56b862a --- /dev/null +++ b/tests/antithesis/xsvm/Dockerfile.workload @@ -0,0 +1,28 @@ +# TAG should identify the builder image +ARG TAG + +# NODE_IMAGE needs to identify an existing node image and should include the tag +ARG NODE_IMAGE + +# ============= Compilation Stage ================ +FROM antithesis-avalanchego-builder:$TAG AS builder + +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + +WORKDIR $BUILDER_WORKDIR + +# Build the workload +RUN ./scripts/build_antithesis_xsvm_workload.sh + +# ============= Cleanup Stage ================ +# Base the workflow on the node image to support bootstrap testing +FROM $NODE_IMAGE AS execution + +# The builder workdir will vary between instrumented and non-instrumented builders +ARG BUILDER_WORKDIR + +# Copy the executable into the container +COPY --from=builder $BUILDER_WORKDIR/build/antithesis-xsvm-workload ./workload + +CMD [ "./workload" ] diff --git a/tests/antithesis/xsvm/gencomposeconfig/main.go b/tests/antithesis/xsvm/gencomposeconfig/main.go new file mode 100644 index 000000000000..43720d56155c --- /dev/null +++ b/tests/antithesis/xsvm/gencomposeconfig/main.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "fmt" + "log" + "os" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/tests/antithesis" + "github.com/ava-labs/avalanchego/tests/fixture/subnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" +) + +const baseImageName = "antithesis-xsvm" + +// Creates docker-compose.yml and its associated volumes in the target path. 
+func main() { + avalancheGoPath := os.Getenv("AVALANCHEGO_PATH") + if len(avalancheGoPath) == 0 { + log.Fatal("AVALANCHEGO_PATH environment variable not set") + } + + pluginDir := os.Getenv("AVALANCHEGO_PLUGIN_DIR") + if len(pluginDir) == 0 { + log.Fatal("AVALANCHEGO_PLUGIN_DIR environment variable not set") + } + + targetPath := os.Getenv("TARGET_PATH") + if len(targetPath) == 0 { + log.Fatal("TARGET_PATH environment variable not set") + } + + imageTag := os.Getenv("IMAGE_TAG") + if len(imageTag) == 0 { + log.Fatal("IMAGE_TAG environment variable not set") + } + + nodeImageName := fmt.Sprintf("%s-node:%s", baseImageName, imageTag) + workloadImageName := fmt.Sprintf("%s-workload:%s", baseImageName, imageTag) + + // Create a network with an xsvm subnet + network := tmpnet.LocalNetworkOrPanic() + network.Subnets = []*tmpnet.Subnet{ + subnet.NewXSVMOrPanic("xsvm", genesis.VMRQKey, network.Nodes...), + } + + bootstrapVolumePath, err := antithesis.GetBootstrapVolumePath(targetPath) + if err != nil { + log.Fatalf("failed to get bootstrap volume path: %v", err) + } + + if err := antithesis.InitBootstrapDB(network, avalancheGoPath, pluginDir, bootstrapVolumePath); err != nil { + log.Fatalf("failed to initialize db volumes: %v", err) + } + + if err := antithesis.GenerateComposeConfig(network, nodeImageName, workloadImageName, targetPath); err != nil { + log.Fatalf("failed to generate config for docker-compose: %v", err) + } +} diff --git a/tests/antithesis/xsvm/main.go b/tests/antithesis/xsvm/main.go new file mode 100644 index 000000000000..9e70ebff1fbf --- /dev/null +++ b/tests/antithesis/xsvm/main.go @@ -0,0 +1,183 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "crypto/rand" + "log" + "math/big" + "os" + "time" + + "github.com/antithesishq/antithesis-sdk-go/assert" + "github.com/antithesishq/antithesis-sdk-go/lifecycle" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests/antithesis" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/status" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/transfer" +) + +const ( + NumKeys = 5 + PollingInterval = 50 * time.Millisecond +) + +func main() { + c, err := antithesis.NewConfig(os.Args) + if err != nil { + log.Fatalf("invalid config: %s", err) + } + + ctx := context.Background() + if err := antithesis.AwaitHealthyNodes(ctx, c.URIs); err != nil { + log.Fatalf("failed to await healthy nodes: %s", err) + } + + if len(c.ChainIDs) != 1 { + log.Fatalf("expected 1 chainID, saw %d", len(c.ChainIDs)) + } + chainID, err := ids.FromString(c.ChainIDs[0]) + if err != nil { + log.Fatalf("failed to parse chainID: %s", err) + } + + genesisWorkload := &workload{ + id: 0, + chainID: chainID, + key: genesis.VMRQKey, + addrs: set.Of(genesis.VMRQKey.Address()), + uris: c.URIs, + } + + workloads := make([]*workload, NumKeys) + workloads[0] = genesisWorkload + + initialAmount := 100 * units.KiloAvax + for i := 1; i < NumKeys; i++ { + key, err := secp256k1.NewPrivateKey() + if err != nil { + log.Fatalf("failed to generate key: %s", err) + } + + var ( + addr = key.Address() + baseStartTime = time.Now() + ) + transferTxStatus, err := transfer.Transfer( + ctx, + &transfer.Config{ + URI: c.URIs[0], + ChainID: chainID, + AssetID: chainID, + Amount: initialAmount, + To: addr, + PrivateKey: genesisWorkload.key, + }, + ) + if err != nil { + log.Fatalf("failed to issue 
initial funding transfer: %s", err) + } + log.Printf("issued initial funding transfer %s in %s", transferTxStatus.TxID, time.Since(baseStartTime)) + + genesisWorkload.confirmTransferTx(ctx, transferTxStatus) + + workloads[i] = &workload{ + id: i, + chainID: chainID, + key: key, + addrs: set.Of(addr), + uris: c.URIs, + } + } + + lifecycle.SetupComplete(map[string]any{ + "msg": "initialized workers", + "numWorkers": NumKeys, + }) + + for _, w := range workloads[1:] { + go w.run(ctx) + } + genesisWorkload.run(ctx) +} + +type workload struct { + id int + chainID ids.ID + key *secp256k1.PrivateKey + addrs set.Set[ids.ShortID] + uris []string +} + +func (w *workload) run(ctx context.Context) { + timer := time.NewTimer(0) + if !timer.Stop() { + <-timer.C + } + + uri := w.uris[w.id%len(w.uris)] + + client := api.NewClient(uri, w.chainID.String()) + balance, err := client.Balance(ctx, w.key.Address(), w.chainID) + if err != nil { + log.Fatalf("failed to fetch balance: %s", err) + } + log.Printf("worker %d starting with a balance of %d", w.id, balance) + assert.Reachable("worker starting", map[string]any{ + "worker": w.id, + "balance": balance, + }) + + for { + log.Printf("worker %d executing transfer", w.id) + destAddress, _ := w.addrs.Peek() + txStatus, err := transfer.Transfer( + ctx, + &transfer.Config{ + URI: uri, + ChainID: w.chainID, + AssetID: w.chainID, + Amount: units.Schmeckle, + To: destAddress, + PrivateKey: w.key, + }, + ) + if err != nil { + log.Printf("worker %d failed to issue transfer: %s", w.id, err) + } else { + log.Printf("worker %d issued transfer %s in %s", w.id, txStatus.TxID, time.Since(txStatus.StartTime)) + w.confirmTransferTx(ctx, txStatus) + } + + val, err := rand.Int(rand.Reader, big.NewInt(int64(time.Second))) + if err != nil { + log.Fatalf("failed to read randomness: %s", err) + } + + timer.Reset(time.Duration(val.Int64())) + select { + case <-ctx.Done(): + return + case <-timer.C: + } + } +} + +func (w *workload) confirmTransferTx(ctx 
context.Context, tx *status.TxIssuance) { + for _, uri := range w.uris { + client := api.NewClient(uri, w.chainID.String()) + if err := api.AwaitTxAccepted(ctx, client, w.key.Address(), tx.Nonce, PollingInterval); err != nil { + log.Printf("worker %d failed to confirm transaction %s on %s: %s", w.id, tx.TxID, uri, err) + return + } + } + log.Printf("worker %d confirmed transaction %s on all nodes", w.id, tx.TxID) +} diff --git a/tests/e2e/README.md b/tests/e2e/README.md index 50ab608a3c4f..0ba62d0b99f1 100644 --- a/tests/e2e/README.md +++ b/tests/e2e/README.md @@ -57,46 +57,33 @@ packages. `x/transfer/virtuous.go` defines X-Chain transfer tests, labeled with `x`, which can be selected by `./tests/e2e/e2e.test --ginkgo.label-filter "x"`. -## Testing against an existing network +## Reusing temporary networks By default, a new temporary test network will be started before each test run and stopped at the end of the run. When developing e2e tests, -it may be helpful to create a temporary network that can be used -across multiple test runs. This can increase the speed of iteration by -removing the requirement to start a new network for every invocation -of the test under development. +it may be helpful to reuse temporary networks across multiple test +runs. This can increase the speed of iteration by removing the +requirement to start a new network for every invocation of the test +under development. -To create a temporary network for use across test runs: +To enable network reuse across test runs, pass `--reuse-network` as an +argument to the test suite: ```bash -# From the root of the avalanchego repo - -# Build the tmpnetctl binary -$ ./scripts/build_tmpnetctl.sh - -# Start a new network -$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego -... 
-Started network 1000 @ /home/me/.tmpnet/networks/1000 - -Configure tmpnetctl and the test suite to target this network by default -with one of the following statements: - - source /home/me/.tmpnet/networks/1000/network.env - - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 - - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest - -# Start a new test run using the existing network -ginkgo -v ./tests/e2e -- \ - --avalanchego-path=/path/to/avalanchego \ - --ginkgo.focus-file=[name of file containing test] \ - --use-existing-network \ - --network-dir=/path/to/network - -# It is also possible to set the AVALANCHEGO_PATH env var instead of supplying --avalanchego-path -# and to set TMPNET_NETWORK_DIR instead of supplying --network-dir. +ginkgo -v ./tests/e2e -- --avalanchego-path=/path/to/avalanchego --reuse-network ``` -See the tmpnet fixture [README](../fixture/tmpnet/README.md) for more details. +If a network is not already running the first time the suite runs with +`--reuse-network`, one will be started automatically and configured +for reuse by subsequent test runs also supplying `--reuse-network`. + +To stop a network configured for reuse, invoke the test suite with the +`--stop-network` argument. This will stop the network and exit +immediately without executing any tests: + +```bash +ginkgo -v ./tests/e2e -- --stop-network +``` ## Skipping bootstrap checks diff --git a/tests/e2e/banff/suites.go b/tests/e2e/banff/suites.go index 7ac486b373fe..b6da324c98ea 100644 --- a/tests/e2e/banff/suites.go +++ b/tests/e2e/banff/suites.go @@ -30,9 +30,11 @@ var _ = ginkgo.Describe("[Banff]", func() { // Get the P-chain and the X-chain wallets pWallet := wallet.P() xWallet := wallet.X() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() // Pull out useful constants to use when issuing transactions. 
- xChainID := xWallet.BlockchainID() + xChainID := xContext.BlockchainID owner := &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index d218f2df4fa5..c3dda77b985c 100644 --- a/tests/e2e/c/dynamic_fees.go +++ b/tests/e2e/c/dynamic_fees.go @@ -37,7 +37,8 @@ var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { ginkgo.It("should ensure that the gas price is affected by load", func() { ginkgo.By("creating a new private network to ensure isolation from other tests") - privateNetwork := e2e.Env.NewPrivateNetwork() + privateNetwork := tmpnet.NewDefaultNetwork("avalanchego-e2e-dynamic-fees") + e2e.Env.StartPrivateNetwork(privateNetwork) ginkgo.By("allocating a pre-funded key") key := privateNetwork.PreFundedKeys[0] diff --git a/tests/e2e/c/interchain_workflow.go b/tests/e2e/c/interchain_workflow.go index cf8437b751b8..bfb342818a5f 100644 --- a/tests/e2e/c/interchain_workflow.go +++ b/tests/e2e/c/interchain_workflow.go @@ -84,7 +84,11 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { pWallet := baseWallet.P() ginkgo.By("defining common configuration") - avaxAssetID := xWallet.AVAXAssetID() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() + cBuilder := cWallet.Builder() + cContext := cBuilder.Context() + avaxAssetID := xContext.AVAXAssetID // Use the same owner for import funds to X-Chain and P-Chain recipientOwner := secp256k1fx.OutputOwners{ Threshold: 1, @@ -107,7 +111,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { ginkgo.By("exporting AVAX from the C-Chain to the X-Chain", func() { _, err := cWallet.IssueExportTx( - xWallet.BlockchainID(), + xContext.BlockchainID, exportOutputs, e2e.WithDefaultContext(), e2e.WithSuggestedGasPrice(ethClient), @@ -117,7 +121,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { ginkgo.By("importing AVAX from the C-Chain to the X-Chain", func() { _, err := xWallet.IssueImportTx( - 
cWallet.BlockchainID(), + cContext.BlockchainID, &recipientOwner, e2e.WithDefaultContext(), ) @@ -144,7 +148,7 @@ var _ = e2e.DescribeCChain("[Interchain Workflow]", func() { ginkgo.By("importing AVAX from the C-Chain to the P-Chain", func() { _, err = pWallet.IssueImportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, &recipientOwner, e2e.WithDefaultContext(), ) diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go index 3f526d84a94e..f33a3524d2a8 100644 --- a/tests/e2e/e2e_test.go +++ b/tests/e2e/e2e_test.go @@ -6,8 +6,6 @@ package e2e_test import ( "testing" - "github.com/onsi/gomega" - // ensure test packages are scanned by ginkgo _ "github.com/ava-labs/avalanchego/tests/e2e/banff" _ "github.com/ava-labs/avalanchego/tests/e2e/c" @@ -16,6 +14,7 @@ import ( _ "github.com/ava-labs/avalanchego/tests/e2e/x" _ "github.com/ava-labs/avalanchego/tests/e2e/x/transfer" + "github.com/ava-labs/avalanchego/tests/e2e/vms" "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" @@ -23,7 +22,6 @@ import ( ) func TestE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) ginkgo.RunSpecs(t, "e2e test suites") } @@ -35,7 +33,17 @@ func init() { var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process - return e2e.NewTestEnvironment(flagVars, &tmpnet.Network{}).Marshal() + + nodes := tmpnet.NewNodesOrPanic(flagVars.NodeCount()) + subnets := vms.XSVMSubnetsOrPanic(nodes...) 
+ return e2e.NewTestEnvironment( + flagVars, + &tmpnet.Network{ + Owner: "avalanchego-e2e", + Nodes: nodes, + Subnets: subnets, + }, + ).Marshal() }, func(envBytes []byte) { // Run in every ginkgo process diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index a9c70beacbf0..548c82ac1211 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -56,9 +56,16 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL cWallet := baseWallet.C() pWallet := baseWallet.P() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() + pBuilder := pWallet.Builder() + pContext := pBuilder.Context() + cBuilder := cWallet.Builder() + cContext := cBuilder.Context() + ginkgo.By("defining common configuration") recipientEthAddress := evm.GetEthAddress(recipientKey) - avaxAssetID := xWallet.AVAXAssetID() + avaxAssetID := xContext.AVAXAssetID // Use the same owner for sending to X-Chain and importing funds to P-Chain recipientOwner := secp256k1fx.OutputOwners{ Threshold: 1, @@ -114,7 +121,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL Subnet: constants.PrimaryNetworkID, }, nodePOP, - pWallet.AVAXAssetID(), + pContext.AVAXAssetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{rewardKey.Address()}, @@ -143,7 +150,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL }, Subnet: constants.PrimaryNetworkID, }, - pWallet.AVAXAssetID(), + pContext.AVAXAssetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{rewardKey.Address()}, @@ -155,7 +162,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("exporting AVAX from the P-Chain to the X-Chain", func() { _, err := pWallet.IssueExportTx( - xWallet.BlockchainID(), + xContext.BlockchainID, exportOutputs, e2e.WithDefaultContext(), ) @@ -181,7 +188,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", 
ginkgo.Label(e2e.UsesCChainL ginkgo.By("exporting AVAX from the P-Chain to the C-Chain", func() { _, err := pWallet.IssueExportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, exportOutputs, e2e.WithDefaultContext(), ) diff --git a/tests/e2e/p/permissionless_subnets.go b/tests/e2e/p/permissionless_subnets.go index eb0a6e129ae8..dc92bdd60d5c 100644 --- a/tests/e2e/p/permissionless_subnets.go +++ b/tests/e2e/p/permissionless_subnets.go @@ -36,7 +36,9 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { pWallet := baseWallet.P() xWallet := baseWallet.X() - xChainID := xWallet.BlockchainID() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() + xChainID := xContext.BlockchainID var validatorID ids.NodeID ginkgo.By("retrieving the node ID of a primary network validator", func() { diff --git a/tests/e2e/p/staking_rewards.go b/tests/e2e/p/staking_rewards.go index 436d89675bc5..e988cf43c2c4 100644 --- a/tests/e2e/p/staking_rewards.go +++ b/tests/e2e/p/staking_rewards.go @@ -104,6 +104,9 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() + pBuilder := pWallet.Builder() + pContext := pBuilder.Context() + const ( delegationPercent = 0.10 // 10% delegationShare = reward.PercentDenominator * delegationPercent @@ -130,7 +133,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { Subnet: constants.PrimaryNetworkID, }, alphaPOP, - pWallet.AVAXAssetID(), + pContext.AVAXAssetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{alphaValidationRewardKey.Address()}, @@ -159,7 +162,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { Subnet: constants.PrimaryNetworkID, }, betaPOP, - pWallet.AVAXAssetID(), + pContext.AVAXAssetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{betaValidationRewardKey.Address()}, @@ -191,7 +194,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { }, Subnet: constants.PrimaryNetworkID, }, - 
pWallet.AVAXAssetID(), + pContext.AVAXAssetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{gammaDelegationRewardKey.Address()}, @@ -214,7 +217,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { }, Subnet: constants.PrimaryNetworkID, }, - pWallet.AVAXAssetID(), + pContext.AVAXAssetID, &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{deltaDelegationRewardKey.Address()}, @@ -276,7 +279,7 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { pWallet := baseWallet.P() balances, err := pWallet.Builder().GetBalance() require.NoError(err) - rewardBalances[rewardKey.Address()] = balances[pWallet.AVAXAssetID()] + rewardBalances[rewardKey.Address()] = balances[pContext.AVAXAssetID] } require.Len(rewardBalances, len(rewardKeys)) diff --git a/tests/e2e/p/validator_sets.go b/tests/e2e/p/validator_sets.go index 36072e327a21..a3f3e1e9f075 100644 --- a/tests/e2e/p/validator_sets.go +++ b/tests/e2e/p/validator_sets.go @@ -36,11 +36,14 @@ var _ = e2e.DescribePChain("[Validator Sets]", func() { baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() + pBuilder := pWallet.Builder() + pContext := pBuilder.Context() + const delegatorCount = 15 ginkgo.By(fmt.Sprintf("adding %d delegators", delegatorCount), func() { rewardKey, err := secp256k1.NewPrivateKey() require.NoError(err) - avaxAssetID := pWallet.AVAXAssetID() + avaxAssetID := pContext.AVAXAssetID startTime := time.Now().Add(tmpnet.DefaultValidatorStartTimeDiff) endTime := startTime.Add(time.Second * 360) // This is the default flag value for MinDelegatorStake. 
diff --git a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index 1819df448568..3708c6b82c0a 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -41,8 +41,12 @@ var _ = e2e.DescribePChain("[Workflow]", func() { baseWallet := e2e.NewWallet(keychain, nodeURI) pWallet := baseWallet.P() - avaxAssetID := baseWallet.P().AVAXAssetID() + pBuilder := pWallet.Builder() + pContext := pBuilder.Context() + avaxAssetID := pContext.AVAXAssetID xWallet := baseWallet.X() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() pChainClient := platformvm.NewClient(nodeURI.URI) tests.Outf("{{blue}} fetching minimal stake amounts {{/}}\n") @@ -141,7 +145,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { ginkgo.By("export avax from P to X chain", func() { _, err := pWallet.IssueExportTx( - xWallet.BlockchainID(), + xContext.BlockchainID, []*avax.TransferableOutput{ { Asset: avax.Asset{ diff --git a/tests/e2e/vms/xsvm.go b/tests/e2e/vms/xsvm.go new file mode 100644 index 000000000000..5d3557acd405 --- /dev/null +++ b/tests/e2e/vms/xsvm.go @@ -0,0 +1,177 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package vms + +import ( + "fmt" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/subnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/export" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/importtx" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/transfer" + + ginkgo "github.com/onsi/ginkgo/v2" +) + +const pollingInterval = 50 * time.Millisecond + +var ( + subnetAName = "xsvm-a" + subnetBName = "xsvm-b" +) + +func XSVMSubnetsOrPanic(nodes ...*tmpnet.Node) []*tmpnet.Subnet { + key, err := secp256k1.NewPrivateKey() + if err != nil { + panic(err) + } + subnetANodes := nodes + subnetBNodes := nodes + if len(nodes) > 1 { + // Validate tmpnet bootstrap of a disjoint validator set + midpoint := len(nodes) / 2 + subnetANodes = nodes[:midpoint] + subnetBNodes = nodes[midpoint:] + } + return []*tmpnet.Subnet{ + subnet.NewXSVMOrPanic(subnetAName, key, subnetANodes...), + subnet.NewXSVMOrPanic(subnetBName, key, subnetBNodes...), + } +} + +var _ = ginkgo.Describe("[XSVM]", func() { + require := require.New(ginkgo.GinkgoT()) + + ginkgo.It("should support transfers between subnets", func() { + network := e2e.Env.GetNetwork() + + sourceSubnet := network.GetSubnet(subnetAName) + require.NotNil(sourceSubnet) + destinationSubnet := network.GetSubnet(subnetBName) + require.NotNil(destinationSubnet) + + sourceChain := sourceSubnet.Chains[0] + destinationChain := destinationSubnet.Chains[0] + + sourceValidators := getNodesForIDs(network.Nodes, sourceSubnet.ValidatorIDs) + require.NotEmpty(sourceValidators) + sourceAPINode := 
sourceValidators[0] + tests.Outf(" issuing transactions for source subnet on %s (%s)\n", sourceAPINode.NodeID, sourceAPINode.URI) + + destinationValidators := getNodesForIDs(network.Nodes, destinationSubnet.ValidatorIDs) + require.NotEmpty(destinationValidators) + destinationAPINode := destinationValidators[0] + tests.Outf(" issuing transactions for destination subnet on %s (%s)\n", destinationAPINode.NodeID, destinationAPINode.URI) + + destinationKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + + ginkgo.By("checking that the funded key has sufficient funds for the export") + sourceClient := api.NewClient(sourceAPINode.URI, sourceChain.ChainID.String()) + initialSourcedBalance, err := sourceClient.Balance( + e2e.DefaultContext(), + sourceChain.PreFundedKey.Address(), + sourceChain.ChainID, + ) + require.NoError(err) + require.GreaterOrEqual(initialSourcedBalance, units.Schmeckle) + + ginkgo.By(fmt.Sprintf("exporting from chain %s on subnet %s", sourceChain.ChainID, sourceSubnet.SubnetID)) + exportTxStatus, err := export.Export( + e2e.DefaultContext(), + &export.Config{ + URI: sourceAPINode.URI, + SourceChainID: sourceChain.ChainID, + DestinationChainID: destinationChain.ChainID, + Amount: units.Schmeckle, + To: destinationKey.Address(), + PrivateKey: sourceChain.PreFundedKey, + }, + ) + require.NoError(err) + tests.Outf(" issued transaction with ID: %s\n", exportTxStatus.TxID) + + ginkgo.By("checking that the export transaction has been accepted on all nodes") + for _, node := range sourceValidators[1:] { + require.NoError(api.AwaitTxAccepted( + e2e.DefaultContext(), + api.NewClient(node.URI, sourceChain.ChainID.String()), + sourceChain.PreFundedKey.Address(), + exportTxStatus.Nonce, + pollingInterval, + )) + } + + ginkgo.By(fmt.Sprintf("issuing transaction on chain %s on subnet %s to activate snowman++ consensus", + destinationChain.ChainID, destinationSubnet.SubnetID)) + recipientKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + 
transferTxStatus, err := transfer.Transfer( + e2e.DefaultContext(), + &transfer.Config{ + URI: destinationAPINode.URI, + ChainID: destinationChain.ChainID, + AssetID: destinationChain.ChainID, + Amount: units.Schmeckle, + To: recipientKey.Address(), + PrivateKey: destinationChain.PreFundedKey, + }, + ) + require.NoError(err) + tests.Outf(" issued transaction with ID: %s\n", transferTxStatus.TxID) + + ginkgo.By(fmt.Sprintf("importing to blockchain %s on subnet %s", destinationChain.ChainID, destinationSubnet.SubnetID)) + sourceURIs := make([]string, len(sourceValidators)) + for i, node := range sourceValidators { + sourceURIs[i] = node.URI + } + importTxStatus, err := importtx.Import( + e2e.DefaultContext(), + &importtx.Config{ + URI: destinationAPINode.URI, + SourceURIs: sourceURIs, + SourceChainID: sourceChain.ChainID.String(), + DestinationChainID: destinationChain.ChainID.String(), + TxID: exportTxStatus.TxID, + PrivateKey: destinationKey, + }, + ) + require.NoError(err) + tests.Outf(" issued transaction with ID: %s\n", importTxStatus.TxID) + + ginkgo.By("checking that the balance of the source key has decreased") + sourceBalance, err := sourceClient.Balance(e2e.DefaultContext(), sourceChain.PreFundedKey.Address(), sourceChain.ChainID) + require.NoError(err) + require.GreaterOrEqual(initialSourcedBalance-units.Schmeckle, sourceBalance) + + ginkgo.By("checking that the balance of the destination key is non-zero") + destinationClient := api.NewClient(destinationAPINode.URI, destinationChain.ChainID.String()) + destinationBalance, err := destinationClient.Balance(e2e.DefaultContext(), destinationKey.Address(), sourceChain.ChainID) + require.NoError(err) + require.Equal(units.Schmeckle, destinationBalance) + }) +}) + +// Retrieve the nodes corresponding to the provided IDs +func getNodesForIDs(nodes []*tmpnet.Node, nodeIDs []ids.NodeID) []*tmpnet.Node { + desiredNodes := make([]*tmpnet.Node, 0, len(nodeIDs)) + for _, node := range nodes { + for _, nodeID := range 
nodeIDs { + if node.NodeID == nodeID { + desiredNodes = append(desiredNodes, node) + } + } + } + return desiredNodes +} diff --git a/tests/e2e/x/interchain_workflow.go b/tests/e2e/x/interchain_workflow.go index ce13cf8ab7ad..ecc52f41f032 100644 --- a/tests/e2e/x/interchain_workflow.go +++ b/tests/e2e/x/interchain_workflow.go @@ -42,7 +42,11 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("defining common configuration") recipientEthAddress := evm.GetEthAddress(recipientKey) - avaxAssetID := xWallet.AVAXAssetID() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() + cBuilder := cWallet.Builder() + cContext := cBuilder.Context() + avaxAssetID := xContext.AVAXAssetID // Use the same owner for sending to X-Chain and importing funds to P-Chain recipientOwner := secp256k1fx.OutputOwners{ Threshold: 1, @@ -94,7 +98,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("exporting AVAX from the X-Chain to the C-Chain", func() { _, err := xWallet.IssueExportTx( - cWallet.BlockchainID(), + cContext.BlockchainID, exportOutputs, e2e.WithDefaultContext(), ) @@ -106,7 +110,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("importing AVAX from the X-Chain to the C-Chain", func() { _, err := cWallet.IssueImportTx( - xWallet.BlockchainID(), + xContext.BlockchainID, recipientEthAddress, e2e.WithDefaultContext(), e2e.WithSuggestedGasPrice(ethClient), @@ -132,7 +136,7 @@ var _ = e2e.DescribeXChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("importing AVAX from the X-Chain to the P-Chain", func() { _, err := pWallet.IssueImportTx( - xWallet.BlockchainID(), + xContext.BlockchainID, &recipientOwner, e2e.WithDefaultContext(), ) diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 4736ba93f17a..58a0351ba123 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go 
@@ -9,10 +9,11 @@ import ( "math/rand" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture/e2e" "github.com/ava-labs/avalanchego/utils/set" @@ -28,10 +29,14 @@ import ( const ( totalRounds = 50 - metricBlksProcessing = "avalanche_X_blks_processing" - metricBlksAccepted = "avalanche_X_blks_accepted_count" + blksProcessingMetric = "avalanche_snowman_blks_processing" + blksAcceptedMetric = "avalanche_snowman_blks_accepted_count" ) +var xChainMetricLabels = prometheus.Labels{ + chains.ChainLabel: "X", +} + // This test requires that the network not have ongoing blocks and // cannot reliably be run in parallel. var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { @@ -48,10 +53,15 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { // test avoids the case of a previous test having initiated block // processing but not having completed it. e2e.Eventually(func() bool { - allNodeMetrics, err := tests.GetNodesMetrics(rpcEps, metricBlksProcessing) + allNodeMetrics, err := tests.GetNodesMetrics( + e2e.DefaultContext(), + rpcEps, + ) require.NoError(err) + for _, metrics := range allNodeMetrics { - if metrics[metricBlksProcessing] > 0 { + xBlksProcessing, ok := tests.GetMetricValue(metrics, blksProcessingMetric, xChainMetricLabels) + if !ok || xBlksProcessing > 0 { return false } } @@ -62,11 +72,6 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { "The cluster is generating ongoing blocks. Is this test being run in parallel?", ) - allMetrics := []string{ - metricBlksProcessing, - metricBlksAccepted, - } - // Ensure the same set of 10 keys is used for all tests // by retrieving them outside of runFunc. 
testKeys := e2e.Env.AllocatePreFundedKeys(10) @@ -84,7 +89,10 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { keychain := secp256k1fx.NewKeychain(testKeys...) baseWallet := e2e.NewWallet(keychain, e2e.Env.GetRandomNodeURI()) - avaxAssetID := baseWallet.X().AVAXAssetID() + xWallet := baseWallet.X() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() + avaxAssetID := xContext.AVAXAssetID wallets := make([]primary.Wallet, len(testKeys)) shortAddrs := make([]ids.ShortID, len(testKeys)) @@ -99,10 +107,15 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { ) } - metricsBeforeTx, err := tests.GetNodesMetrics(rpcEps, allMetrics...) + metricsBeforeTx, err := tests.GetNodesMetrics( + e2e.DefaultContext(), + rpcEps, + ) require.NoError(err) for _, uri := range rpcEps { - tests.Outf("{{green}}metrics at %q:{{/}} %v\n", uri, metricsBeforeTx[uri]) + for _, metric := range []string{blksProcessingMetric, blksAcceptedMetric} { + tests.Outf("{{green}}%s at %q:{{/}} %v\n", metric, uri, metricsBeforeTx[uri][metric]) + } } testBalances := make([]uint64, 0) @@ -146,7 +159,7 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { amountToTransfer := senderOrigBal / 10 - senderNewBal := senderOrigBal - amountToTransfer - baseWallet.X().BaseTxFee() + senderNewBal := senderOrigBal - amountToTransfer - xContext.BaseTxFee receiverNewBal := receiverOrigBal + amountToTransfer ginkgo.By("X-Chain transfer with wrong amount must fail", func() { @@ -224,28 +237,28 @@ RECEIVER NEW BALANCE (AFTER) : %21d AVAX txID := tx.ID() for _, u := range rpcEps { xc := avm.NewClient(u, "X") - status, err := xc.ConfirmTx(e2e.DefaultContext(), txID, 2*time.Second) - require.NoError(err) - require.Equal(choices.Accepted, status) + require.NoError(avm.AwaitTxAccepted(xc, e2e.DefaultContext(), txID, 2*time.Second)) } for _, u := range rpcEps { xc := avm.NewClient(u, "X") - status, err := xc.ConfirmTx(e2e.DefaultContext(), txID, 
2*time.Second) - require.NoError(err) - require.Equal(choices.Accepted, status) + require.NoError(avm.AwaitTxAccepted(xc, e2e.DefaultContext(), txID, 2*time.Second)) - mm, err := tests.GetNodeMetrics(u, allMetrics...) + mm, err := tests.GetNodeMetrics(e2e.DefaultContext(), u) require.NoError(err) prev := metricsBeforeTx[u] // +0 since X-chain tx must have been processed and accepted // by now - require.Equal(mm[metricBlksProcessing], prev[metricBlksProcessing]) + currentXBlksProcessing, _ := tests.GetMetricValue(mm, blksProcessingMetric, xChainMetricLabels) + previousXBlksProcessing, _ := tests.GetMetricValue(prev, blksProcessingMetric, xChainMetricLabels) + require.Equal(currentXBlksProcessing, previousXBlksProcessing) // +1 since X-chain tx must have been accepted by now - require.Equal(mm[metricBlksAccepted], prev[metricBlksAccepted]+1) + currentXBlksAccepted, _ := tests.GetMetricValue(mm, blksAcceptedMetric, xChainMetricLabels) + previousXBlksAccepted, _ := tests.GetMetricValue(prev, blksAcceptedMetric, xChainMetricLabels) + require.Equal(currentXBlksAccepted, previousXBlksAccepted+1) metricsBeforeTx[u] = mm } diff --git a/tests/fixture/e2e/env.go b/tests/fixture/e2e/env.go index a5fe08d4d899..05fbbd97ac86 100644 --- a/tests/fixture/e2e/env.go +++ b/tests/fixture/e2e/env.go @@ -5,9 +5,9 @@ package e2e import ( "encoding/json" + "errors" "math/rand" "os" - "path/filepath" "time" "github.com/stretchr/testify/require" @@ -18,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/tests/fixture" "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ginkgo "github.com/onsi/ginkgo/v2" @@ -43,6 +42,10 @@ type TestEnvironment struct { URIs []tmpnet.NodeURI // The URI used to access the http server that allocates test data TestDataServerURI string + // The duration to wait before shutting down private networks. 
A + // non-zero value may be useful to ensure all metrics can be + // scraped before shutdown. + PrivateNetworkShutdownDelay time.Duration require *require.Assertions } @@ -57,52 +60,80 @@ func (te *TestEnvironment) Marshal() []byte { func NewTestEnvironment(flagVars *FlagVars, desiredNetwork *tmpnet.Network) *TestEnvironment { require := require.New(ginkgo.GinkgoT()) - networkDir := flagVars.NetworkDir() - - // Load or create a test network var network *tmpnet.Network - if len(networkDir) > 0 { - var err error - network, err = tmpnet.ReadNetwork(networkDir) - require.NoError(err) - tests.Outf("{{yellow}}Using an existing network configured at %s{{/}}\n", network.Dir) - - // Set the desired subnet configuration to ensure subsequent creation. - for _, subnet := range desiredNetwork.Subnets { - if existing := network.GetSubnet(subnet.Name); existing != nil { - // Already present - continue + // Need to load the network if it is being stopped or reused + if flagVars.StopNetwork() || flagVars.ReuseNetwork() { + networkDir := flagVars.NetworkDir() + var networkSymlink string // If populated, prompts removal of the referenced symlink if --stop-network is specified + if len(networkDir) == 0 { + // Attempt to reuse the network at the default owner path + symlinkPath, err := tmpnet.GetReusableNetworkPathForOwner(desiredNetwork.Owner) + require.NoError(err) + _, err = os.Stat(symlinkPath) + if !errors.Is(err, os.ErrNotExist) { + // Try to load the existing network + require.NoError(err) + networkDir = symlinkPath + // Enable removal of the referenced symlink if --stop-network is specified + networkSymlink = symlinkPath } - network.Subnets = append(network.Subnets, subnet) } - } else { - network = desiredNetwork - StartNetwork(network, DefaultNetworkDir, flagVars.AvalancheGoExecPath(), flagVars.PluginDir()) - } - // A new network will always need subnet creation and an existing - // network will also need subnets to be created the first time it - // is used. 
- require.NoError(network.CreateSubnets(DefaultContext(), ginkgo.GinkgoWriter)) + if len(networkDir) > 0 { + var err error + network, err = tmpnet.ReadNetwork(networkDir) + require.NoError(err) + tests.Outf("{{yellow}}Loaded a network configured at %s{{/}}\n", network.Dir) + } - // Wait for chains to have bootstrapped on all nodes - Eventually(func() bool { - for _, subnet := range network.Subnets { - for _, validatorID := range subnet.ValidatorIDs { - uri, err := network.GetURIForNodeID(validatorID) - require.NoError(err) - infoClient := info.NewClient(uri) - for _, chain := range subnet.Chains { - isBootstrapped, err := infoClient.IsBootstrapped(DefaultContext(), chain.ChainID.String()) - // Ignore errors since a chain id that is not yet known will result in a recoverable error. - if err != nil || !isBootstrapped { - return false - } + if flagVars.StopNetwork() { + if len(networkSymlink) > 0 { + // Remove the symlink to avoid attempts to reuse the stopped network + tests.Outf("Removing symlink %s\n", networkSymlink) + if err := os.Remove(networkSymlink); !errors.Is(err, os.ErrNotExist) { + require.NoError(err) } } + if network != nil { + tests.Outf("Stopping network\n") + require.NoError(network.Stop(DefaultContext())) + } else { + tests.Outf("No network to stop\n") + } + os.Exit(0) } - return true - }, DefaultTimeout, DefaultPollingInterval, "failed to see all chains bootstrap before timeout") + } + + // Start a new network + if network == nil { + network = desiredNetwork + StartNetwork( + network, + flagVars.AvalancheGoExecPath(), + flagVars.PluginDir(), + flagVars.NetworkShutdownDelay(), + flagVars.ReuseNetwork(), + ) + + // Wait for chains to have bootstrapped on all nodes + Eventually(func() bool { + for _, subnet := range network.Subnets { + for _, validatorID := range subnet.ValidatorIDs { + uri, err := network.GetURIForNodeID(validatorID) + require.NoError(err) + infoClient := info.NewClient(uri) + for _, chain := range subnet.Chains { + isBootstrapped, 
err := infoClient.IsBootstrapped(DefaultContext(), chain.ChainID.String()) + // Ignore errors since a chain id that is not yet known will result in a recoverable error. + if err != nil || !isBootstrapped { + return false + } + } + } + } + return true + }, DefaultTimeout, DefaultPollingInterval, "failed to see all chains bootstrap before timeout") + } uris := network.GetNodeURIs() require.NotEmpty(uris, "network contains no nodes") @@ -115,10 +146,11 @@ func NewTestEnvironment(flagVars *FlagVars, desiredNetwork *tmpnet.Network) *Tes require.NoError(err) return &TestEnvironment{ - NetworkDir: network.Dir, - URIs: uris, - TestDataServerURI: testDataServerURI, - require: require, + NetworkDir: network.Dir, + URIs: uris, + TestDataServerURI: testDataServerURI, + PrivateNetworkShutdownDelay: flagVars.NetworkShutdownDelay(), + require: require, } } @@ -158,27 +190,19 @@ func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { } // Create a new private network that is not shared with other tests. -func (te *TestEnvironment) NewPrivateNetwork() *tmpnet.Network { - // Load the shared network to retrieve its path and exec path +func (te *TestEnvironment) StartPrivateNetwork(network *tmpnet.Network) { + // Use the same configuration as the shared network sharedNetwork, err := tmpnet.ReadNetwork(te.NetworkDir) te.require.NoError(err) - network := &tmpnet.Network{} - - // The private networks dir is under the shared network dir to ensure it - // will be included in the artifact uploaded in CI. 
- privateNetworksDir := filepath.Join(sharedNetwork.Dir, PrivateNetworksDirName) - te.require.NoError(os.MkdirAll(privateNetworksDir, perms.ReadWriteExecute)) - pluginDir, err := sharedNetwork.DefaultFlags.GetStringVal(config.PluginDirKey) te.require.NoError(err) StartNetwork( network, - privateNetworksDir, sharedNetwork.DefaultRuntimeConfig.AvalancheGoPath, pluginDir, + te.PrivateNetworkShutdownDelay, + false, /* reuseNetwork */ ) - - return network } diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go index 2a00df97a885..9e55d0cda16c 100644 --- a/tests/fixture/e2e/flags.go +++ b/tests/fixture/e2e/flags.go @@ -7,15 +7,19 @@ import ( "flag" "fmt" "os" + "time" "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" ) type FlagVars struct { - avalancheGoExecPath string - pluginDir string - networkDir string - useExistingNetwork bool + avalancheGoExecPath string + pluginDir string + networkDir string + reuseNetwork bool + delayNetworkShutdown bool + stopNetwork bool + nodeCount int } func (v *FlagVars) AvalancheGoExecPath() string { @@ -27,7 +31,7 @@ func (v *FlagVars) PluginDir() string { } func (v *FlagVars) NetworkDir() string { - if !v.useExistingNetwork { + if !v.reuseNetwork { return "" } if len(v.networkDir) > 0 { @@ -36,8 +40,25 @@ func (v *FlagVars) NetworkDir() string { return os.Getenv(tmpnet.NetworkDirEnvName) } -func (v *FlagVars) UseExistingNetwork() bool { - return v.useExistingNetwork +func (v *FlagVars) ReuseNetwork() bool { + return v.reuseNetwork +} + +func (v *FlagVars) NetworkShutdownDelay() time.Duration { + if v.delayNetworkShutdown { + // Only return a non-zero value if the delay is enabled. Make sure this value takes + // into account the scrape_interval defined in scripts/run_prometheus.sh. 
+ return 12 * time.Second + } + return 0 +} + +func (v *FlagVars) StopNetwork() bool { + return v.stopNetwork +} + +func (v *FlagVars) NodeCount() int { + return v.nodeCount } func RegisterFlags() *FlagVars { @@ -58,13 +79,31 @@ func RegisterFlags() *FlagVars { &vars.networkDir, "network-dir", "", - fmt.Sprintf("[optional] the dir containing the configuration of an existing network to target for testing. Will only be used if --use-existing-network is specified. Also possible to configure via the %s env variable.", tmpnet.NetworkDirEnvName), + fmt.Sprintf("[optional] the dir containing the configuration of an existing network to target for testing. Will only be used if --reuse-network is specified. Also possible to configure via the %s env variable.", tmpnet.NetworkDirEnvName), ) flag.BoolVar( - &vars.useExistingNetwork, - "use-existing-network", + &vars.reuseNetwork, + "reuse-network", false, - "[optional] whether to target the existing network identified by --network-dir.", + "[optional] reuse an existing network. 
If an existing network is not already running, create a new one and leave it running for subsequent usage.", + ) + flag.BoolVar( + &vars.delayNetworkShutdown, + "delay-network-shutdown", + false, + "[optional] whether to delay network shutdown to allow a final metrics scrape.", + ) + flag.BoolVar( + &vars.stopNetwork, + "stop-network", + false, + "[optional] stop an existing network and exit without executing any tests.", + ) + flag.IntVar( + &vars.nodeCount, + "node-count", + tmpnet.DefaultNodeCount, + "number of nodes the network should initially consist of", ) return &vars diff --git a/tests/fixture/e2e/helpers.go b/tests/fixture/e2e/helpers.go index c88f3cacfb10..6f6e5382dc7a 100644 --- a/tests/fixture/e2e/helpers.go +++ b/tests/fixture/e2e/helpers.go @@ -121,8 +121,8 @@ func Eventually(condition func() bool, waitFor time.Duration, tick time.Duration func AddEphemeralNode(network *tmpnet.Network, flags tmpnet.FlagsMap) *tmpnet.Node { require := require.New(ginkgo.GinkgoT()) - node, err := network.AddEphemeralNode(DefaultContext(), ginkgo.GinkgoWriter, flags) - require.NoError(err) + node := tmpnet.NewEphemeralNode(flags) + require.NoError(network.StartNode(DefaultContext(), ginkgo.GinkgoWriter, node)) ginkgo.DeferCleanup(func() { tests.Outf("shutting down ephemeral node %q\n", node.NodeID) @@ -199,10 +199,10 @@ func CheckBootstrapIsPossible(network *tmpnet.Network) { ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) defer cancel() - node, err := network.AddEphemeralNode(ctx, ginkgo.GinkgoWriter, tmpnet.FlagsMap{}) - // AddEphemeralNode will initiate node stop if an error is encountered during start, + node := tmpnet.NewEphemeralNode(tmpnet.FlagsMap{}) + require.NoError(network.StartNode(ctx, ginkgo.GinkgoWriter, node)) + // StartNode will initiate node stop if an error is encountered during start, // so no further cleanup effort is required if an error is seen here. 
- require.NoError(err) // Ensure the node is always stopped at the end of the check defer func() { @@ -216,27 +216,52 @@ func CheckBootstrapIsPossible(network *tmpnet.Network) { } // Start a temporary network with the provided avalanchego binary. -func StartNetwork(network *tmpnet.Network, rootNetworkDir string, avalancheGoExecPath string, pluginDir string) { +func StartNetwork( + network *tmpnet.Network, + avalancheGoExecPath string, + pluginDir string, + shutdownDelay time.Duration, + reuseNetwork bool, +) { require := require.New(ginkgo.GinkgoT()) require.NoError( - tmpnet.StartNewNetwork( + tmpnet.BootstrapNewNetwork( DefaultContext(), ginkgo.GinkgoWriter, network, - rootNetworkDir, + DefaultNetworkDir, avalancheGoExecPath, pluginDir, - tmpnet.DefaultNodeCount, ), ) + tests.Outf("{{green}}Successfully started network{{/}}\n") + + symlinkPath, err := tmpnet.GetReusableNetworkPathForOwner(network.Owner) + require.NoError(err) + + if reuseNetwork { + // Symlink the path of the created network to the default owner path (e.g. latest_avalanchego-e2e) + // to enable easy discovery for reuse. 
+ require.NoError(os.Symlink(network.Dir, symlinkPath)) + tests.Outf("{{green}}Symlinked %s to %s to enable reuse{{/}}\n", network.Dir, symlinkPath) + } + ginkgo.DeferCleanup(func() { + if reuseNetwork { + tests.Outf("{{yellow}}Skipping shutdown for network %s (symlinked to %s) to enable reuse{{/}}\n", network.Dir, symlinkPath) + return + } + + if shutdownDelay > 0 { + tests.Outf("Waiting %s before network shutdown to ensure final metrics scrape\n", shutdownDelay) + time.Sleep(shutdownDelay) + } + tests.Outf("Shutting down network\n") ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) defer cancel() require.NoError(network.Stop(ctx)) }) - - tests.Outf("{{green}}Successfully started network{{/}}\n") } diff --git a/tests/fixture/subnet/xsvm.go b/tests/fixture/subnet/xsvm.go new file mode 100644 index 000000000000..c5bb03bc2026 --- /dev/null +++ b/tests/fixture/subnet/xsvm.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package subnet + +import ( + "math" + "time" + + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" +) + +func NewXSVMOrPanic(name string, key *secp256k1.PrivateKey, nodes ...*tmpnet.Node) *tmpnet.Subnet { + if len(nodes) == 0 { + panic("a subnet must be validated by at least one node") + } + + genesisBytes, err := genesis.Codec.Marshal(genesis.CodecVersion, &genesis.Genesis{ + Timestamp: time.Now().Unix(), + Allocations: []genesis.Allocation{ + { + Address: key.Address(), + Balance: math.MaxUint64, + }, + }, + }) + if err != nil { + panic(err) + } + + return &tmpnet.Subnet{ + Name: name, + Chains: []*tmpnet.Chain{ + { + VMID: constants.XSVMID, + Genesis: genesisBytes, + PreFundedKey: key, + }, + }, + ValidatorIDs: tmpnet.NodesToIDs(nodes...), + } +} diff --git a/tests/fixture/tmpnet/README.md b/tests/fixture/tmpnet/README.md index 909a29c6ee12..b059da9a554d 100644 --- a/tests/fixture/tmpnet/README.md +++ b/tests/fixture/tmpnet/README.md @@ -49,14 +49,14 @@ A temporary network can be managed by the `tmpnetctl` cli tool: # Build the tmpnetctl binary $ ./scripts/build_tmpnetctl.sh -# Start a new network +# Start a new network. Possible to specify the number of nodes (> 1) with --node-count. $ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego ... 
-Started network 1000 @ /home/me/.tmpnet/networks/1000 +Started network /home/me/.tmpnet/networks/20240306-152305.924531 (UUID: abaab590-b375-44f6-9ca5-f8a6dc061725) Configure tmpnetctl to target this network by default with one of the following statements: - - source /home/me/.tmpnet/networks/1000/network.env - - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 + - source /home/me/.tmpnet/networks/20240306-152305.924531/network.env + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/20240306-152305.924531 - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest # Stop the network @@ -66,10 +66,18 @@ $ ./build/tmpnetctl stop-network --network-dir=/path/to/network Note the export of the path ending in `latest`. This is a symlink that is set to the last network created by `tmpnetctl start-network`. Setting the `TMPNET_NETWORK_DIR` env var to this symlink ensures that -`tmpnetctl` commands and e2e execution with -`--use-existing-network` will target the most recently deployed temporary +`tmpnetctl` commands target the most recently deployed temporary network. +#### Deprecated usage with e2e suite + +`tmpnetctl` was previously used to create temporary networks for use +across multiple e2e test runs. As the usage of temporary networks has +expanded to require subnets, that usage has been supplanted by the +`--reuse-network` flag defined for the e2e suite. It was easier to +support defining subnet configuration in the e2e suite in code than to +extend a cli tool like `tmpnetctl` to support similar capabilities. 
+ ### Via code A temporary network can be managed in code: @@ -79,6 +87,7 @@ network := &tmpnet.Network{ // Configure non-default values fo DefaultFlags: tmpnet.FlagsMap{ config.LogLevelKey: "INFO", // Change one of the network's defaults }, + Nodes: tmpnet.NewNodesOrPanic(5), // Number of initial validating nodes Subnets: []*tmpnet.Subnet{ // Subnets to create on the new network once it is running { Name: "xsvm-a", // User-defined name used to reference subnet in code and on disk @@ -89,18 +98,18 @@ network := &tmpnet.Network{ // Configure non-default values fo PreFundedKey: , // (Optional) A private key that is funded in the genesis bytes }, }, + ValidatorIDs: , // The IDs of nodes that validate the subnet }, }, } -_ := tmpnet.StartNewNetwork( // Start the network +_ := tmpnet.BootstrapNewNetwork( // Bootstrap the network ctx, // Context used to limit duration of waiting for network health ginkgo.GinkgoWriter, // Writer to report progress of initialization network, "", // Empty string uses the default network path (~/tmpnet/networks) "/path/to/avalanchego", // The path to the binary that nodes will execute "/path/to/plugins", // The path nodes will use for plugin binaries (suggested value ~/.avalanchego/plugins) - 5, // Number of initial validating nodes ) uris := network.GetNodeURIs() @@ -128,8 +137,12 @@ A temporary network relies on configuration written to disk in the following str ``` HOME └── .tmpnet // Root path for the temporary network fixture + ├── prometheus // Working directory for a metrics-scraping prometheus instance + │ └── file_sd_configs // Directory containing file-based service discovery config for prometheus + ├── promtail // Working directory for a log-collecting promtail instance + │ └── file_sd_configs // Directory containing file-based service discovery config for promtail └── networks // Default parent directory for temporary networks - └── 1000 // The networkID is used to name the network dir and starts at 1000 + └── 
20240306-152305.924531 // The timestamp of creation is the name of a network's directory ├── NodeID-37E8UK3x2YFsHE3RdALmfWcppcZ1eTuj9 // The ID of a node is the name of its data dir │ ├── chainData │ │ └── ... @@ -150,9 +163,11 @@ HOME ├── config.json // Common configuration (including defaults and pre-funded keys) ├── genesis.json // Genesis for all nodes ├── network.env // Sets network dir env var to simplify network usage - └── subnets // Parent directory for subnet definitions - ├─ subnet-a.json // Configuration for subnet-a and its chain(s) - └─ subnet-b.json // Configuration for subnet-b and its chain(s) + └── subnets // Directory containing subnet config for both avalanchego and tmpnet + ├── subnet-a.json // tmpnet configuration for subnet-a and its chain(s) + ├── subnet-b.json // tmpnet configuration for subnet-b and its chain(s) + └── 2jRbWtaonb2RP8DEM5DBsd7o2o8d...RqNs9 // The ID of a subnet is the name of its configuration dir + └── config.json // avalanchego configuration for subnet ``` ### Common networking configuration @@ -229,3 +244,103 @@ The process details of a node are written by avalanchego to `[base-data-dir]/process.json`. The file contains the PID of the node process, the URI of the node's API, and the address other nodes can use to bootstrap themselves (aka staking address). + +## Monitoring + +Monitoring is an essential part of understanding the workings of a +distributed system such as avalanchego. The tmpnet fixture enables +collection of logs and metrics from temporary networks to a monitoring +stack (prometheus+loki+grafana) to enable results to be analyzed and +shared. 
+ +### Example usage + +```bash +# Start prometheus to collect metrics +PROMETHEUS_ID= PROMETHEUS_PASSWORD= ./scripts/run_prometheus.sh + +# Start promtail to collect logs +LOKI_ID= LOKI_PASSWORD= ./scripts/run_promtail.sh + +# Network start emits link to grafana displaying collected logs and metrics +./build/tmpnetctl start-network +``` + +### Metrics collection + +When a node is started, configuration enabling collection of metrics +from the node is written to +`~/.tmpnet/prometheus/file_sd_configs/[network uuid]-[node id].json`. + +The `scripts/run_prometheus.sh` script starts prometheus in agent mode +configured to scrape metrics from configured nodes and forward the +metrics to a persistent prometheus instance. The script requires that +the `PROMETHEUS_ID` and `PROMETHEUS_PASSWORD` env vars be set. By +default the prometheus instance at +https://prometheus-experimental.avax-dev.network will be targeted and +this can be overridden via the `PROMETHEUS_URL` env var. + +### Log collection + +Nodes log are stored at `~/.tmpnet/networks/[network id]/[node +id]/logs` by default, and can optionally be forwarded to loki with +promtail. + +When a node is started, promtail configuration enabling +collection of logs for the node is written to +`~/.tmpnet/promtail/file_sd_configs/[network +uuid]-[node id].json`. + +The `scripts/run_promtail.sh` script starts promtail configured to +collect logs from configured nodes and forward the results to loki. The +script requires that the `LOKI_ID` and `LOKI_PASSWORD` env vars be +set. By default the loki instance at +https://loki-experimental.avax-dev.network will be targeted and this +can be overridden via the `LOKI_URL` env var. 
+ +### Labels + +The logs and metrics collected for temporary networks will have the +following labels applied: + + - `network_uuid` + - uniquely identifies a network across hosts + - `node_id` + - `is_ephemeral_node` + - 'ephemeral' nodes are expected to run for only a fraction of the + life of a network + - `network_owner` + - an arbitrary string that can be used to differentiate results + when a CI job runs more than one network + +When a network runs as part of a github CI job, the following +additional labels will be applied: + + - `gh_repo` + - `gh_workflow` + - `gh_run_id` + - `gh_run_number` + - `gh_run_attempt` + - `gh_job_id` + +These labels are sourced from Github Actions' `github` context as per +https://docs.github.com/en/actions/learn-github-actions/contexts#github-context. + +### Viewing + +#### Local networks + +When a network is started with tmpnet, a link to the [default grafana +instance](https://grafana-experimental.avax-dev.network) will be +emitted. The dashboards will only be populated if prometheus and +promtail are running locally (as per previous sections) to collect +metrics and logs. + +#### CI + +Collection of logs and metrics is enabled for CI jobs that use +tmpnet. Each job will execute a step titled `Notify of metrics +availability` that emits a link to grafana parametized to show results +for the job. Additional links to grafana parametized to show results +for individual network will appear in the logs displaying the start of +those networks. 
diff --git a/tests/fixture/tmpnet/cmd/main.go b/tests/fixture/tmpnet/cmd/main.go index dd59c300bbb3..d2b60682323b 100644 --- a/tests/fixture/tmpnet/cmd/main.go +++ b/tests/fixture/tmpnet/cmd/main.go @@ -49,6 +49,7 @@ func main() { var ( rootDir string + networkOwner string avalancheGoPath string pluginDir string nodeCount uint8 @@ -63,21 +64,23 @@ func main() { // Root dir will be defaulted on start if not provided - network := &tmpnet.Network{} + network := &tmpnet.Network{ + Owner: networkOwner, + Nodes: tmpnet.NewNodesOrPanic(int(nodeCount)), + } // Extreme upper bound, should never take this long networkStartTimeout := 2 * time.Minute ctx, cancel := context.WithTimeout(context.Background(), networkStartTimeout) defer cancel() - err := tmpnet.StartNewNetwork( + err := tmpnet.BootstrapNewNetwork( ctx, os.Stdout, network, rootDir, avalancheGoPath, pluginDir, - int(nodeCount), ) if err != nil { return err @@ -94,7 +97,7 @@ func main() { return err } - fmt.Fprintf(os.Stdout, "\nConfigure tmpnetctl to target this network by default with one of the following statements:\n") + fmt.Fprintln(os.Stdout, "\nConfigure tmpnetctl to target this network by default with one of the following statements:") fmt.Fprintf(os.Stdout, " - source %s\n", network.EnvFilePath()) fmt.Fprintf(os.Stdout, " - %s\n", network.EnvFileContents()) fmt.Fprintf(os.Stdout, " - export %s=%s\n", tmpnet.NetworkDirEnvName, latestSymlinkPath) @@ -106,6 +109,7 @@ func main() { startNetworkCmd.PersistentFlags().StringVar(&avalancheGoPath, "avalanchego-path", os.Getenv(tmpnet.AvalancheGoPathEnvName), "The path to an avalanchego binary") startNetworkCmd.PersistentFlags().StringVar(&pluginDir, "plugin-dir", os.ExpandEnv("$HOME/.avalanchego/plugins"), "[optional] the dir containing VM plugins") startNetworkCmd.PersistentFlags().Uint8Var(&nodeCount, "node-count", tmpnet.DefaultNodeCount, "Number of nodes the network should initially consist of") + startNetworkCmd.PersistentFlags().StringVar(&networkOwner, 
"network-owner", "", "The string identifying the intended owner of the network") rootCmd.AddCommand(startNetworkCmd) stopNetworkCmd := &cobra.Command{ diff --git a/tests/fixture/tmpnet/defaults.go b/tests/fixture/tmpnet/defaults.go index 2b88ef49afc1..c5dbfeeebc96 100644 --- a/tests/fixture/tmpnet/defaults.go +++ b/tests/fixture/tmpnet/defaults.go @@ -7,6 +7,7 @@ import ( "time" "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) @@ -34,23 +35,33 @@ const ( defaultConfigFilename = "config.json" ) -// A set of flags appropriate for testing. -func DefaultFlags() FlagsMap { - // Supply only non-default configuration to ensure that default values will be used. +// Flags appropriate for networks used for all types of testing. +func DefaultTestFlags() FlagsMap { return FlagsMap{ - config.NetworkPeerListGossipFreqKey: "250ms", - config.NetworkMaxReconnectDelayKey: "1s", - config.PublicIPKey: "127.0.0.1", - config.HTTPHostKey: "127.0.0.1", - config.StakingHostKey: "127.0.0.1", - config.HealthCheckFreqKey: "2s", - config.AdminAPIEnabledKey: true, - config.IpcAPIEnabledKey: true, - config.IndexEnabledKey: true, - config.LogDisplayLevelKey: "INFO", - config.LogLevelKey: "DEBUG", - config.MinStakeDurationKey: DefaultMinStakeDuration.String(), + config.NetworkPeerListPullGossipFreqKey: "250ms", + config.NetworkMaxReconnectDelayKey: "1s", + config.HealthCheckFreqKey: "2s", + config.AdminAPIEnabledKey: true, + config.IndexEnabledKey: true, + } +} + +// Flags appropriate for tmpnet networks. +func DefaultTmpnetFlags() FlagsMap { + // Supply only non-default configuration to ensure that default values will be used. 
+ flags := FlagsMap{ + // Specific to tmpnet deployment + config.PublicIPKey: "127.0.0.1", + config.HTTPHostKey: "127.0.0.1", + config.StakingHostKey: "127.0.0.1", + config.LogDisplayLevelKey: logging.Off.String(), // Display logging not needed since nodes run headless + config.LogLevelKey: logging.Debug.String(), + // Specific to e2e testing + config.MinStakeDurationKey: DefaultMinStakeDuration.String(), + config.ProposerVMUseCurrentHeightKey: true, } + flags.SetDefaults(DefaultTestFlags()) + return flags } // A set of chain configurations appropriate for testing. diff --git a/tests/fixture/tmpnet/detached_process_default.go b/tests/fixture/tmpnet/detached_process_default.go new file mode 100644 index 000000000000..0e4b20ddd8e3 --- /dev/null +++ b/tests/fixture/tmpnet/detached_process_default.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +//go:build linux || darwin + +package tmpnet + +import ( + "os/exec" + "syscall" +) + +func configureDetachedProcess(cmd *exec.Cmd) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setsid: true, + } +} diff --git a/tests/fixture/tmpnet/detached_process_windows.go b/tests/fixture/tmpnet/detached_process_windows.go new file mode 100644 index 000000000000..bf7ff9a726b4 --- /dev/null +++ b/tests/fixture/tmpnet/detached_process_windows.go @@ -0,0 +1,12 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +//go:build windows + +package tmpnet + +import "os/exec" + +func configureDetachedProcess(*exec.Cmd) { + panic("tmpnet deployment to windows is not supported") +} diff --git a/tests/fixture/tmpnet/flags.go b/tests/fixture/tmpnet/flags.go index 3084982ea704..c3ec5aabc192 100644 --- a/tests/fixture/tmpnet/flags.go +++ b/tests/fixture/tmpnet/flags.go @@ -18,13 +18,13 @@ import ( type FlagsMap map[string]interface{} // Utility function simplifying construction of a FlagsMap from a file. 
-func ReadFlagsMap(path string, description string) (*FlagsMap, error) { +func ReadFlagsMap(path string, description string) (FlagsMap, error) { bytes, err := os.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to read %s: %w", description, err) } - flagsMap := &FlagsMap{} - if err := json.Unmarshal(bytes, flagsMap); err != nil { + flagsMap := FlagsMap{} + if err := json.Unmarshal(bytes, &flagsMap); err != nil { return nil, fmt.Errorf("failed to unmarshal %s: %w", description, err) } return flagsMap, nil @@ -55,6 +55,20 @@ func (f FlagsMap) GetStringVal(key string) (string, error) { return val, nil } +// GetBoolVal simplifies retrieving a map value as a bool. +func (f FlagsMap) GetBoolVal(key string, defaultVal bool) (bool, error) { + rawVal, ok := f[key] + if !ok { + return defaultVal, nil + } + + val, err := cast.ToBoolE(rawVal) + if err != nil { + return false, fmt.Errorf("failed to cast value for %q: %w", key, err) + } + return val, nil +} + // Write simplifies writing a FlagsMap to the provided path. The // description is used in error messages. func (f FlagsMap) Write(path string, description string) error { diff --git a/tests/fixture/tmpnet/local_network.go b/tests/fixture/tmpnet/local_network.go new file mode 100644 index 000000000000..0687f5790202 --- /dev/null +++ b/tests/fixture/tmpnet/local_network.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tmpnet + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/config" + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +func LocalNetworkOrPanic() *Network { + // Temporary network configured with local network keys + // See: /staking/local/README.md + network := &Network{ + NetworkID: genesis.LocalConfig.NetworkID, + PreFundedKeys: []*secp256k1.PrivateKey{ + genesis.EWOQKey, // Funded in the local genesis + }, + Nodes: []*Node{ + { + Flags: FlagsMap{ + config.StakingTLSKeyContentKey: "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBeW1Fa2NQMXRHS1dCL3pFMElZaGEwZEp2UFplc2s3c3k2UTdZN25hLytVWjRTRDc3CmFwTzJDQnB2NXZaZHZjY0VlQ2VhKzBtUnJQdTlNZ1hyWkcwdm9lejhDZHE1bGc4RzYzY2lTcjFRWWFuL0pFcC8KbFZOaTNqMGlIQ1k5ZmR5dzhsb1ZkUitKYWpaRkpIQUVlK2hZZmFvekx3TnFkTHlldXZuZ3kxNWFZZjBXR3FVTQpmUjREby85QVpnQ2pLMkFzcU9RWVVZb2Zqcm9JUEdpdUJ2VDBFeUFPUnRzRTFsdGtKQjhUUDBLYVJZMlhmMThFCkhpZGgrcm0xakJYT1g3YlgrZ002U2J4U0F3YnZ5UXdpbG9ncnVadkxlQmkvTU5qcXlNZkNiTmZaUmVHR0JObnEKSXdxM3FvRDR1dUV0NkhLc0NyQTZNa2s4T3YrWWlrT1FWR01GRjE5OCt4RnpxZy9FakIzbjFDbm5NNCtGcndHbQpTODBkdTZsNXVlUklFV0VBQ0YrSDRabU96WDBxS2Qxb2RCS090dmlSNkRQOVlQbElEbDVXNTFxV1BlVElIZS8zCjhBMGxpN3VDTVJOUDdxdkZibnlHM3d1TXEyUEtwVTFYd0gzeU5aWFVYYnlZenlRVDRrTkFqYXpwZXRDMWFiYVoKQm5QYklSKzhHZG16OUd4SjJDazRDd0h6c3cvRkxlOVR0Z0RpR3ZOQU5SalJaZUdKWHZ6RWpTVG5FRGtxWUxWbgpVUk15RktIcHdJMzdzek9Ebms2K0hFWU9QbFdFd0tQU2h5cTRqZFE3bnNEY3huZkZveWdGUjVuQ0RJNmlFaTA1CmN6SVhiSFp2anBuME9qcjhsKzc5Qmt6Z1c0VDlQVFJuTU1PUU5JQXMxemRmQlV1YU1aOFh1amh2UTlNQ0F3RUEKQVFLQ0FnRUF1VU00TXQ4cjhiWUJUUFZqL1padlhVakFZS2ZxYWNxaWprcnpOMGtwOEM0Y2lqWnR2V0MrOEtnUwo3R0YzNnZTM0dLOVk1dFN3TUtTNnk0SXp2RmxmazJINFQ2VVU0MU9hU0E5bEt2b25EV0NybWpOQW5CZ2JsOHBxCjRVMzRXTEdnb2hycExiRFRBSkh4dGF0OXoxZ2hPZGlHeG5EZ0VVRmlKVlA5L3UyKzI1anRsVEttUGhzdHhnRXkKbUszWXNTcDNkNXhtenE0Y3VYRi9mSjF2UWhzWEhETHFIdDc4aktaWkErQVdwSUI1N1ZYeTY3eTFiazByR25USwp4eFJuT2FPT0R1YkpneHFNRVExV2tMczFKb3c5U3NwZDl2RGdoUHp0NFNOTXpvckI4WURFU01pYjE3eEY2aVhxCmpGajZ4N
khCOEg3bXA0WDNSeU1ZSnVvMnc2bHB6QnNFbmNVWXBLaHFNYWJGMEkvZ2lJNVZkcFNEdmtDQ09GZW4KbldaTFY5QWkveDd0VHEvMEYrY1ZNNjlNZ2ZlOGlZeW1xbGZkNldSWklUS2ZWaU5IQUxsRy9QcTl5SEpzejdOZwpTOEJLT0R0L3NqNFEweEx0RkRUL0RtcFA1MGlxN1NpUzE0b2JjS2NRcjhGQWpNL3NPWS9VbGc0TThNQTdFdWdTCnBESndMbDZYRG9JTU1DTndaMUhHc0RzdHpteDVNZjUwYlM0dGJLNGlaemNwUFg1UkJUbFZkbzlNVFNnbkZpenAKSWkxTmpITHVWVkNTTGIxT2pvVGd1MGNRRmlXRUJDa0MxWHVvUjhSQ1k2aVdWclVINEdlem5pN2NrdDJtSmFOQQpwZDYvODdkRktFM2poNVQ2alplSk1KZzVza1RaSFNvekpEdWFqOXBNSy9KT05TRDA2c0VDZ2dFQkFQcTJsRW1kCmcxaHBNSXFhN2V5MXVvTGQxekZGemxXcnhUSkxsdTM4TjY5bVlET0hyVi96cVJHT3BaQisxbkg3dFFKSVQvTDEKeExOMzNtRlZxQ3JOOHlVbVoraVVXaW9hSTVKWjFqekNnZW1WR2VCZ29kd1A5TU9aZnh4ckRwMTdvVGRhYmFFcQo3WmFCWW5ZOHhLLzRiQ3h1L0I0bUZpRjNaYThaVGQvKzJ5ZXY3Sk0rRTNNb3JXYzdyckttMUFwZmxmeHl0ZGhPCkpMQmlxT2Nxb2JJM2RnSHl6ZXNWYjhjVDRYQ3BvUmhkckZ3b3J0MEpJN3J5ZmRkZDQ5dk1KM0VsUmJuTi9oNEYKZjI0Y1dZL3NRUHEvbmZEbWVjMjhaN25WemExRDRyc3pOeWxZRHZ6ZGpGMFExbUw1ZEZWbnRXYlpBMUNOdXJWdwpuVGZ3dXlROFJGOVluWU1DZ2dFQkFNNmxwTmVxYWlHOWl4S1NyNjVwWU9LdEJ5VUkzL2VUVDR2Qm5yRHRZRis4Cm9oaUtnSXltRy92SnNTZHJ5bktmd0pPYkV5MmRCWWhDR0YzaDl6Mm5jOUtKUUQvc3U3d3hDc2RtQnM3WW9EaU0KdXpOUGxSQW1JMFFBRklMUENrNDh6L2xVUWszci9NenUwWXpSdjdmSTRXU3BJR0FlZlZQRHF5MXVYc0FURG9ESgphcmNFa05ENUxpYjg5THg3cjAyRWV2SkpUZGhUSk04bUJkUmw2d3BOVjN4QmR3aXM2N3VTeXVuRlpZcFNpTXc3CldXaklSaHpoTEl2cGdENzhVdk52dUppMFVHVkVqVHFueHZ1VzNZNnNMZklrODBLU1IyNFVTaW5UMjd0Ly94N3oKeXpOa283NWF2RjJobTFmOFkvRXBjSEhBYXg4TkFRRjV1dVY5eEJOdnYzRUNnZ0VBZFMvc1JqQ0syVU5wdmcvRwowRkx0V0FncmNzdUhNNEl6alZ2SnMzbWw2YVYzcC81dUtxQncwVlVVekdLTkNBQTRUbFhRa09jUnh6VnJTNkhICkZpTG4yT0NIeHkyNHExOUdhenowcDdmZkUzaHUvUE1PRlJlY04rVkNoZDBBbXRuVHRGVGZVMnNHWE1nalp0TG0KdUwzc2lpUmlVaEZKWE9FN05Vb2xuV0s1dTJZK3RXQlpwUVZKY0N4MGJ1c054NytBRXR6blpMQzU4M3hhS0p0RApzMUs3SlJRQjdqVTU1eHJDMEc5cGJrTXlzbTBOdHlGemd3bWZpcEJIVmxDcHl2ZzZEQ3hkOEZodmhOOVplYTFiCmZoa2MwU0pab3JIQzVoa3FweWRKRG1sVkNrMHZ6RUFlUU00Qzk0WlVPeXRibmpRbm1YcDE0Q05BU1lxTFh0ZVEKdWVSbzB3S0NBUUFHMEYxMEl4Rm0xV290alpxdlpKZ21RVkJYLzBmclVQY3hnNHZwQjVyQzdXUm03TUk2WVF2UgpMS0JqeldFYWtId
jRJZ2ZxM0IrZms1WmNHaVJkNnhTZG41cjN3S1djR2YzaC8xSkFKZEo2cXVGTld0VnVkK04zCnpZemZsMVllcUZDdlJ3RDhzc2hlTlkzQlYvVTdhU3ROZDJveTRTNSt3WmYyWW9wTFNSV1VWNC9tUXdkSGJNQUIKMXh0Mno1bEROQmdkdng4TEFBclpyY1pKYjZibGF4RjBibkF2WUF4UjNoQkV6eFovRGlPbW9GcGRZeVUwdEpRVQpkUG1lbWhGZUo1UHRyUnh0aW1vaHdnQ0VzVC9UQVlodVVKdVkyVnZ6bkVXcHhXdWNiaWNLYlQySkQwdDY3bUVCCnNWOSs4anFWYkNsaUJ0ZEJhZHRib2hqd2trb1IzZ0J4QW9JQkFHM2NadU5rSVdwRUxFYmVJQ0tvdVNPS04wNnIKRnMvVVhVOHJvTlRoUFI3dlB0amVEMU5ETW1VSEpyMUZHNFNKclNpZ2REOHFOQmc4dy9HM25JMEl3N2VGc2trNQo4bU5tMjFDcER6T04zNlpPN0lETWo1dXlCbGoydCtJeGwvdUpZaFlTcHVOWHlVVE1tK3JrRkowdmRTVjRmakxkCkoybTMwanVZbk1pQkJKZjdkejVNOTUrVDB4aWNHV3lWMjR6VllZQmJTbzBOSEVHeHFlUmhpa05xWk5Qa29kNmYKa2ZPSlpHYWxoMkthSzVSTXBacEZGaFova1c5eFJXTkpaeUNXZ2tJb1lrZGlsTXVJU0J1M2xDcms4cmRNcEFMMAp3SEVjcTh4d2NnWUNTMnFrOEh3anRtVmQzZ3BCMXk5VXNoTXIzcW51SDF3TXBVNUMrbk0yb3kzdlNrbz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K", + config.StakingCertContentKey: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TVRWYUdBOHpNREU1TURjeE1ERTIKTVRJeE5Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRREtZU1J3L1cwWXBZSC9NVFFoaUZyUjBtODlsNnlUdXpMcER0anVkci81Um5oSVB2dHFrN1lJR20vbTlsMjkKeHdSNEo1cjdTWkdzKzcweUJldGtiUytoN1B3SjJybVdEd2JyZHlKS3ZWQmhxZjhrU24rVlUyTGVQU0ljSmoxOQozTER5V2hWMUg0bHFOa1VrY0FSNzZGaDlxak12QTJwMHZKNjYrZURMWGxwaC9SWWFwUXg5SGdPai8wQm1BS01yCllDeW81QmhSaWgrT3VnZzhhSzRHOVBRVElBNUcyd1RXVzJRa0h4TS9RcHBGalpkL1h3UWVKMkg2dWJXTUZjNWYKdHRmNkF6cEp2RklEQnUvSkRDS1dpQ3U1bTh0NEdMOHcyT3JJeDhKczE5bEY0WVlFMmVvakNyZXFnUGk2NFMzbwpjcXdLc0RveVNUdzYvNWlLUTVCVVl3VVhYM3o3RVhPcUQ4U01IZWZVS2Vjemo0V3ZBYVpMelIyN3FYbTU1RWdSC
llRQUlYNGZobVk3TmZTb3AzV2gwRW82MitKSG9NLzFnK1VnT1hsYm5XcFk5NU1nZDcvZndEU1dMdTRJeEUwL3UKcThWdWZJYmZDNHlyWThxbFRWZkFmZkkxbGRSZHZKalBKQlBpUTBDTnJPbDYwTFZwdHBrR2M5c2hIN3daMmJQMApiRW5ZS1RnTEFmT3pEOFV0NzFPMkFPSWE4MEExR05GbDRZbGUvTVNOSk9jUU9TcGd0V2RSRXpJVW9lbkFqZnV6Ck00T2VUcjRjUmc0K1ZZVEFvOUtIS3JpTjFEdWV3TnpHZDhXaktBVkhtY0lNanFJU0xUbHpNaGRzZG0rT21mUTYKT3Z5WDd2MEdUT0JiaFAwOU5HY3d3NUEwZ0N6WE4xOEZTNW94bnhlNk9HOUQwd0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUFxTDFUV0kxUFRNbTNKYVhraGRUQmU4dHNrNytGc0hBRnpUY0JWQnNCOGRrSk5HaHhiCmRsdTdYSW0rQXlHVW4wajhzaXo4cW9qS2JPK3JFUFYvSW1USDVXN1EzNnJYU2Rndk5VV3BLcktJQzVTOFBVRjUKVDRwSCtscFlJbFFIblRhS011cUgzbk8zSTQwSWhFaFBhYTJ3QXd5MmtEbHo0NmZKY3I2YU16ajZaZzQzSjVVSwpaaWQrQlFzaVdBVWF1NVY3Q3BDN0dNQ3g0WWRPWldXc1QzZEFzdWc5aHZ3VGU4MWtLMUpvVEgwanV3UFRCSDB0CnhVZ1VWSVd5dXdlTTFVd1lGM244SG13cTZCNDZZbXVqaE1ES1QrM2xncVp0N2VaMVh2aWVMZEJSbFZRV3pPYS8KNlFZVGtycXdQWmlvS0lTdHJ4VkdZams0MHFFQ05vZENTQ0l3UkRnYm5RdWJSV3Jkc2x4aUl5YzVibEpOdU9WKwpqZ3Y1ZDJFZVVwd1VqdnBadUVWN0ZxUEtHUmdpRzBqZmw2UHNtczlnWVVYZCt5M3l0RzlIZW9ETm1MVFNUQkU0Cm5DUVhYOTM1UDIveE91b2s2Q3BpR3BQODlEWDd0OHlpd2s4TEZOblkzcnZ2NTBuVnk4a2VyVmRuZkhUbW9NWjkKL0lCZ29qU0lLb3Y0bG1QS2RnekZmaW16aGJzc1ZDYTRETy9MSWhURjdiUWJIMXV0L09xN25wZE9wTWpMWUlCRQo5bGFndlJWVFZGd1QvdXdyQ2NYSENiMjFiL3B1d1Y5NFNOWFZ3dDdCaGVGVEZCZHR4SnJSNGpqcjJUNW9kTGtYCjZuUWNZOFYyT1Q3S094bjBLVmM2cGwzc2FKVExtTCtILzNDdEFhbzlOdG11VURhcEtJTlJTVk55dmc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + config.StakingSignerKeyContentKey: "QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDE=", + }, + }, + { + Flags: FlagsMap{ + config.StakingTLSKeyContentKey: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS2dJQkFBS0NBZ0VBM1U2RWV0SjJ1amJrZllrZ0ZETXN6MXlUVEZsVUd5OGsrZjRtUW9pZHdRZUdGakFZClg4YVNWaVlsbDNEUHM1dzRLU2JzZ2hIb2VRNmVtV054WHU1T2g0YmVoWmhhUTQ4SjQ2VUpuWkJDbElOTkJ2aEoKVDA2ZjFMSnJEeS9pUUxNRDZnV1ZxVEFVdDNiRjBEYnB6RExUQnBhMytVdEplbDErTXU1SFB4azhXSUwzbG1mUwpiVWY1dGlqWHEvc0k0QVI2aWVLejZweHdkOUxBMkQyWVBkUXM5UUpQQVc5c05pTmRQTTBGWUNpdW5pS2pTbHlTCmYrS1lzMG1YVk9QZHRNRE1XQWFaM3d3SElaOFhvdlZQNzkwRzArM2lsQ05ueHEvb1Rsa3lGbFBOUGxYamdiZkcKdWorTEV2UUVJZWYraHZLc0pnS2p0MkVQWGNUZk5vcnZ4OS9qZmhobXZ6TksrYXhGcTg4Q0xoWWNDcjhrSEtUMApRWnRza0p3WjRTL1J1WkRlcCtEZ1ROQ3JJR2R3bUpSMlVvT0hsbkRZQWJkVmdoYkVITzczMUJXaVo2L0VEOTZSCmVGZitBYUpPOEV6dVJ6ZEFOUkdCRldMbCtkelJwNnlsWHZoWVBmc1REb0Z6MXBGRGFuYjZTUFdXMWQyZEV1Zk8KdXFoZC83d1dhNFlBcXluT1JQN3pIc1FVWHZDb3VNQ3FqalZZWXZWaGxIRVI3eFJJcEhQdTFhejQ5R0dodXMxUwozZnJqZ0NUZk5YeWpqMktPRitMR2F6TWoySmR6b3hyVElYMWVDcndtaXhLOVhianBKYWp1bkFyeGtQSWdKUzFjCnQxU2NsdVJpZEE5Q3NvZ0lxK0ZUWFFDU3FOV1lJaG9yeVMxcnBVMzQxSW53amZhTmRlUUlxaWNZcXNNQ0F3RUEKQVFLQ0FnQU5HVU9nSFdybmxLNHJlLzFKRk1wWEw2eU1QVkZNRnB0Q3JMZEpBdHNMZk0yRDdLN1VwR1V1OGkwUgpiSnp1alpXSllnTm5vM1cyREpaNGo3azdIREhMdGNEZitXZUdUaVlRc2trQ2FYSjNaZG9lU24zVVV0d0U4OWFBClhKNHdwQ2ZjSng1M21CL3h4L2JuWHdpeGpHU1BKRWFaVzhwcWtyUVFnYWYzNVI5OFFhd3oyOHRKcXBQdUl6YTQKdURBTFNsaVNacmV0Y0RyNzdKNTdiaEhmdnZvMk9qL0EzdjV4cWVBdjVCYW9YV0FRZmc1YUxXYUNhVUFPaEpHUApkYmsrcEphenN4aFNhbHpWc1p2dGlrV0Q5Zm9jZXgwSkZadGoyQytReTVpNlY1VnpWaFFVTG5OMXZLTVhxUmZCCmhnQzdyZ3RnYUpHV0hnbVJ6RUJGOHkxRUVFMWZvaGJvMnNxa0c0b016M2pCWjRvNE1BRFFjcGZLMnFjaGdybmsKT3hJUy91VThzemR1ODRpSDhzNkYvSGwxKzg3am5xNk85UmUwaU1TdXZ5VWJqQUVlOENtOVAvYTVNMVg5ZXl6dwpXU1hTUFpCd0tTUm9QM3d1eWNiRW9uVFdRblFIZHd5U1krSXZkdGdsaUVEaEtyVmJaR25rczV6bWFhSXlkVy95CkxTMlM5SlJNNVkrWHAwdlYzbkdsRWVoQ1VkclhvUTFEei9BaUhuV0hqYnhvQ0ZHdDBxTDZDT0p6aUFHZlVYS2EKY1E1aURkN3pjMkozbTJaNmM4Vzh4a1BKZSsxZG1OV2ZHSHJqYThEU0h0VGNEWTZBcWQ5OFZ1MG5pdThQQzdieApBdncrKzZKMndHN0xOODlyZ1IwdVA3YXM5Q3g0a0hIc09Gd3ArbEtPRFZlMmR3MHZBUUtDQVFFQTdtb05DalA2CjVQa1NrU05QaS9qdzFZL0ZDd0JvSkV6bDNRNWZ0d0dya1laRlJ
MQlR2Q0NsaTJqaGV4YUMwRDkreWpzVmFMLzIKVmFwNDMveGk1NWlwbmlxdjhjMVdBYzV4RmgrNmhDeWRTNks5b3dEeGxITjc1TUdMcm1yWWpZKzNhTWRvMTVEbQp4NWJ6bk9MTHlNVVc0QWsrNzdNVHcxMmZhZC83TDBBTlh1bUZGajZ5ZGNTOFBIbWhKbG16NVZlZ1d6NWIxS0dRCksvL3BoY3VPbTM0OXhla3Q3SjVrS1JiREVxTE9sWnYvRUlBZENCUU00VTNkNlAvMnZVVXk1bktZRzBGMXhlYUMKbGVWcHIxRVBvRUkrWGtUeStqam9hQnM3aVVIcGNEMzU5WFFDV0xuaXdmMVlmdHRrOXpKcDdtNnRSL0dlYWJsawp1bm5INXp5Rmt3emxRd0tDQVFFQTdhRnROc2pMMFVFWGx5QllqQ1JPb1B1NmthL280UXlFYVd2TUhjaVh1K0d2Ck03VFFDRjJpOW9lUVhBQkl5VE5vUXIrcE5qQVJib1k4cDArOVpmVjhRR2x2SDZhd1cyTU56RDA3bGc5aHdzalkKSk9DSTY0WHhaajE4M0doSGdOOS9jRTRQWEJyUUNxUExQQ0tkVjY2eUFSOVdObTlWYTNZOVhmL1J2Y29MaU5CMQpGQWc1YmhiTlFNblIzOG5QSnM5K3N1U3FZQjh4QURLdndtS0Vkb255K1dJTS9HUXlZWmlEbFhFajhFZldRb3VNCndBb2s2VnVoczZjdUxpSEh6WEZSNFk2UkNXUmIybmYyVnJ6V29wejJCcDAySWVIWTBVWnNaZUtucWhhOWR0VXUKWkNJdDJNWlVFTHhpaDlKUyt3ekNYOEJKazN4ZWRpODl6T1pLUng0TWdRS0NBUUVBeHFuVUo5WmNrSVFEdHJFbgp6Y2tvVmF5eFVwT0tOQVZuM1NYbkdBWHFReDhSaFVVdzRTaUxDWG5odWNGdVM3MDlGNkxZR2lzclJ3TUFLaFNUCkRjMG1PY2YwU0pjRHZnbWFMZ2RPVW1raXdTM2d1MzFEMEtIU2NUSGVCUDYvYUdhRFBHbzlzTExydXhETCtzVDUKYmxqYzBONmpkUFZSMkkraEVJWTFOcEEzRkFtZWZvVE1ERnBkU0Q5Snl6MGdMRkV5TEJYd1MyUTlVSXkwdUdxQQpjSTFuU0EwZjJYVzZuSXA5RG9CZmlFY3U2VDczOGcxVEZrTGVVUk5KTlRuK1NnemZOb2I3Ym1iQUZjdk9udW43CkRWMWx2d1BSUERSRFpNeWNkYWxZcmREWEFuTWlxWEJyeFo0b0tiMERpd0NWU0xzczVUQXZBb1licTA5akJncG0KZTd4WkpRS0NBUUVBM2Y3bDBiMXFzNVdVM1VtSmozckh2aHNOWTljcnZ6cjdaS1VoTGwzY2F0aGUzZlk0TnVpTApPcmJReFRJNnpVUnFUWmxTRWw1N21uNXJvWDZjR09scVo1NVlBd0N0VnVMRjNCMEVVcDhTSEcrWGhYUUNWYzF2CkJLM0N2UUhxY3RuWTYyanhib0ZhQSthYkVoWGdXaTdJK3NWMHZDdnNhQlV4SldTOVpBbWlGdkZ2dndRajZ0WUEKY0Z0YTV5OVlpQkJtYytldHgxaThaVXYwNktzeXhxNy9QNzA3Rm5yZ21rNXA5eTJZZm53T0RXTGpYZkRjSk9uRwp1ZGdnQzFiaG11c1hySm1NbzNLUFlSeWJGTk1ielJUSHZzd1Y2emRiWDc3anU1Y3dQWFU3RVEzOVpleU1XaXlHCkVwQjdtQm1FRGljUVczVi9CdnEwSU1MbmdFbFA4UHFBZ1FLQ0FRRUFxNEJFMVBGTjZoUU9xZTBtY084ZzltcXUKenhsMk1NMEtiMkFCRThmeFEydzRGeTdnNDJOb3pEVVcxMy9NTjdxMUkrQXdNaGJsNEliMlFJbUVNVHVGYUhQWQpBM09abG5FOUwwb2k0Rkkra0cyZUpPQi8rNXBIU3VmL2pyWi80Z0FSSyt
1Yy9DRGVhSWxqUC9ueHcwY1grc0YrCkhqWDRPYjQvQ3lFSWVJVUdkT0dzN2c5a2Yrb2lyWHJ5dURjWnhsLzJmUU94cXZhOWRoaEJMaFBYRzNvdFNwMFQKRDkweEMxbFNQTElIZitWVWlGOWJMTXRVcDRtZUdjZ3dwWFBWalJWNWNibExyUDlQeGJldmxoRzJEM3ZuT0s5QQo4aldJOVAxdU5CRUFVVFNtWFY4cmVNWU95TlhKSDhZYmJUNHlpYXJXbmFRTTBKMGlwV3dYR0VlV2Fndi9hQT09Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==", + config.StakingCertContentKey: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TVRsYUdBOHpNREU1TURjeE1ERTIKTVRJeE9Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRGRUb1I2MG5hNk51UjlpU0FVTXl6UFhKTk1XVlFiTHlUNS9pWkNpSjNCQjRZV01CaGZ4cEpXSmlXWGNNK3oKbkRncEp1eUNFZWg1RHA2WlkzRmU3azZIaHQ2Rm1GcERqd25qcFFtZGtFS1VnMDBHK0VsUFRwL1VzbXNQTCtKQQpzd1BxQlpXcE1CUzNkc1hRTnVuTU10TUdscmY1UzBsNlhYNHk3a2MvR1R4WWd2ZVdaOUp0Ui9tMktOZXIrd2pnCkJIcUo0clBxbkhCMzBzRFlQWmc5MUN6MUFrOEJiMncySTEwOHpRVmdLSzZlSXFOS1hKSi80cGl6U1pkVTQ5MjAKd014WUJwbmZEQWNobnhlaTlVL3YzUWJUN2VLVUkyZkdyK2hPV1RJV1U4MCtWZU9CdDhhNlA0c1M5QVFoNS82Rwo4cXdtQXFPM1lROWR4TjgyaXUvSDMrTitHR2EvTTByNXJFV3J6d0l1Rmh3S3Z5UWNwUFJCbTJ5UW5CbmhMOUc1CmtONm40T0JNMEtzZ1ozQ1lsSFpTZzRlV2NOZ0J0MVdDRnNRYzd2ZlVGYUpucjhRUDNwRjRWLzRCb2s3d1RPNUgKTjBBMUVZRVZZdVg1M05HbnJLVmUrRmc5K3hNT2dYUFdrVU5xZHZwSTlaYlYzWjBTNTg2NnFGMy92QlpyaGdDcgpLYzVFL3ZNZXhCUmU4S2k0d0txT05WaGk5V0dVY1JIdkZFaWtjKzdWclBqMFlhRzZ6VkxkK3VPQUpOODFmS09QCllvNFg0c1pyTXlQWWwzT2pHdE1oZlY0S3ZDYUxFcjFkdU9rbHFPNmNDdkdROGlBbExWeTNWSnlXNUdKMEQwS3kKaUFpcjRWTmRBSktvMVpnaUdpdkpMV3VsVGZqVWlmQ045bzExNUFpcUp4aXF3d0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUNRT2R3RDdlUkl4QnZiUUhVYyttMFRSekVhMTdCQ2ZjazFZMld3TjNUWlhER1NrUFZFCjB1dWpBOFNMM3FpOC9DVExHUnFJOVUzZ1J
aSmYrdEpQQkYvUDAyMVBFbXlhRlRTNGh0eGNEeFR4dVp2MmpDbzkKK1hoVUV5dlJXaXRUbW95MWVzcTNta290VlFIZVRtUXZ3Q3NRSkFoY3RWQS9oUmRKd21NUHMxQjhReE9VSTZCcQpTT0JIYTlDc1hJelZPRnY4RnFFOTFQWkEybnMzMHNLUVlycm5iSDk5YXBmRjVXZ2xMVW95UHd4ZjJlM0FBQ2g3CmJlRWRrNDVpdnZLd2k1Sms4bnI4NUtESFlQbHFrcjBiZDlFaGw4eHBsYU5CZE1QZVJ1ZnFCRGx6dGpjTEozd28KbW5ydDk1Z1FNZVNvTEhZM1VOc0lSamJqNDN6SW11N3E5di9ERDlwcFFwdTI2YVJEUm1CTmdMWkE5R001WG5iWgpSRmkzVnhMeXFhc0djU3phSHd6NWM3dk9CT2tPZGxxY1F6SVNSdldEeGlOMUhrQUwraGtpUUN1TWNoZ09SQWdNCnd6UG9vYThyZld0TElwT1hNcHd1VkdiLzhyR05MRVBvdm9DSzl6NmMrV1oremtSbzQrM1RRa09NWTY2WGh0N3IKQWhseTNsZXIrVHlnNmE1alhUOTJXS0MvTVhCWUF5MlpRTm95MjA0a05LZXZjSDdSMmNTa3hJVGQzbjVFYWNOeQo1TUF0Q05JazdKd2VMQ2g5ckxyTFVCdCtpNG40NHNQK0xWaGZXSGVtbmdBOENvRjRuNmVRMHBwMGl4WlRlbjBqCjR1TjBHMk5mK0plR01scW9PYkxXZElPZEgvcGJEcHBYR29aYUtLRGQ3K2JBNzRGbGU1VWg3KzFlM0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + config.StakingSignerKeyContentKey: "QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDI=", + }, + }, + { + Flags: FlagsMap{ + config.StakingTLSKeyContentKey: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKSndJQkFBS0NBZ0VBdkpsUTA2QjI1RkJkb0VYVlg2Y25XU3pTbmtOL0hlaDJSWkZETjFOWFZpMlJVbVRiCjJvRGZJWlVMMlA3VUx0d09OS2RmVm9DbTgxbWEzdDdZTlFKTVBDYzdYYy9ZSmVFVEo5ZU5pQ0R0d29zQ0tiWkIKTkNhS1cyWVpodVh3QXRwL3pGMExYNG5pTjRjM2tkT0MxdUNZQ3lzMzl6Wi9NV1haUVlDWHdaaSthbEZKeE96dQp5R3lSaHBtdW9FZzkzc0hyUk9pa0diVGJYOU1VU0w5UEhhR1NtTUU1ZWlMWkhSaXUrcm42cXRQZkJsY0Zyd05jCkVycDN1UGpCWUxQVGVIaFNZK2ljcmZPWDRpdjNNRDhleWtYWUdJQWM2MlVPQkQ3SVVaOEZxd1U0bmpqbWlid0EKTmtUTVRCR015a2F5UUFMR0NOYzhiVGNxaUZHZ1UzTVptcy9qVjFCbHZzQjhDRTRuTkJxZjEwSnRpOEsvNWNTRgplanB3SlM5d29HMGw0cTBwNkJ2bmgwUHJ3OHdRSEVRNUV3SUwveGxDTTVYNms5a3JVeXROcmlBWUNXTTd4VXh1CktaUjRyWEhFWDh2TU1iSWVXZk1SdmZaWmowRUxTN0o1VUFGZmk0dE9nbS9BaCtJVkZhSWxvbGJPVUFHb2FscHIKK1g2YUY5QkxOdWtjbEIxeG00eE50TnFjakRBTzQxSHExRlNqSW96cUJtYlhCK3N3ZUtmM1NkRTdVNkVjRG9LdwpPelFpUFFHRmJhbFY2MXh2N25ZL0xxOXE5VzAxY0NHeDdEbmt4RDk0R05KaDBWcHRoUDJ5NDdmYjIyQ0VWTVVmCk1WcHk1RzNnYzRqVThJUHQrbzNReGZ1b0x0M0ZNLzUyMHAvR0dQbU5ZNDE3ak41YWhmSWpHZ0tHNGJzQ0F3RUEKQVFLQ0FnQSt1SElUM3lLSzdWc2xxUE83K3Rmd0pTTHFOU0k2TFF2Z09OMzBzVWV6UmpZMUE0dkdEK09reEcrTApPN3dPMVduNEFzMkc5QVFSbS9RUU9HWUl3dm5kYTJLbjRTNU44cHN2UGRVNHQxSzZ4d1h5SDBWeDlYcy95Q1duCklpTCtuL0d1WWljZEg3cldvcVpOWGR6K1h2VFJpZzd6clBFQjJaQTE0M0VVbGhxRk93RmdkemMxK2owdldUNmsKMlVHU0trVjJ4ak9FeFF2THcyUFVpYUxqQk0rKzgwdU5IYmU4b0cvWXZDN3J6c2cxMEl6NFZoS3h1OGVEQVY4MgpMTGVnTWN1Y3BFZ3U1WHJXWWE2MElkbTRoUi9Iamh1UUFTeDNKdlh4aHdRWWl3VDRRWTRSc2k4VDNTOWdBTm9rCmp2eEtvMkYrb1MzY1dHTlJzR3UwTk93SCt5anNWeU1ZYXpjTE9VZXNBQWU4NXR0WGdZcjAyK1ovdU1ueHF0T0YKZ2pJSFkzWDVRWmJENGw0Z2J3eCtQTGJqc2o0S0M2cjN5WnJyNTFQZExVckJ2b3FCaHF3dUNrc2RhTW50V0dNRQp1MFYvb29KaTQrdXpDWXpOMDZqRmZBRlhhMnBXelZCNXlLdzFkNnlZaTlVL2JQZDR4bjFwaExVTUhyQzJidmRNCkg4UDE4Z0FTNnJrV24rYWdlaVdSSG1rZjR1b0tndjNQck1qaWprQmFHcGY2eGp2NiswUTM5M2pkVklDN3dnSlYKOFcwaTFmMUF3djY4MDg5bUhCRWFyUFR2M2d6MzkyNTFXRkNQTlFoRXVTeTc5TGk1emp3T3ByWlhTME1uSlhibQpCMDBJUFRJdzUxS3Vhb3VlV3pZMXBBMklGUS8wc0gzZm8ySmhEMHBwMWdJMERkZTdFUUtDQVFFQTdSVmdOZWxrCjNIM1pKc29PZk9URmEwM2xuWHVFZlRBeXhoRUVDUno2NGs1NHB
TYkVXVjczUEllWUJ5WnZuc3JLUnlkcFlXVVAKQ3AvbUtoQUpINFVDZjJoelkwR3lPNy9ENitIRUtaZENnNmEwRE5LY2tBb0ZrQmZlT2xMSkxqTFZBVzJlRVZ4egp0bEZ0Ky9XQkU5MEdDdkU1b3ZYdUVoWEdhUHhDUHA1Z2lJTjVwaFN6U0Q1NTdid3dPeVB3TktGWjdBbzc3VU5LCmt6NkV6Y3ZRZ3FiMjA1U1JSS0dwUzgvVC85TGNMc1VZVmtCZllRL0JheWpmZk8rY1FGNHZINXJCNHgvOC9UN3QKdVVhNzl1WStMZUdIZ1RTRklBdWk5TEVLNXJ5Ly8yaERKSU5zSXRZTWtzMVFvNFN1dTIzcE91R2VyamlGVEtXbAptT0lvRm1QbWJlYkFjd0tDQVFFQXk2V2FKY3pQY0tRUS9ocWdsUXR4VTZWZlA0OVpqVUtrb2krT0NtdHZLRHRzCjdwSmZJa3VsdWhuWUdlcUZmam5vcHc1YmdaSE5GbTZHY2NjNUN3QnRONldrMUtubk9nRElnM2tZQ3JmanRLeS8KQlNTVjNrTEVCdmhlOUVKQTU2bUZnUDdSdWZNYkhUWGhYUEdMa2dFN0pCWmoyRUt4cDFxR1lZVlplc1RNRndETQpLRUh3eklHY0ZreVpzZDJqcHR5TFlxY2ZES3pUSG1GR2N3MW1kdExXQVVkcHYzeHJTM0d2ckNiVU1xSW9kalJkCnFrcmcvZC9rUXBLN0Ezb0xPV2ZhNmVCUTJCWHFhV0IxeDEzYnpKMldsc2h4SkFaMXAxb3pLaWk1QlE5cnZ3V28KbXVJNXZkN282QTlYc2w4UXpsdVNTU1BpK05oalo2NGdNQnJYY2lSdm1RS0NBUUIvZEI1azNUUDcxU3dJVGxlNwpqTUVWRHF1Q0hnVDd5QTJEcldJZUJCWmIweFBJdFM2WlhSUk0xaGhFdjhVQitNTUZ2WXBKY2FyRWEzR3c2eTM4ClkrVVQyWE11eVFLb1hFOVhYK2UwOUR3dHlsREJFL2hXOXd4R2lvNU5qSFBiQWpqQXE4MXVSK1ZzL2huQ2Voa0sKTktncStjT2lkOU9rcFZBazRIZzhjYWd6dTNxS2JsWnpZQ0xzUzE4aWJBK1dPNmU3M1VTYUtMTE90YTF2ZFVLQworbjkyLzBlWlBjOWxralRHTXZWcnIwbUdGTlV4dU9haVZUYlFVNEFNbXBWNnlCZXpvbDYvUmpWR2hXQkhPei95CktteE9hWTJuekptdU1mOUtTKzVyd0FGWWY4NkNhOUFXbTRuZVhsWVJMT1ZWWWpXTU01WjF2aGRvT1N5VDNPRGoKOUVsQkFvSUJBR0NSUGFCeEYyajlrOFU3RVN5OENWZzEwZzNNeHhWU0pjbDJyVzlKZEtOcVVvUnF5a3Z6L1RsYgphZnNZRjRjOHBKTWJIczg1T1R4SzJ0djNNWmlDOGtkeDk5Q1VaTDQvZ3RXOVJXWkh2dVY5Q1BQQ1hvTFB2QzdsCjlmanp0ZDFrcUpiN3ZxM2psdGJxSnR5dytaTVpuRmJIZXo4Z21TZVhxS056M1hOM0FLUmp6MnZEb1JFSTRPQSsKSUorVVR6Y2YyOFRESk5rWTF0L1FGdDBWM0tHNTVwc2lwd1dUVlRtb1JqcG5DemFiYUg1czVJR05FbFd3cG9mZgpGbWxXcFIzcW5vZEt4R3RETVM0WS9LQzJaRFVLQVUrczZ1Ry9ZbWtpUDZMZFBxY2tvZDRxSzhLT1JmMUFSOGRMCkJ6WGhHSklTSURNb25rZU1MTThNWmQwSnpXSWwzdmtDZ2dFQVBCa0V4ZDJqNFZZNXMrd1FKZGlNdG81RERvY2kKa0FFSXZJa0pZOUkrUHQybHBpblFLQWNBQVhidnVlYUprSnBxMzFmNlk2NnVvazhRbkQwOWJJUUNBQmpqbEl2ZQpvN3FRK0g4L2lxSFFYMW5iSER6SW5hRGRhZDNqWXRrV1VIakhQYUtnMi9
rdHlOa0Z0bFNIc2t2dkNFVnc1YWp1CjgwUTN0UnBRRzlQZTRaUmpLRXpOSXBNWGZRa3NGSDBLd2p3QVZLd1lKTHFaeHRORVlvazRkcGVmU0lzbkgvclgKcHdLL3B5QnJGcXhVNlBVUlVMVUp1THFSbGFJUlhBVTMxUm1Kc1ZzMkpibUk3Q2J0ajJUbXFBT3hzTHNpNVVlSgpjWnhjVEF1WUNOWU11ODhrdEh1bDhZSmRCRjNyUUtVT25zZ1cxY3g3SDZMR2J1UFpUcGc4U2J5bHR3PT0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K", + config.StakingCertContentKey: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpKYUdBOHpNREU1TURjeE1ERTIKTVRJeU1sb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRQzhtVkRUb0hia1VGMmdSZFZmcHlkWkxOS2VRMzhkNkhaRmtVTTNVMWRXTFpGU1pOdmFnTjhobFF2WS90UXUKM0E0MHAxOVdnS2J6V1pyZTN0ZzFBa3c4Snp0ZHo5Z2w0Uk1uMTQySUlPM0Npd0lwdGtFMEpvcGJaaG1HNWZBQwoybi9NWFF0ZmllSTNoemVSMDRMVzRKZ0xLemYzTm44eFpkbEJnSmZCbUw1cVVVbkU3TzdJYkpHR21hNmdTRDNlCndldEU2S1FadE50ZjB4Ukl2MDhkb1pLWXdUbDZJdGtkR0s3NnVmcXEwOThHVndXdkExd1N1bmU0K01GZ3M5TjQKZUZKajZKeXQ4NWZpSy9jd1B4N0tSZGdZZ0J6clpRNEVQc2hSbndXckJUaWVPT2FKdkFBMlJNeE1FWXpLUnJKQQpBc1lJMXp4dE55cUlVYUJUY3htYXorTlhVR1crd0h3SVRpYzBHcC9YUW0yTHdyL2x4SVY2T25BbEwzQ2diU1hpCnJTbm9HK2VIUSt2RHpCQWNSRGtUQWd2L0dVSXpsZnFUMlN0VEswMnVJQmdKWXp2RlRHNHBsSGl0Y2NSZnk4d3gKc2g1Wjh4Rzk5bG1QUVF0THNubFFBVitMaTA2Q2I4Q0g0aFVWb2lXaVZzNVFBYWhxV212NWZwb1gwRXMyNlJ5VQpIWEdiakUyMDJweU1NQTdqVWVyVVZLTWlqT29HWnRjSDZ6QjRwL2RKMFR0VG9Sd09nckE3TkNJOUFZVnRxVlhyClhHL3Vkajh1cjJyMWJUVndJYkhzT2VURVAzZ1kwbUhSV20yRS9iTGp0OXZiWUlSVXhSOHhXbkxrYmVCemlOVHcKZyszNmpkREYrNmd1M2NVei9uYlNuOFlZK1kxampYdU0zbHFGOGlNYUFvYmh1d0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUFlMmtDMEhqS1pVK2RsblUyUmxmQnBCNFFnenpyRkU1TjlBOEYxTWxFNHZWM0F6Q2cxClJWZEhQdm5pWHpkTmhEaWlmbEswbC9jbnJGdjJYMVR
6WU1yckE2NzcvdXNIZjJCdzB4am0vaXBIT3Q1Vis0VE4KbVpBSUE0SVBsMDlnUDI4SVpMYzl4U3VxNEZvSGVNOE9UeGh0dE9sSU5ocXBHOVA1ZDZiUGV6VzZaekkzQ2RQUApDRjY5eEs0R0Zsai9OUW5Bb0ZvZ2lkNG9qWVlOVGovY000UFlRVTJLYnJsekx5UHVVay9DZ3dlZlhMTUg4Ny9ICmUza1BEZXY4MFRqdjJQbTVuRDkzN2ZaZmdyRW95b2xLeGlSVmNmWlZNeFI3cWhQaGl6anVlRDBEQWtmUUlzN0wKWVZTeXgvcWpFdjJiQllhaW01UlFha1VlSFIxWHU1WGovazV6cjMzdDk3OWVkZTUwYnlRcmNXbTRINUp4bkVwRApKeEpuRmZET1U2bzE0U0tHSFNyYW81WjRDM2RJNTVETTg0V0xBU25sTUk1Qks0WHRTM25vdExOekc4ZGZXV2hUCjltMEhjcnkrd1BORGNHcjhNdGoxbG9zLzBiTURxTUhDNGpjRlcxaHJYQ1VVczlSWXpFK04veG9xd0NRU2dOMVAKRTczdVhUeVNXajVvdk1SNVRQRjZQaGNmdExCL096aXFPN0Z2ZXJFQnB2R0dIVUFuVVQ2MUp0am9kalhQYkVkagowVmd5TU9CWTJ5NTNIVFhueDNkeGVGWmtVZFJYL1ZaWXk4dE1LM01UWSs3VUlVNWNXWW5DWkFvNUxOY2MwdWtSClM2V1M5KzZlYVE2WFJqaGZOVWp4OWE3RnpxYXBXZHRUZWRwaXBtQlAxTmphcDNnMjlpVXVWbkxRZWc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + config.StakingSignerKeyContentKey: "QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDM=", + }, + }, + { + Flags: FlagsMap{ + config.StakingTLSKeyContentKey: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBMlp3NkF4eE5wNC9Oc1E0eDlFMit6bHpLa0F4OC9zMnk0bmJlakVpTlZ3ZDd6Z0NCCklqNnE3WHB4cDZrVzFSWEp1Um1jYjhZZUdFQS9Bb3lrcE5hb0dTWE03TkF6MG9oa1BCSDVlRHhqdU1aeEc1WlkKQzl6MUVUMXFlNWhGZDZJZW44U0FvRUNrd1pVK3U5Ukp4Y3dlWmNpVnFicGtjN3dOYjhvUVRMR1BHanZzWGF1UwoxQlpiV013WGI2WUMxMVdnOEIyVU5qMEJGclJkSGRDVHRqZEZoUlF4alpzZXAvS1NrbmlBRlE3RThHUW1TdmhRCm5uaE1xQm4xN0NEYkhOTWdqRUxKV3NKSHpqS2dQY2dWZHlZdWtKaFdDc2htTGV1VEZoamFqd09GNnlNcFlpc0EKZ2w4TS9abzJsc2plNndNc1BYRnBTekRnelpnemtWc3hrV2VkdmREQkFCQ0diS28yQjFZclVVelAwWnpTR25VNgpHUmp0TXViZ0pETmVISms3NTdDdFFqL2Rxclgwb3JLdStWdnlQZFhhMDRhQkxVeXUvRkJqcGYrVU5XQVVkUEJmCnJVbEZtVnNoM2lPbERnVkFoZ3Rtd3Jac3lqSmU3MU51WlJ1cG13a2RTYmtJdWtXS1ozekkxeDkrcFZlRXUzMDcKMHNNTGtkTjRkaGR4OVhoWkZqUnRwV3hhMXRIY3d3ajRONVY4SmhVT3VZVkhFSVhMYzU2QWMrUXRaUmRubHROOApMbFlNWXA1N2xaU1I0OHZ4cTZwbHJBRXA5VHc3bGJPSk81T2d5WDhNaHdJZ05Mc1Y2eG44V0w2ZXgvSStQdG1ECnBsb2xwZWNXSUt4QTQ3cStaL0djZU9qeDBIcTlnTDZ0N0d4QmhIdFRBclZ5Wlk5M2EzZVI0cHQ1dmpVQ0F3RUEKQVFLQ0FnQk1vQk5aWnd6OUZNa0VNSkJzaXpmRjZLeTNQbjZCSnFOMzFRMldiakcrMUhiRzJpeWVoMXllMUwvUwpudHJZVzV5MW5nd1UyN2xiSnJ4SlJJYnhPRmpteWdXMzJiUjF6T3NtcjltZGVmNVBZU2tRNHNiTUhwajQ0aHh0CnV2ZXpJWllSQWh1YzBrWnhtQUVJR0wrRmM5TzhXWDVCenMxeVoyUi8yYklWbjJ4WmU0SkdsWlRWTTY0a3ZYRC8KTW9ETG5HNVlQc0lpdXlaMy9UalF0OUpibG1qWGJIM3FkQlcrWTg4eTNsV1RsS2pLVVNtZXVvT0EyYkY4ZSsrNQpudlFvMlRzYnlLU29YY0wxRzZTTFBMbzZRMnFnSmRRZVplUjlCUGU5RHpGZXJJbnFlMjRtRUNoVXYrMk9HMUJmCmxnblF6VVExdW9xdUhGNzhaank2VVZkSjhTZDh1ZnZLQzlyejhKWXNJeW5mdzBnUUMzRjgvZW1tMVFTYWJGdlkKdEc0K3gwSzhGZ3JpampFMDhSdnFnSW5keDlmdENOb040dTNsWHhQckpoS3ByMnh1WFNhNFZaYnVtZ043ZnFXeApVQkM4bG1QUWk1VlptajNuSmZqNGRhdG1CVHZzMWRPTFJNZGZkdFRGeitjQWRXTlp4WDNIT0xaVVNxTVZXZ1hZCmtYMHM3SVY5R255VW50Qmt0WCtJRWJXbEF0dHpsZHlxRjltZDRhdmpLWFErWTRQSy9zUjF5V3N1dnRpWmRZVUwKL1FyUUhYMENzVnYxaFJjWDB5ZWtBMGE4cXdhR214RWNuZEVLdjd3RjFpNjI2amMyZkRSNnFJMXlwMjBYbDNTaQprWUJTTmg3VksyMTBYSWhkZFN1VnhXNS9neU5uRkFCRGZwMWJTZFRoNVpKUmZOdnRRUUtDQVFFQTlaaXBueXU4CkpLbEx0V3JoMmlQOXBtRkJhZVVnb0Y2OElWZmQ2SVhkVjduQUh
TV3lzaUM0M1NDVW1JNjN4bHVNWUVKRmRBWisKbS9pUmMvbjZWRktFQlc0K1VqazlWVzFFMWlxSGdTQUJnN250RXNCMk1EY1lZMHFLc040Q1lqQzJmTllPOTd6Sgo1b2p1ODRVM1FuOFRXTmtNc3JVVTdjcm0yb0FRZDA4QWl6VkZxTG8xZDhhSXpScSt0bDk1MlMvbGhmWEtjL1A5CmtmaGwrUktqaVlDMnpiV25HaW54YzJOYmY1cFd3bm10U3JjZW5nK1prZ1ZmU0IzSHZTY2txekVOeWU5WWtwVk0KR0UrS2pFZHNzK1FuR1FSV00ySlBseW9ZRG1oVDZycmFzUlQ2VEtzZWN3bzFyUlhCaTRDMWVUWlFTblpmMjRPZwpRdXJTLy9Yekh6Ym5rUUtDQVFFQTR0UVNtYUpQWk5XWXhPSG5saXZ6c2VsZkpZeWc1SnlKVk1RV3cvN0N2VGNWCkdPcG9ENGhDMjVwdUFuaVQxVS9SeWFZYVVGWitJcDJGaEsyUWNlK3Vza05ndHFGTjlwaGgvRGVPMWc4Q1NhSWUKNkVidGc4TjhHTGMwWWhkaUp0ZDJYR3JrdGoyWHRoTUw3T0pQWUlpZGQ0OHRHdVFpemZpam80RmUxUzByU1c1NgpCNFJIVGgvTzZhMHRhTmVGYm5aUUpENTJoYTl3bG5jL1BaU0NVTWI5QzBkMDhkU3hkQlFWK1NWZEdybC9JUmZDCnFISG9DODZHWURjbW52aUQ1Q0ZPeHB4N0FKL2hRQXdQRlFSQ25XR0h3RGpwY29NT3RrdHlvN3BqOU1EdXpCVWIKa3I0cjFlaThmN1BDOWRtU1ltWXpKTVF4TGZ6K1RpMlN5eU9tZE0xQ1pRS0NBUUVBc1ZyNGl6Q0xJcko3TU55cAprdDFRekRrSmd3NXEvRVROZVFxNS9yUEUveGZ0eTE2dzUvL0hZRENwL20xNSt5MmJkdHdFeWQveXlIRzlvRklTClc1aG5MSURMVXBkeFdtS1pSa3ZhSlA1VythaG5zcFg0QTZPVjRnWXZsOEFMV3Bzdy9YK2J1WDNGRTgwcE9nU20KdmtlRVVqSVVBRzNTV2xLZldZVUgzeERYSkxCb3lJc0lGNkh3b3FWQXVmVEN5bnZUTldVbE9ZMG1QYVp6QldaWApZUEhwa1M0d0tTM0c1bndHMUdSQmFSbHpjalJCVVFXVThpVWRCTGcweUwwZXR0MnF4bndvcTFwVFpHNzBiNDhZCnllUGw5Q1AwbUJEVHh5Y256aWU3Q2hTNzN3dDJJYTJsUkpCSDZPR0FMbHpaTUZwdnF3WkcvUC9WMk4wNVdJeGwKY05JMmNRS0NBUUVBb3lzN1ZobFVVNHp6b0cyQlVwMjdhRGdnb2JwUDR5UllCZ29vOWtURmdhZW1IWTVCM1NxQQpMY2toYWRXalFzZHdla1pxbDNBZ3ZIWGtIbFZjbXhsMzZmUmVGZ0pqT3dqVE04UWpsQWluOUtBUzY3UmFGM2NBClJpZEVIMndDeHo0bmZzUEdVdkpydUNaclpiUkd0WUtSQS9pUzBjMWEzQ0FJVnc0eFVkaDBVeGFONGVwZUFPMFEKd3pnNGVqclBXVzd5cDUvblVyT3BvaE9XQW81YVVCRlU1bEE0NTkzQTZXZXBodGhCNlgrVzNBOWprQmlnZkIzTQp2Rm53Qmx0dlJTUlFycjdTSE5qbUNGU2taTkh6dVpMM1BHZTBSeFBQK1lLOHJOcmdIS2pOSHpIdjY5ZXhZT2RTCjhlbzJUUFIrUVJxVG45Y2lLWnJjdFJCRGtLM01pQ2svb1FLQ0FRQVpJWmRrT0NsVVBIZlNrNHg1bkJYYXNoS1kKZ0R6ZXlZSFlMd05hQmRFS2dITnVmNmpDbHRLV29EelpzcXJ2MU55YS8xNDhzVGdTVGc5MzFiYmNoK2xuSEtKZApjWHJDUVpXQm51MlVxdWlzRk1lTk92cHAwY1B0NHRJWURaVkNSTVJyd0l
sWnFJSnhiMm5Bd0Z2YjBmRWZMays0CmdtdSszY0NhTi92UzNvSkE5RUZremp4RzBYaUxPeW55QVpiNWZZMDRObUZPSXNxM3JnVDREZUN1ckhUS3RPSjIKdDE0b1ROcTA2TEQ1NjZPblQ2cGxMN3ZhTHRUUi85L3FKYzAwN1dqdzhRZGJUdVFBTHFDaldXZzJiN0JWa095UgpvOUdyaFB6U2VUNm5CSEk4RW9KdjBueGVRV05EWDlwWmlXLzFuc3l1QUFGSjlJU2JEV2p6L1R3QjE3VUwKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K", + config.StakingCertContentKey: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpWYUdBOHpNREU1TURjeE1ERTIKTVRJeU5Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRFpuRG9ESEUybmo4MnhEakgwVGI3T1hNcVFESHoremJMaWR0Nk1TSTFYQjN2T0FJRWlQcXJ0ZW5HbnFSYlYKRmNtNUdaeHZ4aDRZUUQ4Q2pLU2sxcWdaSmN6czBEUFNpR1E4RWZsNFBHTzR4bkVibGxnTDNQVVJQV3A3bUVWMwpvaDZmeElDZ1FLVEJsVDY3MUVuRnpCNWx5SldwdW1SenZBMXZ5aEJNc1k4YU8reGRxNUxVRmx0WXpCZHZwZ0xYClZhRHdIWlEyUFFFV3RGMGQwSk8yTjBXRkZER05teDZuOHBLU2VJQVZEc1R3WkNaSytGQ2VlRXlvR2ZYc0lOc2MKMHlDTVFzbGF3a2ZPTXFBOXlCVjNKaTZRbUZZS3lHWXQ2NU1XR05xUEE0WHJJeWxpS3dDQ1h3ejltamFXeU43cgpBeXc5Y1dsTE1PRE5tRE9SV3pHUlo1MjkwTUVBRUlac3FqWUhWaXRSVE0vUm5OSWFkVG9aR08weTV1QWtNMTRjCm1Udm5zSzFDUDkycXRmU2lzcTc1Vy9JOTFkclRob0V0VEs3OFVHT2wvNVExWUJSMDhGK3RTVVdaV3lIZUk2VU8KQlVDR0MyYkN0bXpLTWw3dlUyNWxHNm1iQ1IxSnVRaTZSWXBuZk1qWEgzNmxWNFM3ZlR2U3d3dVIwM2gyRjNIMQplRmtXTkcybGJGclcwZHpEQ1BnM2xYd21GUTY1aFVjUWhjdHpub0J6NUMxbEYyZVcwM3d1Vmd4aW5udVZsSkhqCnkvR3JxbVdzQVNuMVBEdVZzNGs3azZESmZ3eUhBaUEwdXhYckdmeFl2cDdIOGo0KzJZT21XaVdsNXhZZ3JFRGoKdXI1bjhaeDQ2UEhRZXIyQXZxM3NiRUdFZTFNQ3RYSmxqM2RyZDVIaW0zbStOUUlEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUE0MGF4MGRBTXJiV2lrYUo1czZramFHa1BrWXV4SE5KYncwNDdEbzBoancrbmNYc3hjClFESG1XY29ISHBnTVFDeDArdnA4eStvS1o0cG5xTmZ
HU3VPVG83L2wwNW9RVy9OYld3OW1Id1RpTE1lSTE4L3gKQXkrNUxwT2FzdytvbXFXTGJkYmJXcUwwby9SdnRCZEsycmtjSHpUVnpFQ2dHU294VUZmWkQrY2syb2RwSCthUgpzUVZ1ODZBWlZmY2xOMm1qTXlGU3FNSXRxUmNWdzdycXIzWHk2RmNnUlFQeWtVbnBndUNFZ2NjOWM1NGMxbFE5ClpwZGR0NGV6WTdjVGRrODZvaDd5QThRRmNodnRFOVpiNWRKNVZ1OWJkeTlpZzFreXNjUFRtK1NleWhYUmNoVW8KcWw0SC9jekdCVk1IVVk0MXdZMlZGejdIaXRFQ2NUQUlwUzZRdmN4eGdZZXZHTmpaWnh5WnZFQThTWXBMTVp5YgpvbWs0ZW5EVExkL3hLMXlGN1ZGb2RUREV5cTYzSUFtME5UUVpVVnZJRGZKZXV6dU56NTV1eGdkVXEyUkxwYUplCjBidnJ0OU9ieitmNWoyam9uYjJlMEJ1dWN3U2RUeUZYa1VDeE1XK3BpSVVHa3lyZ3VBaGxjSG9oRExFbzJ1Qi8KaVE0Zm9zR3Fxc2w0N2IrVGV6VDVwU1NibGtnVWppd3o2ZURwTTRsUXB4MjJNeHNIVmx4RkhyY0JObTBUZDkydgpGaXhybWxsYW1BWmJFejF0Qi8vMGJpcEthT09adWhBTkpmcmdOOEJDNnYyYWhsNC9TQnV1dDA5YTBBenl4cXBwCnVDc3lUbmZORWQxVzZjNm5vYXEyNHMrN1c3S0tMSWVrdU5uMU51bm5IcUtxcmlFdUgxeGx4eFBqWUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + config.StakingSignerKeyContentKey: "QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDQ=", + }, + }, + { + Flags: FlagsMap{ + config.StakingTLSKeyContentKey: 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBNEN1YStiM1I3U1JSSU1PNFJoUDVjeW1oM0w4TTY5OW1xNXF0SlBiV0hJU1YzMDk1Ck5ZOVpWN1FRSFhGRGQ0RWtTbzRNeC83NDI1OUhtdHdJZ3dsSGRqSEpQNTlDbkNXZU4wdVNPMll6KzYyMnI5dmUKbXlnbWRSNis2K1lGMkJZWlB0MmxMRlh3MUtnRHdjTWovQ25sZDlRRVZRUDdQbGozbHFyT3ZVRjAzY2liZ1dCTwphYStOTDlVbi9ubTFDSjJkKzdobU41SWp0Qk1meVlIS0VMQmljNjFnLzg3VzBRaFNMNjNUU1dYZEtScnpKWmt5CnFlMTdERWQzS2I2cE8rOVAvekN3bmNobHY5U0FLMDByZzliM2U1Z1Y5U2NsVnlzbkdEZEx5RHJXNE1UemhiclYKTXFnem5qKzZPZGF1SWowdDNRa1lINXAvNFZpOE12Z3JTWHRBUmlyRytMWk5MU3NhTkxIdkwvU1FQSmxMWjVzNQpqTGVHUFk0b3pxd2pnZTE4RmlwaVdCSnZEZVZHM011bktkcUMvTDBGYmM4OXoxNjVCYmZ0M0JiZFk2NDNhVUdJCkIrSThwSkZnM2pveEg4dFJRc003RFViblMwN3k0WHk1L092MlUrSEMyZlNTTy8xbHRDRHd2QlhPNVljNXlmbXkKRUZIaEozV2huWUZaTWJoSU5ETVhheks5STFOQ0JhRjhra1Z6VWMzQTFtVW9SSXdVNys3WnhjUzVxRFZQcmlXLwp5cVRCK3JFV0l0ODdQT1huR2pDamNodml3NWdXb1ZKUjhlWVNnQitQVHZQT2VmZ0RJYklQZFRqV1NaWFkwRjlLCkdheUNzeXVRYWJLbms1ZUhObUxmM3dDNG1XbmhaTmFRUkxMMVhqRGdRaUxsaFBtK0ZDQkdpd0RvY1g4Q0F3RUEKQVFLQ0FnRUFwdU1QcnhtSDdYbjZBK0J4a1lwUlRWRVROWm50N3JRVVpYRHpzZThwbTNXQmRneGVlbWRMNWlVaApVaW4rUmp1WVh3Qzl0eTYwNmh2OFhPZXVWbzlUNmtSS1JOazE1N1dCd2p5Nmt3b1ZiU3I0TkpnRmM1RkNnREx4CmhBRnRIRi9uVDR3RzZhalpjQmZkSkNVNDV3UHgxM0c1LytqRTVMZXJLem5pUzdjdFgrZDNEYXc2OUNkRGZ2YTcKblpIU0dxWHM5WGRrY2I2VVlmMVN6dHVYS1RHSE9nTTdrWFhWS3kxOHNnNUFuQVgvemhoSUtCZVRSanFNUHFuOQpwdEJRZ1ZRNlJBdGxrVEdkdm1CZlF0MWlwZllsckplZTBUSGhkTEdsbXp1ZmFXT1VrU1ZPL3FJSEVuMXlZRCtsClRtWHFvWWJXWEJYbkpiQUp3Q1FsaC9TRmxXRHlpV1dPeHN6eGR3d1QyeWJ3N09SM2EwREVWME1iS0prVWV4eUYKOTJMcjNxb0JTWlJGUW5YVnZCZ2pRT3duekVGcGgxQU51R1kzb2RMOEpTTTF0SG5pSXNDczRXaERQT3NiQWoraAprd1M1MWNvbE1rM2JOQ1ozeGVBcmpNTEJWTGdUN3hMWC83WlljNy9vVEVGV2lrKzIwVHZTRVd6ZEUxTi80Z2ZKCmpFVS9WcXJuTmp5ZXYydzlBazZiRWt3WkZMUzZWWjlyVFdURjlqazhDMWFYai9SaGZhYUMzM3hYQmJobjlIdVgKbFR1L0phTE1wMFFjNGFDbHFVWU02TGx4SWVqSDViOGZJeENOSEppc2xYSkRhNmE2YVFsODVCaVFPRFBGeFZUNQpXQ3BRRDQ4NThFdUxkWDRCUlcyZklHUlk2RGl2UjZ1SlJBbXhMZitFd0FnL3JnVHpVc0VDZ2dFQkFQU2tIWDVGCkJoUmd1ZEYwTW53Titlbmo0U29YSGhSRytEVG9yeE8xWmgycU4
5bG5YTzluTUtNQ1hWSkxJVnZHRnVpTVJTSjAKVktmMXUwVXFhQkYwMk1iSXZiZWk3bXpra1cwLzc0bTA0WDM3aXlNbXRubW9vUTBHRVY4NG9PTndBdDNEZWVUZwp2SXBPdHE5VjI2WEhHYVFEeGNSRk1GQnVEMDJhMnlmM0pZa1hqNzRpMnNjTVA0eHhNSE1rSnhHSzlGU0JPaG5wCmsvcDBoTWwzRlZHZm81TnM1VDFSbDNwTXVlRUYzQjUrQnZyVjF6MTRJTi8wbHd1aHVqclVVWVM0RXcrUGs1ekMKRlN1YmZJUU1xU1QxanZYWFRhR2dYMEdQZmZhNGx4Z2FERUFUTGV3dkwzRmp5MjdYemw1N2k5WnZUTkM0eUZhZAo0b2tqci9lSXRIdEtWSEVDZ2dFQkFPcVVLd3cvNnVpSk1OdmMyTnhMVU14dXpCMDdycU9aS1QyVk1Ca0c1R3prCnY4MWZEdGxuZEQ4Y3dIU3FPTEtzY0gvUUtYRDdXSzNGQ3V2WlN2TXdDakVCNFBwMXpnd0pvQmV4dVh2RkREYnMKMFQ3N1Fpd2UrMldtUklpWWV2NWFSRzNsbkJNTThSRFMvUVB6RWRveEhkenJGVVJZVmwwcnY1bC83cndCMlpkNgp4QVlIY1VwWmM0WmF5c0VncVFDdVpRcUM3TXJxN3FmQnlVdGhIMjhZaWN6MTk3OGZwRTNkeDE1Y2VxalU5akJRCnhVVXdiZUtUL1VrUVF2bVlIZHRnd0VqaHpWUUwxT0FBV2tUNlJzc01xeDJSQWRpMFNxV1BGRWh4TlBIQnBHOUIKbEtVREJCSU02ZHU5MTZPbjBCamdoaDNXaHhRS3BUSXp2ZU5BaWV4YlhPOENnZ0VCQU52Sm9oR3ljMzdWVTd3ZwoxOFpxVEEvY3dvc3REOElKN0s2a0tiN2NKeTBabzJsM21xQWZKaXdkVUxoQmRXdmRNUEdtSytxRGR4Y2JCeTloCnBQT2g5YXZKNStCV3lqd2NzYWJrWFJGcjUzWm5DcDcvQmN1Uk8zZlc3cjZNd3NieStEQkNrWDJXaHV6L1FOT1AKb0hGMHljMTM4aktlTW9UZ0RIR2RZYTJyTmhiUGl6MjRWTE9saG1abnZxNkRXWEpDVTdha0R3Mytzd3E5cWhyUwpHTjRuUFMrVEV2VWZHNmN0ellXajNSbXNBaHRUQ1RoWmQ3ZWRLQ0swSHZzQmkyZGdkUWR5NTV4YkplZnlubENJCmkySUFGM3M0L3E3cHhRckNudG1OQjNvSTFONndISDduK1lpMnJxc2J5WFZMSzl2d1RLUHNqMWg2S204cEY4dWQKRHdFQlM1RUNnZ0VBTW5xMkZNbkFiRS94Z3E2d3dCODVBUFVxMlhPWmJqMHNZY016K1g3Qk15bTZtS0JIR3NPbgpnVmxYbFFONGRnS2pwdTJOclhGNU1OUEJPT1dtdWxSeExRQ2hnR1JQZGNtd2VNalhDR3ByNlhubXdXM2lYSXBDClFTcVpmdWVKT0NrR3BydU5iWkFRWkRWekd5RjRpd0tjMFlpSktBNzJidEJXUjlyKzdkaGNFYnZxYVAyN0JHdmgKYjEwa1dwRURyVkRhRDN3REp0dU5oZTR1dWhqcFljZmZCNHM2eUJjd0RVMlhkSmZrRVdiYW42VVIvb1NnY095MQp5YjVGRzE3L3RkREpNQ1hmUUtIWEtta0pBK1R6elFncDNvL3czTWhYYys4cFJ6bU5VaVVBbEt5QkowMVIxK3lOCmVxc010M3dLVFFBci9FbkpBYWdVeW92VjVneGlZY2w3WXdLQ0FRQWRPWWNaeC9sLy9PMEZ0bTZ3cFhHUmpha04KSUhGY1AyaTdtVVc3NkV3bmoxaUZhOVl2N3BnYmJCRDlTMVNNdWV0ZklXY3FTakRpVWF5bW5EZEExOE5WWVVZdgpsaGxVSjZrd2RWdXNlanFmY24rNzVKZjg3QnZXZElWR3JOeFBkQjdaL2x
tYld4RnF5WmkwMFI5MFVHQm50YU11CnpnL2lickxnYXR6QTlTS2dvV1htMmJMdDZiYlhlZm1PZ25aWHl3OFFrbzcwWHh0eDVlQlIxQkRBUWpEaXM4MW4KTGc5NnNKM0xPbjdTWEhmeEozQnRYc2hUSkFvQkZ4NkVwbXVsZ05vUFdJa0p0ZDdYV1lQNll5MjJEK2tLN09oSApScTNDaVlNdERtWm91Yi9rVkJMME1WZFNtN2huMVRTVlRIakZvVzZjd1EzN2lLSGprWlZSd1gxS3p0MEIKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K", + config.StakingCertContentKey: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZOekNDQXg4Q0NRQzY4N1hGeHREUlNqQU5CZ2txaGtpRzl3MEJBUXNGQURCL01Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1Rsa3hEekFOQmdOVkJBY01Ca2wwYUdGallURVFNQTRHQTFVRUNnd0hRWFpoYkdGaQpjekVPTUF3R0ExVUVDd3dGUjJWamEyOHhEREFLQmdOVkJBTU1BMkYyWVRFaU1DQUdDU3FHU0liM0RRRUpBUllUCmMzUmxjR2hsYmtCaGRtRnNZV0p6TG05eVp6QWdGdzB4T1RBM01ESXhOakV5TWpsYUdBOHpNREU1TURjeE1ERTIKTVRJeU9Wb3dPakVMTUFrR0ExVUVCaE1DVlZNeEN6QUpCZ05WQkFnTUFrNVpNUkF3RGdZRFZRUUtEQWRCZG1GcwpZV0p6TVF3d0NnWURWUVFEREFOaGRtRXdnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDCkFRRGdLNXI1dmRIdEpGRWd3N2hHRS9sekthSGN2d3pyMzJhcm1xMGs5dFljaEpYZlQzazFqMWxYdEJBZGNVTjMKZ1NSS2pnekgvdmpibjBlYTNBaURDVWQyTWNrL24wS2NKWjQzUzVJN1pqUDdyYmF2Mjk2YktDWjFIcjdyNWdYWQpGaGsrM2FVc1ZmRFVxQVBCd3lQOEtlVjMxQVJWQS9zK1dQZVdxczY5UVhUZHlKdUJZRTVwcjQwdjFTZitlYlVJCm5aMzd1R1kza2lPMEV4L0pnY29Rc0dKenJXRC96dGJSQ0ZJdnJkTkpaZDBwR3ZNbG1US3A3WHNNUjNjcHZxazcKNzAvL01MQ2R5R1cvMUlBclRTdUQxdmQ3bUJYMUp5VlhLeWNZTjB2SU90Ymd4UE9GdXRVeXFET2VQN281MXE0aQpQUzNkQ1JnZm1uL2hXTHd5K0N0SmUwQkdLc2I0dGswdEt4bzBzZTh2OUpBOG1VdG5tem1NdDRZOWppak9yQ09CCjdYd1dLbUpZRW04TjVVYmN5NmNwMm9MOHZRVnR6ejNQWHJrRnQrM2NGdDFqcmpkcFFZZ0g0anlra1dEZU9qRWYKeTFGQ3d6c05SdWRMVHZMaGZMbjg2L1pUNGNMWjlKSTcvV1cwSVBDOEZjN2xoem5KK2JJUVVlRW5kYUdkZ1ZreAp1RWcwTXhkck1yMGpVMElGb1h5U1JYTlJ6Y0RXWlNoRWpCVHY3dG5GeExtb05VK3VKYi9LcE1INnNSWWkzenM4CjVlY2FNS055RytMRG1CYWhVbEh4NWhLQUg0OU84ODU1K0FNaHNnOTFPTlpKbGRqUVgwb1pySUt6SzVCcHNxZVQKbDRjMll0L2ZBTGlaYWVGazFwQkVzdlZlTU9CQ0l1V0UrYjRVSUVhTEFPaHhmd0lEQVFBQk1BMEdDU3FHU0liMwpEUUVCQ3dVQUE0SUNBUUIrMlZYbnFScWZHN0gyL0swbGd6eFQrWDlyMXUrWURuMEVhVUdBRzcxczcwUW5xYnBuClg3dEJtQ0tMTjZYZ1BMMEhyTjkzM253aVlybWZiOFM
zM3paN2t3OEdKRHZhVGFtTE55ZW00LzhxVEJRbW5Sd2UKNnJRN1NZMmw3M0lnODdtUjBXVGkrclRuVFR0YzY2Ky9qTHRGZWFqMFljbDloQlpYSEtpVUxTR2hzYlVid3RregppdU5sQU5ob05LWE5JQUJSSW1VcTZPd1loRVFOMER3SFhqNzl3a3B5RFlqS1p3SHVFWlVrbmM4UGwyb1FQQmtlCm1pbDN0c3J2R1Jrd2hpc25YWDd0cWg2cldLVlpOSmtPNjhoeTdYTzlhVFhqYmNCLzdZMUs4M0lTTkV5R1BzSC8KcHdGeWQvajhPNG1vZHdoN1Vsd3cxL2h3Y3FucWlFRkUzS3p4WDJwTWg3VnhlQW1YMnQ1ZVhGWk9sUngxbGVjTQpYUmtWdTE5bFlES1FIR1NyR3huZytCRmxTT0I5NmU1a1hJYnVJWEtwUEFBQ29CUS9KWllidEhrczlIOE90TllPClAyam9xbW5ROXdHa0U1Y28xSWkvL2oydHVvQ1JDcEs4Nm1tYlRseU5ZdksrMS9ra0tjc2FpaVdYTnJRc3JJRFoKQkZzMEZ3WDVnMjRPUDUrYnJ4VGxSWkUwMVI2U3Q4bFFqNElVd0FjSXpHOGZGbU1DV2FZYXZyQ1pUZVlhRWl5RgpBMFgyVkEvdlo3eDlENVA5WjVPYWtNaHJNVytoSlRZcnBIMXJtNktSN0IyNmlVMmtKUnhUWDd4UTlscmtzcWZCCjdsWCtxMGloZWVZQTRjSGJHSk5Xd1dnZCtGUXNLL1BUZWl5cjRyZnF1dHV0ZFdBMEl4b0xSYzNYRnc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==", + config.StakingSignerKeyContentKey: "QXZhbGFuY2hlTG9jYWxOZXR3b3JrVmFsaWRhdG9yMDU=", + }, + }, + }, + } + for _, node := range network.Nodes { + if err := node.EnsureNodeID(); err != nil { + panic(fmt.Sprintf("failed to ensure nodeID: %s", err)) + } + } + + return network +} diff --git a/tests/fixture/tmpnet/network.go b/tests/fixture/tmpnet/network.go index 28626af3c4df..63796be267a4 100644 --- a/tests/fixture/tmpnet/network.go +++ b/tests/fixture/tmpnet/network.go @@ -9,17 +9,18 @@ import ( "errors" "fmt" "io" - "io/fs" "os" "path/filepath" + "slices" "strconv" "strings" "time" + "github.com/google/uuid" + "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/set" @@ -40,13 +41,20 @@ const ( // increase the time for a network's nodes to be seen as healthy. 
networkHealthCheckInterval = 200 * time.Millisecond + // All temporary networks will use this arbitrary network ID by default. + defaultNetworkID = 88888 + // eth address: 0x8db97C7cEcE249c2b98bDC0226Cc4C2A57BF52FC HardHatKeyStr = "56289e99c94b6912bfc12adc093c9b51124f0dc54ac7a766b2bc5ccf558d8027" ) -// HardhatKey is a legacy used for hardhat testing in subnet-evm -// TODO(marun) Remove when no longer needed. -var HardhatKey *secp256k1.PrivateKey +var ( + // Key expected to be funded for subnet-evm hardhat testing + // TODO(marun) Remove when subnet-evm configures the genesis with this key. + HardhatKey *secp256k1.PrivateKey + + errInsufficientNodes = errors.New("at least one node is required") +) func init() { hardhatKeyBytes, err := hex.DecodeString(HardHatKeyStr) @@ -61,11 +69,31 @@ func init() { // Collects the configuration for running a temporary avalanchego network type Network struct { + // Uniquely identifies the temporary network for metrics + // collection. Distinct from avalanchego's concept of network ID + // since the utility of special network ID values (e.g. to trigger + // specific fork behavior in a given network) precludes requiring + // unique network ID values across all temporary networks. + UUID string + + // A string identifying the entity that started or maintains this + // network. Useful for differentiating between networks when a + // given CI job uses multiple networks. + Owner string + // Path where network configuration and data is stored Dir string + // Id of the network. If zero, must be set in Genesis. + NetworkID uint32 + // Configuration common across nodes - Genesis *genesis.UnparsedConfig + + // Genesis for the network. 
If nil, NetworkID must be non-zero + Genesis *genesis.UnparsedConfig + + // Configuration for primary network chains (P, X, C) + // TODO(marun) Rename to PrimaryChainConfigs ChainConfigs map[string]FlagsMap // Default configuration to use when creating new nodes @@ -82,6 +110,13 @@ type Network struct { Subnets []*Subnet } +func NewDefaultNetwork(owner string) *Network { + return &Network{ + Owner: owner, + Nodes: NewNodesOrPanic(DefaultNodeCount), + } +} + // Ensure a real and absolute network dir so that node // configuration that embeds the network path will continue to // work regardless of symlink and working directory changes. @@ -93,22 +128,24 @@ func toCanonicalDir(dir string) (string, error) { return filepath.EvalSymlinks(absDir) } -func StartNewNetwork( +func BootstrapNewNetwork( ctx context.Context, w io.Writer, network *Network, rootNetworkDir string, avalancheGoExecPath string, pluginDir string, - nodeCount int, ) error { - if err := network.EnsureDefaultConfig(w, avalancheGoExecPath, pluginDir, nodeCount); err != nil { + if len(network.Nodes) == 0 { + return errInsufficientNodes + } + if err := network.EnsureDefaultConfig(w, avalancheGoExecPath, pluginDir); err != nil { return err } if err := network.Create(rootNetworkDir); err != nil { return err } - return network.Start(ctx, w) + return network.Bootstrap(ctx, w) } // Stops the nodes of the network configured in the provided directory. @@ -145,16 +182,26 @@ func ReadNetwork(dir string) (*Network, error) { } // Initializes a new network with default configuration. 
-func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, pluginDir string, nodeCount int) error { +func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, pluginDir string) error { if _, err := fmt.Fprintf(w, "Preparing configuration for new network with %s\n", avalancheGoPath); err != nil { return err } + // A UUID supports centralized metrics collection + if len(n.UUID) == 0 { + n.UUID = uuid.NewString() + } + // Ensure default flags if n.DefaultFlags == nil { n.DefaultFlags = FlagsMap{} } - n.DefaultFlags.SetDefaults(DefaultFlags()) + n.DefaultFlags.SetDefaults(DefaultTmpnetFlags()) + + if len(n.Nodes) == 1 { + // Sybil protection needs to be disabled for a single node network to start + n.DefaultFlags[config.SybilProtectionEnabledKey] = false + } // Only configure the plugin dir with a non-empty value to ensure // the use of the default value (`[datadir]/plugins`) when @@ -165,8 +212,8 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi } } - // Ensure pre-funded keys - if len(n.PreFundedKeys) == 0 { + // Ensure pre-funded keys if the genesis is not predefined + if n.Genesis == nil && len(n.PreFundedKeys) == 0 { keys, err := NewPrivateKeys(DefaultPreFundedKeyCount) if err != nil { return err @@ -191,11 +238,6 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi n.DefaultRuntimeConfig.AvalancheGoPath = avalancheGoPath } - // Ensure nodes are created - if len(n.Nodes) == 0 { - n.Nodes = NewNodes(nodeCount) - } - // Ensure nodes are configured for i := range n.Nodes { if err := n.EnsureNodeConfig(n.Nodes[i]); err != nil { @@ -206,45 +248,32 @@ func (n *Network) EnsureDefaultConfig(w io.Writer, avalancheGoPath string, plugi return nil } -// Creates the network on disk, choosing its network id and generating its genesis in the process. +// Creates the network on disk, generating its genesis and configuring its nodes in the process. 
func (n *Network) Create(rootDir string) error { + // Ensure creation of the root dir if len(rootDir) == 0 { // Use the default root dir var err error - rootDir, err = getDefaultRootDir() + rootDir, err = getDefaultRootNetworkDir() if err != nil { return err } } - - // Ensure creation of the root dir if err := os.MkdirAll(rootDir, perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create root network dir: %w", err) } - // Determine the network path and ID - var ( - networkDir string - networkID uint32 - ) - if n.Genesis != nil && n.Genesis.NetworkID > 0 { - // Use the network ID defined in the provided genesis - networkID = n.Genesis.NetworkID + // A time-based name ensures consistent directory ordering + dirName := time.Now().Format("20060102-150405.999999") + if len(n.Owner) > 0 { + // Include the owner to differentiate networks created at similar times + dirName = fmt.Sprintf("%s-%s", dirName, n.Owner) } - if networkID > 0 { - // Use a directory with a random suffix - var err error - networkDir, err = os.MkdirTemp(rootDir, fmt.Sprintf("%d.", n.Genesis.NetworkID)) - if err != nil { - return fmt.Errorf("failed to create network dir: %w", err) - } - } else { - // Find the next available network ID based on the contents of the root dir - var err error - networkID, networkDir, err = findNextNetworkID(rootDir) - if err != nil { - return err - } + + // Ensure creation of the network dir + networkDir := filepath.Join(rootDir, dirName) + if err := os.MkdirAll(networkDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create network dir: %w", err) } canonicalDir, err := toCanonicalDir(networkDir) if err != nil { @@ -252,18 +281,18 @@ func (n *Network) Create(rootDir string) error { } n.Dir = canonicalDir + // Ensure the existence of the plugin directory or nodes won't be able to start. 
pluginDir, err := n.DefaultFlags.GetStringVal(config.PluginDirKey) if err != nil { return err } if len(pluginDir) > 0 { - // Ensure the existence of the plugin directory or nodes won't be able to start. if err := os.MkdirAll(pluginDir, perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create plugin dir: %w", err) } } - if n.Genesis == nil { + if n.NetworkID == 0 && n.Genesis == nil { // Pre-fund known legacy keys to support ad-hoc testing. Usage of a legacy key will // require knowing the key beforehand rather than retrieving it from the set of pre-funded // keys exposed by a network. Since allocation will not be exclusive, a test using a @@ -275,7 +304,7 @@ func (n *Network) Create(rootDir string) error { } keysToFund = append(keysToFund, n.PreFundedKeys...) - genesis, err := NewTestGenesis(networkID, n.Nodes, keysToFund) + genesis, err := NewTestGenesis(defaultNetworkID, n.Nodes, keysToFund) if err != nil { return err } @@ -294,40 +323,129 @@ func (n *Network) Create(rootDir string) error { return n.Write() } -// Starts all nodes in the network -func (n *Network) Start(ctx context.Context, w io.Writer) error { - if _, err := fmt.Fprintf(w, "Starting network %d @ %s\n", n.Genesis.NetworkID, n.Dir); err != nil { - return err +// Starts the specified nodes +func (n *Network) StartNodes(ctx context.Context, w io.Writer, nodesToStart ...*Node) error { + if len(nodesToStart) == 0 { + return errInsufficientNodes + } + nodesToWaitFor := nodesToStart + if !slices.Contains(nodesToStart, n.Nodes[0]) { + // If starting all nodes except the bootstrap node (because the bootstrap node is already + // running), ensure that the health of the bootstrap node will be logged by including it in + // the set of nodes to wait for. + nodesToWaitFor = n.Nodes + } else { + // Simplify output by only logging network start when starting all nodes or when starting + // the first node by itself to bootstrap subnet creation. 
+ if _, err := fmt.Fprintf(w, "Starting network %s (UUID: %s)\n", n.Dir, n.UUID); err != nil { + return err + } } + // Record the time before nodes are started to ensure visibility of subsequently collected metrics via the emitted link + startTime := time.Now() + // Configure the networking for each node and start - for _, node := range n.Nodes { + for _, node := range nodesToStart { if err := n.StartNode(ctx, w, node); err != nil { return err } } - if _, err := fmt.Fprintf(w, "Waiting for all nodes to report healthy...\n\n"); err != nil { + if _, err := fmt.Fprint(w, "Waiting for nodes to report healthy...\n\n"); err != nil { + return err + } + if err := waitForHealthy(ctx, w, nodesToWaitFor); err != nil { return err } - if err := n.WaitForHealthy(ctx, w); err != nil { + if _, err := fmt.Fprintf(w, "\nStarted network %s (UUID: %s)\n", n.Dir, n.UUID); err != nil { return err } - if _, err := fmt.Fprintf(w, "\nStarted network %d @ %s\n", n.Genesis.NetworkID, n.Dir); err != nil { + // Provide a link to the main dashboard filtered by the uuid and showing results from now till whenever the link is viewed + if _, err := fmt.Fprintf(w, "\nMetrics: https://grafana-experimental.avax-dev.network/d/kBQpRdWnk/avalanche-main-dashboard?&var-filter=network_uuid%%7C%%3D%%7C%s&var-filter=is_ephemeral_node%%7C%%3D%%7Cfalse&from=%d&to=now\n", n.UUID, startTime.UnixMilli()); err != nil { return err } return nil } -func (n *Network) AddEphemeralNode(ctx context.Context, w io.Writer, flags FlagsMap) (*Node, error) { - node := NewNode("") - node.Flags = flags - node.IsEphemeral = true - if err := n.StartNode(ctx, w, node); err != nil { - return nil, err +// Start the network for the first time +func (n *Network) Bootstrap(ctx context.Context, w io.Writer) error { + if len(n.Subnets) == 0 { + // Without the need to coordinate subnet configuration, + // starting all nodes at once is the simplest option. + return n.StartNodes(ctx, w, n.Nodes...) 
} - return node, nil + + // The node that will be used to create subnets and bootstrap the network + bootstrapNode := n.Nodes[0] + + // Whether sybil protection will need to be re-enabled after subnet creation + reEnableSybilProtection := false + + if len(n.Nodes) > 1 { + // Reduce the cost of subnet creation for a network of multiple nodes by + // creating subnets with a single node with sybil protection + // disabled. This allows the creation of initial subnet state without + // requiring coordination between multiple nodes. + + if _, err := fmt.Fprintln(w, "Starting a single-node network with sybil protection disabled for quicker subnet creation"); err != nil { + return err + } + + // If sybil protection is enabled, it should be re-enabled before the node is used to bootstrap the other nodes + var err error + reEnableSybilProtection, err = bootstrapNode.Flags.GetBoolVal(config.SybilProtectionEnabledKey, true) + if err != nil { + return fmt.Errorf("failed to read sybil protection flag: %w", err) + } + + // Ensure sybil protection is disabled for the bootstrap node. + bootstrapNode.Flags[config.SybilProtectionEnabledKey] = false + } + + if err := n.StartNodes(ctx, w, bootstrapNode); err != nil { + return err + } + + // Don't restart the node during subnet creation since it will always be restarted afterwards. 
+ if err := n.CreateSubnets(ctx, w, bootstrapNode.URI, false /* restartRequired */); err != nil { + return err + } + + if reEnableSybilProtection { + if _, err := fmt.Fprintf(w, "Re-enabling sybil protection for %s\n", bootstrapNode.NodeID); err != nil { + return err + } + delete(bootstrapNode.Flags, config.SybilProtectionEnabledKey) + } + + if _, err := fmt.Fprintf(w, "Restarting bootstrap node %s\n", bootstrapNode.NodeID); err != nil { + return err + } + + if len(n.Nodes) == 1 { + // Ensure the node is restarted to pick up subnet and chain configuration + return n.RestartNode(ctx, w, bootstrapNode) + } + + // TODO(marun) This last restart of the bootstrap node might be unnecessary if: + // - sybil protection didn't change + // - the node is not a subnet validator + + // Ensure the bootstrap node is restarted to pick up configuration changes. Avoid using + // RestartNode since the node won't be able to report healthy until other nodes are started. + if err := bootstrapNode.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop node %s: %w", bootstrapNode.NodeID, err) + } + if err := n.StartNode(ctx, w, bootstrapNode); err != nil { + return fmt.Errorf("failed to start node %s: %w", bootstrapNode.NodeID, err) + } + + if _, err := fmt.Fprintln(w, "Starting remaining nodes..."); err != nil { + return err + } + return n.StartNodes(ctx, w, n.Nodes[1:]...) } // Starts the provided node after configuring it for the network. @@ -356,39 +474,35 @@ func (n *Network) StartNode(ctx context.Context, w io.Writer, node *Node) error return nil } -// Waits until all nodes in the network are healthy. 
-func (n *Network) WaitForHealthy(ctx context.Context, w io.Writer) error { - ticker := time.NewTicker(networkHealthCheckInterval) - defer ticker.Stop() - - healthyNodes := set.NewSet[ids.NodeID](len(n.Nodes)) - for healthyNodes.Len() < len(n.Nodes) { - for _, node := range n.Nodes { - if healthyNodes.Contains(node.NodeID) { - continue - } - - healthy, err := node.IsHealthy(ctx) - if err != nil && !errors.Is(err, ErrNotRunning) { - return err - } - if !healthy { - continue - } - - healthyNodes.Add(node.NodeID) - if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { - return err - } - } +// Restart a single node. +func (n *Network) RestartNode(ctx context.Context, w io.Writer, node *Node) error { + // Ensure the node reuses the same API port across restarts to ensure + // consistent labeling of metrics. Otherwise prometheus's automatic + // addition of the `instance` label (host:port) results in + // segmentation of results for a given node every time the port + // changes on restart. This segmentation causes graphs on the grafana + // dashboards to display multiple series per graph for a given node, + // one for each port that the node used. + // + // There is a non-zero chance of the port being allocatted to a + // different process and the node subsequently being unable to start, + // but the alternative is having to update the grafana dashboards + // query-by-query to ensure that node metrics ignore the instance + // label. 
+ if err := node.SaveAPIPort(); err != nil { + return err + } - select { - case <-ctx.Done(): - return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) - case <-ticker.C: - } + if err := node.Stop(ctx); err != nil { + return fmt.Errorf("failed to stop node %s: %w", node.NodeID, err) } - return nil + if err := n.StartNode(ctx, w, node); err != nil { + return fmt.Errorf("failed to start node %s: %w", node.NodeID, err) + } + if _, err := fmt.Fprintf(w, " waiting for node %s to report healthy\n", node.NodeID); err != nil { + return err + } + return WaitForHealthy(ctx, node) } // Stops all nodes in the network. @@ -423,20 +537,11 @@ func (n *Network) Stop(ctx context.Context) error { // Restarts all non-ephemeral nodes in the network. func (n *Network) Restart(ctx context.Context, w io.Writer) error { - if _, err := fmt.Fprintf(w, " restarting network\n"); err != nil { + if _, err := fmt.Fprintln(w, " restarting network"); err != nil { return err } for _, node := range n.Nodes { - if err := node.Stop(ctx); err != nil { - return fmt.Errorf("failed to stop node %s: %w", node.NodeID, err) - } - if err := n.StartNode(ctx, w, node); err != nil { - return fmt.Errorf("failed to start node %s: %w", node.NodeID, err) - } - if _, err := fmt.Fprintf(w, " waiting for node %s to report healthy\n", node.NodeID); err != nil { - return err - } - if err := WaitForHealthy(ctx, node); err != nil { + if err := n.RestartNode(ctx, w, node); err != nil { return err } } @@ -450,10 +555,20 @@ func (n *Network) Restart(ctx context.Context, w io.Writer) error { func (n *Network) EnsureNodeConfig(node *Node) error { flags := node.Flags + // Ensure nodes can label their metrics with the network uuid + node.NetworkUUID = n.UUID + + // Ensure nodes can label metrics with an indication of the shared/private nature of the network + node.NetworkOwner = n.Owner + // Set the network name if available - if n.Genesis != nil && n.Genesis.NetworkID > 0 { + networkID := n.NetworkID + 
if networkID == 0 && n.Genesis != nil && n.Genesis.NetworkID > 0 { + networkID = n.Genesis.NetworkID + } + if networkID > 0 { // Convert the network id to a string to ensure consistency in JSON round-tripping. - flags[config.NetworkNameKey] = strconv.FormatUint(uint64(n.Genesis.NetworkID), 10) + flags[config.NetworkNameKey] = strconv.FormatUint(uint64(networkID), 10) } if err := node.EnsureKeys(); err != nil { @@ -464,13 +579,26 @@ func (n *Network) EnsureNodeConfig(node *Node) error { // Set fields including the network path if len(n.Dir) > 0 { - node.Flags.SetDefaults(FlagsMap{ - config.GenesisFileKey: n.getGenesisPath(), + defaultFlags := FlagsMap{ config.ChainConfigDirKey: n.getChainConfigDir(), - }) + } + + if n.Genesis != nil { + defaultFlags[config.GenesisFileKey] = n.getGenesisPath() + } + + // Only set the subnet dir if it exists or the node won't start. + subnetDir := n.getSubnetDir() + if _, err := os.Stat(subnetDir); err == nil { + defaultFlags[config.SubnetConfigDirKey] = subnetDir + } else if !errors.Is(err, os.ErrNotExist) { + return err + } + + node.Flags.SetDefaults(defaultFlags) // Ensure the node's data dir is configured - dataDir := node.getDataDir() + dataDir := node.GetDataDir() if len(dataDir) == 0 { // NodeID will have been set by EnsureKeys dataDir = filepath.Join(n.Dir, node.NodeID.String()) @@ -485,17 +613,26 @@ func (n *Network) EnsureNodeConfig(node *Node) error { } } - // Ensure available subnets are tracked + return nil +} + +// TrackedSubnetsForNode returns the subnet IDs for the given node +func (n *Network) TrackedSubnetsForNode(nodeID ids.NodeID) string { subnetIDs := make([]string, 0, len(n.Subnets)) for _, subnet := range n.Subnets { if subnet.SubnetID == ids.Empty { + // Subnet has not yet been created continue } - subnetIDs = append(subnetIDs, subnet.SubnetID.String()) + // Only track subnets that this node validates + for _, validatorID := range subnet.ValidatorIDs { + if validatorID == nodeID { + subnetIDs = 
append(subnetIDs, subnet.SubnetID.String()) + break + } + } } - flags[config.TrackSubnetsKey] = strings.Join(subnetIDs, ",") - - return nil + return strings.Join(subnetIDs, ",") } func (n *Network) GetSubnet(name string) *Subnet { @@ -507,18 +644,23 @@ func (n *Network) GetSubnet(name string) *Subnet { return nil } -// Ensure that each subnet on the network is created and that it is validated by all non-ephemeral nodes. -func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { +// Ensure that each subnet on the network is created. If restartRequired is false, node restart +// to pick up configuration changes becomes the responsibility of the caller. +func (n *Network) CreateSubnets(ctx context.Context, w io.Writer, apiURI string, restartRequired bool) error { createdSubnets := make([]*Subnet, 0, len(n.Subnets)) for _, subnet := range n.Subnets { - if _, err := fmt.Fprintf(w, "Creating subnet %q\n", subnet.Name); err != nil { - return err + if len(subnet.ValidatorIDs) == 0 { + return fmt.Errorf("subnet %s needs at least one validator", subnet.SubnetID) } if subnet.SubnetID != ids.Empty { // The subnet already exists continue } + if _, err := fmt.Fprintf(w, "Creating subnet %q\n", subnet.Name); err != nil { + return err + } + if subnet.OwningKey == nil { // Allocate a pre-funded key and remove it from the network so it won't be used for // other purposes @@ -554,42 +696,66 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { return nil } - // Ensure the in-memory subnet state - n.Subnets = append(n.Subnets, createdSubnets...) - // Ensure the pre-funded key changes are persisted to disk if err := n.Write(); err != nil { return err } - // Reconfigure nodes for the new subnets - if _, err := fmt.Fprintf(w, "Configured nodes to track new subnet(s). 
Restart is required.\n"); err != nil { - return err - } + reconfiguredNodes := []*Node{} for _, node := range n.Nodes { - if err := n.EnsureNodeConfig(node); err != nil { + existingTrackedSubnets, err := node.Flags.GetStringVal(config.TrackSubnetsKey) + if err != nil { return err } + trackedSubnets := n.TrackedSubnetsForNode(node.NodeID) + if existingTrackedSubnets == trackedSubnets { + continue + } + node.Flags[config.TrackSubnetsKey] = trackedSubnets + reconfiguredNodes = append(reconfiguredNodes, node) } - // Restart nodes to allow new configuration to take effect - // TODO(marun) Only restart the validator nodes of newly-created subnets - if err := n.Restart(ctx, w); err != nil { - return err + + if restartRequired { + if _, err := fmt.Fprintln(w, "Restarting node(s) to enable them to track the new subnet(s)"); err != nil { + return err + } + + for _, node := range reconfiguredNodes { + if len(node.URI) == 0 { + // Only running nodes should be restarted + continue + } + if err := n.RestartNode(ctx, w, node); err != nil { + return err + } + } } - // Add each node as a subnet validator + // Add validators for the subnet for _, subnet := range createdSubnets { if _, err := fmt.Fprintf(w, "Adding validators for subnet %q\n", subnet.Name); err != nil { return err } - if err := subnet.AddValidators(ctx, w, n.Nodes); err != nil { + + // Collect the nodes intended to validate the subnet + validatorIDs := set.NewSet[ids.NodeID](len(subnet.ValidatorIDs)) + validatorIDs.Add(subnet.ValidatorIDs...) 
+ validatorNodes := []*Node{} + for _, node := range n.Nodes { + if !validatorIDs.Contains(node.NodeID) { + continue + } + validatorNodes = append(validatorNodes, node) + } + + if err := subnet.AddValidators(ctx, w, apiURI, validatorNodes...); err != nil { return err } } // Wait for nodes to become subnet validators pChainClient := platformvm.NewClient(n.Nodes[0].URI) - restartRequired := false + validatorsToRestart := set.Set[ids.NodeID]{} for _, subnet := range createdSubnets { if err := waitForActiveValidators(ctx, w, pChainClient, subnet); err != nil { return err @@ -612,17 +778,29 @@ func (n *Network) CreateSubnets(ctx context.Context, w io.Writer) error { // subnet's validator nodes will need to be restarted for those nodes to read // the newly written chain configuration and apply it to the chain(s). if subnet.HasChainConfig() { - restartRequired = true + validatorsToRestart.Add(subnet.ValidatorIDs...) } } - if !restartRequired { + if !restartRequired || len(validatorsToRestart) == 0 { return nil } + if _, err := fmt.Fprintln(w, "Restarting node(s) to pick up chain configuration"); err != nil { + return err + } + // Restart nodes to allow configuration for the new chains to take effect - // TODO(marun) Only restart the validator nodes of subnets that have chains that need configuring - return n.Restart(ctx, w) + for _, node := range n.Nodes { + if !validatorsToRestart.Contains(node.NodeID) { + continue + } + if err := n.RestartNode(ctx, w, node); err != nil { + return err + } + } + + return nil } func (n *Network) GetURIForNodeID(nodeID ids.NodeID) (string, error) { @@ -667,42 +845,64 @@ func (n *Network) getBootstrapIPsAndIDs(skippedNode *Node) ([]string, []string, return bootstrapIPs, bootstrapIDs, nil } -// Retrieves the default root dir for storing networks and their -// configuration. 
-func getDefaultRootDir() (string, error) { - homeDir, err := os.UserHomeDir() - if err != nil { - return "", err - } - return filepath.Join(homeDir, ".tmpnet", "networks"), nil -} +// Waits until the provided nodes are healthy. +func waitForHealthy(ctx context.Context, w io.Writer, nodes []*Node) error { + ticker := time.NewTicker(networkHealthCheckInterval) + defer ticker.Stop() -// Finds the next available network ID by attempting to create a -// directory numbered from 1000 until creation succeeds. Returns the -// network id and the full path of the created directory. -func findNextNetworkID(rootDir string) (uint32, string, error) { - var ( - networkID uint32 = 1000 - dirPath string - ) + unhealthyNodes := set.Of(nodes...) for { - _, reserved := constants.NetworkIDToNetworkName[networkID] - if reserved { - networkID++ - continue + for node := range unhealthyNodes { + healthy, err := node.IsHealthy(ctx) + if err != nil && !errors.Is(err, ErrNotRunning) { + return err + } + if !healthy { + continue + } + + unhealthyNodes.Remove(node) + if _, err := fmt.Fprintf(w, "%s is healthy @ %s\n", node.NodeID, node.URI); err != nil { + return err + } } - dirPath = filepath.Join(rootDir, strconv.FormatUint(uint64(networkID), 10)) - err := os.Mkdir(dirPath, perms.ReadWriteExecute) - if err == nil { - return networkID, dirPath, nil + if unhealthyNodes.Len() == 0 { + return nil } - if !errors.Is(err, fs.ErrExist) { - return 0, "", fmt.Errorf("failed to create network directory: %w", err) + select { + case <-ctx.Done(): + return fmt.Errorf("failed to see all nodes healthy before timeout: %w", ctx.Err()) + case <-ticker.C: } + } +} - // Directory already exists, keep iterating - networkID++ +// Retrieves the root dir for tmpnet data. 
+func getTmpnetPath() (string, error) { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(homeDir, ".tmpnet"), nil +} + +// Retrieves the default root dir for storing networks and their +// configuration. +func getDefaultRootNetworkDir() (string, error) { + tmpnetPath, err := getTmpnetPath() + if err != nil { + return "", err + } + return filepath.Join(tmpnetPath, "networks"), nil +} + +// Retrieves the path to a reusable network path for the given owner. +func GetReusableNetworkPathForOwner(owner string) (string, error) { + networkPath, err := getDefaultRootNetworkDir() + if err != nil { + return "", err } + return filepath.Join(networkPath, "latest_"+owner), nil } diff --git a/tests/fixture/tmpnet/network_config.go b/tests/fixture/tmpnet/network_config.go index 1ae4e96788d7..2823a577371c 100644 --- a/tests/fixture/tmpnet/network_config.go +++ b/tests/fixture/tmpnet/network_config.go @@ -140,7 +140,7 @@ func (n *Network) readChainConfigs() error { if err != nil { return err } - n.ChainConfigs[chainAlias] = *chainConfig + n.ChainConfigs[chainAlias] = chainConfig } return nil @@ -185,6 +185,8 @@ func (n *Network) readConfig() error { // The subset of network fields to store in the network config file. 
type serializedNetworkConfig struct { + UUID string + Owner string DefaultFlags FlagsMap DefaultRuntimeConfig NodeRuntimeConfig PreFundedKeys []*secp256k1.PrivateKey @@ -192,6 +194,8 @@ type serializedNetworkConfig struct { func (n *Network) writeNetworkConfig() error { config := &serializedNetworkConfig{ + UUID: n.UUID, + Owner: n.Owner, DefaultFlags: n.DefaultFlags, DefaultRuntimeConfig: n.DefaultRuntimeConfig, PreFundedKeys: n.PreFundedKeys, diff --git a/tests/fixture/tmpnet/network_test.go b/tests/fixture/tmpnet/network_test.go index c04c497c2485..db8d1c404716 100644 --- a/tests/fixture/tmpnet/network_test.go +++ b/tests/fixture/tmpnet/network_test.go @@ -15,8 +15,8 @@ func TestNetworkSerialization(t *testing.T) { tmpDir := t.TempDir() - network := &Network{} - require.NoError(network.EnsureDefaultConfig(&bytes.Buffer{}, "/path/to/avalanche/go", "", 1)) + network := NewDefaultNetwork("testnet") + require.NoError(network.EnsureDefaultConfig(&bytes.Buffer{}, "/path/to/avalanche/go", "")) require.NoError(network.Create(tmpDir)) // Ensure node runtime is initialized require.NoError(network.readNodes()) diff --git a/tests/fixture/tmpnet/node.go b/tests/fixture/tmpnet/node.go index 59025b649112..3a6076af1283 100644 --- a/tests/fixture/tmpnet/node.go +++ b/tests/fixture/tmpnet/node.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "net" "net/http" "os" "path/filepath" @@ -53,6 +54,16 @@ type NodeRuntimeConfig struct { // Node supports configuring and running a node participating in a temporary network. type Node struct { + // Uniquely identifies the network the node is part of to enable monitoring. + NetworkUUID string + + // Identify the entity associated with this network. This is + // intended to be used to label metrics to enable filtering + // results for a test run between the primary/shared network used + // by the majority of tests and private networks used by + // individual tests. 
+ NetworkOwner string + // Set by EnsureNodeID which is also called when the node is read. NodeID ids.NodeID @@ -83,11 +94,24 @@ func NewNode(dataDir string) *Node { } } +// Initializes an ephemeral node using the provided config flags +func NewEphemeralNode(flags FlagsMap) *Node { + node := NewNode("") + node.Flags = flags + node.IsEphemeral = true + + return node +} + // Initializes the specified number of nodes. -func NewNodes(count int) []*Node { +func NewNodesOrPanic(count int) []*Node { nodes := make([]*Node, count) for i := range nodes { - nodes[i] = NewNode("") + node := NewNode("") + if err := node.EnsureKeys(); err != nil { + panic(err) + } + nodes[i] = node } return nodes } @@ -166,7 +190,7 @@ func (n *Node) readState() error { return n.getRuntime().readState() } -func (n *Node) getDataDir() string { +func (n *Node) GetDataDir() string { return cast.ToString(n.Flags[config.DataDirKey]) } @@ -204,13 +228,14 @@ func (n *Node) Stop(ctx context.Context) error { // Sets networking configuration for the node. // Convenience method for setting networking flags. func (n *Node) SetNetworkingConfig(bootstrapIDs []string, bootstrapIPs []string) { - var ( - // Use dynamic port allocation. 
- httpPort uint16 = 0 - stakingPort uint16 = 0 - ) - n.Flags[config.HTTPPortKey] = httpPort - n.Flags[config.StakingPortKey] = stakingPort + if _, ok := n.Flags[config.HTTPPortKey]; !ok { + // Default to dynamic port allocation + n.Flags[config.HTTPPortKey] = 0 + } + if _, ok := n.Flags[config.StakingPortKey]; !ok { + // Default to dynamic port allocation + n.Flags[config.StakingPortKey] = 0 + } n.Flags[config.BootstrapIDsKey] = strings.Join(bootstrapIDs, ",") n.Flags[config.BootstrapIPsKey] = strings.Join(bootstrapIPs, ",") } @@ -244,7 +269,7 @@ func (n *Node) EnsureBLSSigningKey() error { if err != nil { return fmt.Errorf("failed to generate staking signer key: %w", err) } - n.Flags[config.StakingSignerKeyContentKey] = base64.StdEncoding.EncodeToString(bls.SerializeSecretKey(newKey)) + n.Flags[config.StakingSignerKeyContentKey] = base64.StdEncoding.EncodeToString(bls.SecretKeyToBytes(newKey)) return nil } @@ -330,8 +355,28 @@ func (n *Node) EnsureNodeID() error { if err != nil { return fmt.Errorf("failed to ensure node ID: failed to load tls cert: %w", err) } - stakingCert := staking.CertificateFromX509(tlsCert.Leaf) + stakingCert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + if err != nil { + return fmt.Errorf("failed to ensure node ID: failed to parse staking cert: %w", err) + } n.NodeID = ids.NodeIDFromCert(stakingCert) return nil } + +// Saves the currently allocated API port to the node's configuration +// for use across restarts. Reusing the port ensures consistent +// labeling of metrics. 
+func (n *Node) SaveAPIPort() error { + hostPort := strings.TrimPrefix(n.URI, "http://") + if len(hostPort) == 0 { + // Without an API URI there is nothing to save + return nil + } + _, port, err := net.SplitHostPort(hostPort) + if err != nil { + return err + } + n.Flags[config.HTTPPortKey] = port + return nil +} diff --git a/tests/fixture/tmpnet/node_config.go b/tests/fixture/tmpnet/node_config.go index 3ebbc01b6c32..4752b2c343c3 100644 --- a/tests/fixture/tmpnet/node_config.go +++ b/tests/fixture/tmpnet/node_config.go @@ -18,7 +18,7 @@ import ( // (reading/writing configuration) and node.go (orchestration). func (n *Node) getFlagsPath() string { - return filepath.Join(n.getDataDir(), "flags.json") + return filepath.Join(n.GetDataDir(), "flags.json") } func (n *Node) readFlags() error { @@ -46,7 +46,7 @@ func (n *Node) writeFlags() error { } func (n *Node) getConfigPath() string { - return filepath.Join(n.getDataDir(), defaultConfigFilename) + return filepath.Join(n.GetDataDir(), defaultConfigFilename) } func (n *Node) readConfig() error { @@ -61,12 +61,16 @@ func (n *Node) readConfig() error { } type serializedNodeConfig struct { + NetworkUUID string + NetworkOwner string IsEphemeral bool RuntimeConfig *NodeRuntimeConfig } func (n *Node) writeConfig() error { config := serializedNodeConfig{ + NetworkUUID: n.NetworkUUID, + NetworkOwner: n.NetworkOwner, IsEphemeral: n.IsEphemeral, RuntimeConfig: n.RuntimeConfig, } @@ -91,7 +95,7 @@ func (n *Node) Read() error { } func (n *Node) Write() error { - if err := os.MkdirAll(n.getDataDir(), perms.ReadWriteExecute); err != nil { + if err := os.MkdirAll(n.GetDataDir(), perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create node dir: %w", err) } @@ -102,7 +106,7 @@ func (n *Node) Write() error { } func (n *Node) writeMetricsSnapshot(data []byte) error { - metricsDir := filepath.Join(n.getDataDir(), "metrics") + metricsDir := filepath.Join(n.GetDataDir(), "metrics") if err := os.MkdirAll(metricsDir, 
perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create metrics dir: %w", err) } diff --git a/tests/fixture/tmpnet/node_process.go b/tests/fixture/tmpnet/node_process.go index c2e2e33139bf..a866cec63db9 100644 --- a/tests/fixture/tmpnet/node_process.go +++ b/tests/fixture/tmpnet/node_process.go @@ -14,12 +14,15 @@ import ( "os" "os/exec" "path/filepath" + "strconv" + "strings" "syscall" "time" "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/node" + "github.com/ava-labs/avalanchego/utils/perms" ) const ( @@ -109,13 +112,17 @@ func (p *NodeProcess) Start(w io.Writer) error { return fmt.Errorf("failed to remove stale process context file: %w", err) } + // All arguments are provided in the flags file cmd := exec.Command(p.node.RuntimeConfig.AvalancheGoPath, "--config-file", p.node.getFlagsPath()) // #nosec G204 + // Ensure process is detached from the parent process so that an error in the parent will not affect the child + configureDetachedProcess(cmd) + if err := cmd.Start(); err != nil { return err } // Determine appropriate level of node description detail - dataDir := p.node.getDataDir() + dataDir := p.node.GetDataDir() nodeDescription := fmt.Sprintf("node %q", p.node.NodeID) if p.node.IsEphemeral { nodeDescription = "ephemeral " + nodeDescription @@ -126,15 +133,6 @@ func (p *NodeProcess) Start(w io.Writer) error { nodeDescription = fmt.Sprintf("%s with path: %s", nodeDescription, dataDir) } - go func() { - if err := cmd.Wait(); err != nil { - if err.Error() != "signal: killed" { - _, _ = fmt.Fprintf(w, "%s finished with error: %v\n", nodeDescription, err) - } - } - _, _ = fmt.Fprintf(w, "%s exited\n", nodeDescription) - }() - // A node writes a process context file on start. If the file is not // found in a reasonable amount of time, the node is unlikely to have // started successfully. 
@@ -142,8 +140,12 @@ func (p *NodeProcess) Start(w io.Writer) error { return fmt.Errorf("failed to start local node: %w", err) } - _, err = fmt.Fprintf(w, "Started %s\n", nodeDescription) - return err + if _, err = fmt.Fprintf(w, "Started %s\n", nodeDescription); err != nil { + return err + } + + // Configure collection of metrics and logs + return p.writeMonitoringConfig() } // Signals the node process to stop. @@ -154,7 +156,7 @@ func (p *NodeProcess) InitiateStop() error { } if proc == nil { // Already stopped - return nil + return p.removeMonitoringConfig() } if err := proc.Signal(syscall.SIGTERM); err != nil { return fmt.Errorf("failed to send SIGTERM to pid %d: %w", p.pid, err) @@ -172,7 +174,7 @@ func (p *NodeProcess) WaitForStopped(ctx context.Context) error { return fmt.Errorf("failed to retrieve process: %w", err) } if proc == nil { - return nil + return p.removeMonitoringConfig() } select { @@ -199,7 +201,7 @@ func (p *NodeProcess) IsHealthy(ctx context.Context) (bool, error) { } func (p *NodeProcess) getProcessContextPath() string { - return filepath.Join(p.node.getDataDir(), config.DefaultProcessContextFilename) + return filepath.Join(p.node.GetDataDir(), config.DefaultProcessContextFilename) } func (p *NodeProcess) waitForProcessContext(ctx context.Context) error { @@ -256,3 +258,95 @@ func (p *NodeProcess) getProcess() (*os.Process, error) { } return nil, fmt.Errorf("failed to determine process status: %w", err) } + +// Write monitoring configuration enabling collection of metrics and logs from the node. 
+func (p *NodeProcess) writeMonitoringConfig() error { + // Ensure labeling that uniquely identifies the node and its network + commonLabels := FlagsMap{ + "network_uuid": p.node.NetworkUUID, + "node_id": p.node.NodeID, + "is_ephemeral_node": strconv.FormatBool(p.node.IsEphemeral), + "network_owner": p.node.NetworkOwner, + // prometheus/promtail ignore empty values so including these + // labels with empty values outside of a github worker (where + // the env vars will not be set) should not be a problem. + "gh_repo": os.Getenv("GH_REPO"), + "gh_workflow": os.Getenv("GH_WORKFLOW"), + "gh_run_id": os.Getenv("GH_RUN_ID"), + "gh_run_number": os.Getenv("GH_RUN_NUMBER"), + "gh_run_attempt": os.Getenv("GH_RUN_ATTEMPT"), + "gh_job_id": os.Getenv("GH_JOB_ID"), + } + + tmpnetDir, err := getTmpnetPath() + if err != nil { + return err + } + + prometheusConfig := []FlagsMap{ + { + "targets": []string{strings.TrimPrefix(p.node.URI, "http://")}, + "labels": commonLabels, + }, + } + if err := p.writeMonitoringConfigFile(tmpnetDir, "prometheus", prometheusConfig); err != nil { + return err + } + + promtailLabels := FlagsMap{ + "__path__": filepath.Join(p.node.GetDataDir(), "logs", "*.log"), + } + promtailLabels.SetDefaults(commonLabels) + promtailConfig := []FlagsMap{ + { + "targets": []string{"localhost"}, + "labels": promtailLabels, + }, + } + return p.writeMonitoringConfigFile(tmpnetDir, "promtail", promtailConfig) +} + +// Return the path for this node's prometheus configuration. +func (p *NodeProcess) getMonitoringConfigPath(tmpnetDir string, name string) string { + // Ensure a unique filename to allow config files to be added and removed + // by multiple nodes without conflict. + return filepath.Join(tmpnetDir, name, "file_sd_configs", fmt.Sprintf("%s_%s.json", p.node.NetworkUUID, p.node.NodeID)) +} + +// Ensure the removal of the prometheus configuration file for this node. 
+func (p *NodeProcess) removeMonitoringConfig() error { + tmpnetDir, err := getTmpnetPath() + if err != nil { + return err + } + + for _, name := range []string{"promtail", "prometheus"} { + configPath := p.getMonitoringConfigPath(tmpnetDir, name) + if err := os.Remove(configPath); err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to remove %s config: %w", name, err) + } + } + + return nil +} + +// Write the configuration for a type of monitoring (e.g. prometheus, promtail). +func (p *NodeProcess) writeMonitoringConfigFile(tmpnetDir string, name string, config []FlagsMap) error { + configPath := p.getMonitoringConfigPath(tmpnetDir, name) + + dir := filepath.Dir(configPath) + if err := os.MkdirAll(dir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create %s service discovery dir: %w", name, err) + } + + bytes, err := DefaultJSONMarshal(config) + if err != nil { + return fmt.Errorf("failed to marshal %s config: %w", name, err) + } + + if err := os.WriteFile(configPath, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write %s config: %w", name, err) + } + + return nil +} diff --git a/tests/fixture/tmpnet/subnet.go b/tests/fixture/tmpnet/subnet.go index 41058d930a66..9e4d30f83f7f 100644 --- a/tests/fixture/tmpnet/subnet.go +++ b/tests/fixture/tmpnet/subnet.go @@ -40,6 +40,7 @@ type Chain struct { // Write the chain configuration to the specified directory. 
func (c *Chain) WriteConfig(chainDir string) error { + // TODO(marun) Ensure removal of an existing file if no configuration should be provided if len(c.Config) == 0 { return nil } @@ -62,6 +63,8 @@ type Subnet struct { // networks (since the SubnetID will be different every time the subnet is created) Name string + Config FlagsMap + // The ID of the transaction that created the subnet SubnetID ids.ID @@ -152,9 +155,7 @@ func (s *Subnet) CreateChains(ctx context.Context, w io.Writer, uri string) erro } // Add validators to the subnet -func (s *Subnet) AddValidators(ctx context.Context, w io.Writer, nodes []*Node) error { - apiURI := nodes[0].URI - +func (s *Subnet) AddValidators(ctx context.Context, w io.Writer, apiURI string, nodes ...*Node) error { wallet, err := s.GetWallet(ctx, apiURI) if err != nil { return err @@ -198,8 +199,6 @@ func (s *Subnet) AddValidators(ctx context.Context, w io.Writer, nodes []*Node) if _, err := fmt.Fprintf(w, " added %s as validator for subnet `%s`\n", node.NodeID, s.Name); err != nil { return err } - - s.ValidatorIDs = append(s.ValidatorIDs, node.NodeID) } return nil @@ -210,14 +209,14 @@ func (s *Subnet) Write(subnetDir string, chainDir string) error { if err := os.MkdirAll(subnetDir, perms.ReadWriteExecute); err != nil { return fmt.Errorf("failed to create subnet dir: %w", err) } - path := filepath.Join(subnetDir, s.Name+".json") + tmpnetConfigPath := filepath.Join(subnetDir, s.Name+".json") // Since subnets are expected to be serialized for the first time // without their chains having been created (i.e. chains will have // empty IDs), use the absence of chain IDs as a prompt for a - // subnet name uniquness check. + // subnet name uniqueness check. 
if len(s.Chains) > 0 && s.Chains[0].ChainID == ids.Empty { - _, err := os.Stat(path) + _, err := os.Stat(tmpnetConfigPath) if err != nil && !os.IsNotExist(err) { return err } @@ -226,12 +225,39 @@ func (s *Subnet) Write(subnetDir string, chainDir string) error { } } + // Write subnet configuration for tmpnet bytes, err := DefaultJSONMarshal(s) if err != nil { - return fmt.Errorf("failed to marshal subnet %s: %w", s.Name, err) + return fmt.Errorf("failed to marshal tmpnet subnet %s: %w", s.Name, err) + } + if err := os.WriteFile(tmpnetConfigPath, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write tmpnet subnet config %s: %w", s.Name, err) + } + + // The subnet and chain configurations for avalanchego can only be written once + // they have been created since the id of the creating transaction must be + // included in the path. + if s.SubnetID == ids.Empty { + return nil } - if err := os.WriteFile(path, bytes, perms.ReadWrite); err != nil { - return fmt.Errorf("failed to write subnet %s: %w", s.Name, err) + + // TODO(marun) Ensure removal of an existing file if no configuration should be provided + if len(s.Config) > 0 { + // Write subnet configuration for avalanchego + bytes, err = DefaultJSONMarshal(s.Config) + if err != nil { + return fmt.Errorf("failed to marshal avalanchego subnet config %s: %w", s.Name, err) + } + + avgoConfigDir := filepath.Join(subnetDir, s.SubnetID.String()) + if err := os.MkdirAll(avgoConfigDir, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to create avalanchego subnet config dir: %w", err) + } + + avgoConfigPath := filepath.Join(avgoConfigDir, defaultConfigFilename) + if err := os.WriteFile(avgoConfigPath, bytes, perms.ReadWrite); err != nil { + return fmt.Errorf("failed to write avalanchego subnet config %s: %w", s.Name, err) + } } for _, chain := range s.Chains { @@ -269,12 +295,12 @@ func waitForActiveValidators( return err } - if _, err := fmt.Fprintf(w, " "); err != nil { + if _, err := 
fmt.Fprint(w, " "); err != nil { return err } for { - if _, err := fmt.Fprintf(w, "."); err != nil { + if _, err := fmt.Fprint(w, "."); err != nil { return err } validators, err := pChainClient.GetCurrentValidators(ctx, subnet.SubnetID, nil) diff --git a/tests/fixture/tmpnet/utils.go b/tests/fixture/tmpnet/utils.go index b363bdec8671..ba32ed3d4341 100644 --- a/tests/fixture/tmpnet/utils.go +++ b/tests/fixture/tmpnet/utils.go @@ -87,3 +87,11 @@ func NewPrivateKeys(keyCount int) ([]*secp256k1.PrivateKey, error) { } return keys, nil } + +func NodesToIDs(nodes ...*Node) []ids.NodeID { + nodeIDs := make([]ids.NodeID, len(nodes)) + for i, node := range nodes { + nodeIDs[i] = node.NodeID + } + return nodeIDs +} diff --git a/tests/http.go b/tests/http.go deleted file mode 100644 index 073b6d2df126..000000000000 --- a/tests/http.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package tests - -import ( - "bufio" - "context" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// "metric name" -> "metric value" -type NodeMetrics map[string]float64 - -// URI -> "metric name" -> "metric value" -type NodesMetrics map[string]NodeMetrics - -// GetNodeMetrics retrieves the specified metrics the provided node URI. -func GetNodeMetrics(nodeURI string, metricNames ...string) (NodeMetrics, error) { - uri := nodeURI + "/ext/metrics" - return GetMetricsValue(uri, metricNames...) -} - -// GetNodesMetrics retrieves the specified metrics for the provided node URIs. -func GetNodesMetrics(nodeURIs []string, metricNames ...string) (NodesMetrics, error) { - metrics := make(NodesMetrics, len(nodeURIs)) - for _, u := range nodeURIs { - var err error - metrics[u], err = GetNodeMetrics(u, metricNames...) 
- if err != nil { - return nil, fmt.Errorf("failed to retrieve metrics for %s: %w", u, err) - } - } - return metrics, nil -} - -func GetMetricsValue(url string, metrics ...string) (map[string]float64, error) { - lines, err := getHTTPLines(url) - if err != nil { - return nil, err - } - mm := make(map[string]float64, len(metrics)) - for _, line := range lines { - if strings.HasPrefix(line, "# ") { - continue - } - found, name := false, "" - for _, name = range metrics { - if !strings.HasPrefix(line, name) { - continue - } - found = true - break - } - if !found || name == "" { // no matched metric found - continue - } - ll := strings.Split(line, " ") - if len(ll) != 2 { - continue - } - fv, err := strconv.ParseFloat(ll[1], 64) - if err != nil { - return nil, fmt.Errorf("failed to parse %q (%w)", ll, err) - } - mm[name] = fv - } - return mm, nil -} - -func getHTTPLines(url string) ([]string, error) { - req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, url, nil) - if err != nil { - return nil, err - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - - rd := bufio.NewReader(resp.Body) - lines := []string{} - for { - line, err := rd.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - _ = resp.Body.Close() - return nil, err - } - lines = append(lines, strings.TrimSpace(line)) - } - return lines, resp.Body.Close() -} diff --git a/tests/metrics.go b/tests/metrics.go new file mode 100644 index 000000000000..2caa11ece5e8 --- /dev/null +++ b/tests/metrics.go @@ -0,0 +1,83 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tests + +import ( + "context" + "fmt" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/api/metrics" + + dto "github.com/prometheus/client_model/go" +) + +// "metric name" -> "metric value" +type NodeMetrics map[string]*dto.MetricFamily + +// URI -> "metric name" -> "metric value" +type NodesMetrics map[string]NodeMetrics + +// GetNodeMetrics retrieves the specified metrics the provided node URI. +func GetNodeMetrics(ctx context.Context, nodeURI string) (NodeMetrics, error) { + client := metrics.NewClient(nodeURI) + return client.GetMetrics(ctx) +} + +// GetNodesMetrics retrieves the specified metrics for the provided node URIs. +func GetNodesMetrics(ctx context.Context, nodeURIs []string) (NodesMetrics, error) { + metrics := make(NodesMetrics, len(nodeURIs)) + for _, u := range nodeURIs { + var err error + metrics[u], err = GetNodeMetrics(ctx, u) + if err != nil { + return nil, fmt.Errorf("failed to retrieve metrics for %s: %w", u, err) + } + } + return metrics, nil +} + +// GetMetricValue returns the value of the specified metric which has the +// required labels. +// +// If multiple metrics match the provided labels, the first metric found is +// returned. +// +// Only Counter and Gauge metrics are supported. 
+func GetMetricValue(metrics NodeMetrics, name string, labels prometheus.Labels) (float64, bool) { + metricFamily, ok := metrics[name] + if !ok { + return 0, false + } + + for _, metric := range metricFamily.Metric { + if !labelsMatch(metric, labels) { + continue + } + + switch { + case metric.Gauge != nil: + return metric.Gauge.GetValue(), true + case metric.Counter != nil: + return metric.Counter.GetValue(), true + } + } + return 0, false +} + +func labelsMatch(metric *dto.Metric, labels prometheus.Labels) bool { + var found int + for _, label := range metric.Label { + expectedValue, ok := labels[label.GetName()] + if !ok { + continue + } + if label.GetValue() != expectedValue { + return false + } + found++ + } + return found == len(labels) +} diff --git a/tests/upgrade/upgrade_test.go b/tests/upgrade/upgrade_test.go index 131d3d53cd2a..c885a0821b8c 100644 --- a/tests/upgrade/upgrade_test.go +++ b/tests/upgrade/upgrade_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/tests/fixture/e2e" @@ -17,7 +16,6 @@ import ( ) func TestUpgrade(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) ginkgo.RunSpecs(t, "upgrade test suites") } @@ -45,8 +43,8 @@ var _ = ginkgo.Describe("[Upgrade]", func() { require := require.New(ginkgo.GinkgoT()) ginkgo.It("can upgrade versions", func() { - network := &tmpnet.Network{} - e2e.StartNetwork(network, e2e.DefaultNetworkDir, avalancheGoExecPath, "" /* pluginDir */) + network := tmpnet.NewDefaultNetwork("avalanchego-upgrade") + e2e.StartNetwork(network, avalancheGoExecPath, "" /* pluginDir */, 0 /* shutdownDelay */, false /* reuseNetwork */) ginkgo.By(fmt.Sprintf("restarting all nodes with %q binary", avalancheGoExecPathToUpgradeTo)) for _, node := range network.Nodes { diff --git a/trace/noop.go b/trace/noop.go index 8c2a63a912cc..914146d3cb27 100644 --- a/trace/noop.go +++ b/trace/noop.go @@ -3,25 +3,13 @@ 
package trace -import ( - "context" +import "go.opentelemetry.io/otel/trace/noop" - "go.opentelemetry.io/otel/trace" - - "github.com/ava-labs/avalanchego/utils/constants" -) - -var Noop Tracer = noOpTracer{ - t: trace.NewNoopTracerProvider().Tracer(constants.AppName), -} +var Noop Tracer = noOpTracer{} // noOpTracer is an implementation of trace.Tracer that does nothing. type noOpTracer struct { - t trace.Tracer -} - -func (n noOpTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return n.t.Start(ctx, spanName, opts...) //nolint:spancheck + noop.Tracer } func (noOpTracer) Close() error { diff --git a/utils/atomic.go b/utils/atomic.go index 3bb125ee8af6..7236d9a50de8 100644 --- a/utils/atomic.go +++ b/utils/atomic.go @@ -3,13 +3,27 @@ package utils -import "sync" +import ( + "encoding/json" + "sync" +) + +var ( + _ json.Marshaler = (*Atomic[struct{}])(nil) + _ json.Unmarshaler = (*Atomic[struct{}])(nil) +) type Atomic[T any] struct { lock sync.RWMutex value T } +func NewAtomic[T any](value T) *Atomic[T] { + return &Atomic[T]{ + value: value, + } +} + func (a *Atomic[T]) Get() T { a.lock.RLock() defer a.lock.RUnlock() @@ -23,3 +37,17 @@ func (a *Atomic[T]) Set(value T) { a.value = value } + +func (a *Atomic[T]) MarshalJSON() ([]byte, error) { + a.lock.RLock() + defer a.lock.RUnlock() + + return json.Marshal(a.value) +} + +func (a *Atomic[T]) UnmarshalJSON(b []byte) error { + a.lock.Lock() + defer a.lock.Unlock() + + return json.Unmarshal(b, &a.value) +} diff --git a/utils/atomic_test.go b/utils/atomic_test.go index 3fa74063c18a..eee159d783f9 100644 --- a/utils/atomic_test.go +++ b/utils/atomic_test.go @@ -4,6 +4,8 @@ package utils import ( + "encoding/json" + "net/netip" "testing" "github.com/stretchr/testify/require" @@ -24,3 +26,46 @@ func TestAtomic(t *testing.T) { a.Set(false) require.False(a.Get()) } + +func TestAtomicJSON(t *testing.T) { + tests := []struct { + name string + value 
*Atomic[netip.AddrPort] + expected string + }{ + { + name: "zero value", + value: new(Atomic[netip.AddrPort]), + expected: `""`, + }, + { + name: "ipv4 value", + value: NewAtomic(netip.AddrPortFrom( + netip.AddrFrom4([4]byte{1, 2, 3, 4}), + 12345, + )), + expected: `"1.2.3.4:12345"`, + }, + { + name: "ipv6 loopback", + value: NewAtomic(netip.AddrPortFrom( + netip.IPv6Loopback(), + 12345, + )), + expected: `"[::1]:12345"`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + b, err := json.Marshal(test.value) + require.NoError(err) + require.Equal(test.expected, string(b)) + + var parsed Atomic[netip.AddrPort] + require.NoError(json.Unmarshal([]byte(test.expected), &parsed)) + require.Equal(test.value.Get(), parsed.Get()) + }) + } +} diff --git a/utils/beacon/beacon.go b/utils/beacon/beacon.go index 38ac6df5b0f5..112c50f6db22 100644 --- a/utils/beacon/beacon.go +++ b/utils/beacon/beacon.go @@ -4,23 +4,24 @@ package beacon import ( + "net/netip" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" ) var _ Beacon = (*beacon)(nil) type Beacon interface { ID() ids.NodeID - IP() ips.IPPort + IP() netip.AddrPort } type beacon struct { id ids.NodeID - ip ips.IPPort + ip netip.AddrPort } -func New(id ids.NodeID, ip ips.IPPort) Beacon { +func New(id ids.NodeID, ip netip.AddrPort) Beacon { return &beacon{ id: id, ip: ip, @@ -31,6 +32,6 @@ func (b *beacon) ID() ids.NodeID { return b.id } -func (b *beacon) IP() ips.IPPort { +func (b *beacon) IP() netip.AddrPort { return b.ip } diff --git a/utils/beacon/set.go b/utils/beacon/set.go index 8b6970b55421..56a292203ed5 100644 --- a/utils/beacon/set.go +++ b/utils/beacon/set.go @@ -5,10 +5,10 @@ package beacon import ( "errors" + "net/netip" "strings" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" ) var ( @@ -25,7 +25,7 @@ type Set interface { Add(Beacon) error RemoveByID(ids.NodeID) error - 
RemoveByIP(ips.IPPort) error + RemoveByIP(netip.AddrPort) error Len() int @@ -35,14 +35,14 @@ type Set interface { type set struct { ids map[ids.NodeID]int - ips map[string]int + ips map[netip.AddrPort]int beacons []Beacon } func NewSet() Set { return &set{ ids: make(map[ids.NodeID]int), - ips: make(map[string]int), + ips: make(map[netip.AddrPort]int), } } @@ -53,14 +53,14 @@ func (s *set) Add(b Beacon) error { return errDuplicateID } - ipStr := b.IP().String() - _, duplicateIP := s.ips[ipStr] + ip := b.IP() + _, duplicateIP := s.ips[ip] if duplicateIP { return errDuplicateIP } s.ids[id] = len(s.beacons) - s.ips[ipStr] = len(s.beacons) + s.ips[ip] = len(s.beacons) s.beacons = append(s.beacons, b) return nil } @@ -71,12 +71,12 @@ func (s *set) RemoveByID(idToRemove ids.NodeID) error { return errUnknownID } toRemove := s.beacons[indexToRemove] - ipToRemove := toRemove.IP().String() + ipToRemove := toRemove.IP() indexToMove := len(s.beacons) - 1 toMove := s.beacons[indexToMove] idToMove := toMove.ID() - ipToMove := toMove.IP().String() + ipToMove := toMove.IP() s.ids[idToMove] = indexToRemove s.ips[ipToMove] = indexToRemove @@ -89,8 +89,8 @@ func (s *set) RemoveByID(idToRemove ids.NodeID) error { return nil } -func (s *set) RemoveByIP(ip ips.IPPort) error { - indexToRemove, exists := s.ips[ip.String()] +func (s *set) RemoveByIP(ip netip.AddrPort) error { + indexToRemove, exists := s.ips[ip] if !exists { return errUnknownIP } diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go index 976d0582e3ff..04e250909fb5 100644 --- a/utils/beacon/set_test.go +++ b/utils/beacon/set_test.go @@ -4,13 +4,12 @@ package beacon import ( - "net" + "net/netip" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" ) func TestSet(t *testing.T) { @@ -20,18 +19,18 @@ func TestSet(t *testing.T) { id1 := ids.BuildTestNodeID([]byte{1}) id2 := ids.BuildTestNodeID([]byte{2}) - ip0 := ips.IPPort{ - IP: 
net.IPv4zero, - Port: 0, - } - ip1 := ips.IPPort{ - IP: net.IPv4zero, - Port: 1, - } - ip2 := ips.IPPort{ - IP: net.IPv4zero, - Port: 2, - } + ip0 := netip.AddrPortFrom( + netip.IPv4Unspecified(), + 0, + ) + ip1 := netip.AddrPortFrom( + netip.IPv4Unspecified(), + 1, + ) + ip2 := netip.AddrPortFrom( + netip.IPv4Unspecified(), + 2, + ) b0 := New(id0, ip0) b1 := New(id1, ip1) diff --git a/utils/bimap/bimap.go b/utils/bimap/bimap.go index d0651ff36cd2..bde60d97b13f 100644 --- a/utils/bimap/bimap.go +++ b/utils/bimap/bimap.go @@ -8,6 +8,8 @@ import ( "encoding/json" "errors" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/utils" ) @@ -110,6 +112,17 @@ func (m *BiMap[K, V]) DeleteValue(val V) (K, bool) { return key, true } +// Keys returns the keys of the map. The keys will be in an indeterminate order. +func (m *BiMap[K, _]) Keys() []K { + return maps.Keys(m.keyToValue) +} + +// Values returns the values of the map. The values will be in an indeterminate +// order. +func (m *BiMap[_, V]) Values() []V { + return maps.Values(m.keyToValue) +} + // Len return the number of entries in this map. 
func (m *BiMap[K, V]) Len() int { return len(m.keyToValue) diff --git a/utils/bimap/bimap_test.go b/utils/bimap/bimap_test.go index 9b4433a51c70..1792bec9a3dd 100644 --- a/utils/bimap/bimap_test.go +++ b/utils/bimap/bimap_test.go @@ -309,23 +309,33 @@ func TestBiMapDeleteValue(t *testing.T) { } } -func TestBiMapLen(t *testing.T) { +func TestBiMapLenAndLists(t *testing.T) { require := require.New(t) m := New[int, int]() require.Zero(m.Len()) + require.Empty(m.Keys()) + require.Empty(m.Values()) m.Put(1, 2) require.Equal(1, m.Len()) + require.ElementsMatch([]int{1}, m.Keys()) + require.ElementsMatch([]int{2}, m.Values()) m.Put(2, 3) require.Equal(2, m.Len()) + require.ElementsMatch([]int{1, 2}, m.Keys()) + require.ElementsMatch([]int{2, 3}, m.Values()) m.Put(1, 3) require.Equal(1, m.Len()) + require.ElementsMatch([]int{1}, m.Keys()) + require.ElementsMatch([]int{3}, m.Values()) m.DeleteKey(1) require.Zero(m.Len()) + require.Empty(m.Keys()) + require.Empty(m.Values()) } func TestBiMapJSON(t *testing.T) { diff --git a/utils/bytes.go b/utils/bytes.go index a32f353cf75e..4232b98fc66f 100644 --- a/utils/bytes.go +++ b/utils/bytes.go @@ -3,7 +3,11 @@ package utils -import "crypto/rand" +import ( + "crypto/rand" + "math/bits" + "sync" +) // RandomBytes returns a slice of n random bytes // Intended for use in testing @@ -12,3 +16,68 @@ func RandomBytes(n int) []byte { _, _ = rand.Read(b) return b } + +// Constant taken from the "math" package +const intSize = 32 << (^uint(0) >> 63) // 32 or 64 + +// BytesPool tracks buckets of available buffers to be allocated. Each bucket +// allocates buffers of the following length: +// +// 0 +// 1 +// 3 +// 7 +// 15 +// 31 +// 63 +// 127 +// ... +// MaxInt +// +// In order to allocate a buffer of length 19 (for example), we calculate the +// number of bits required to represent 19 (5). And therefore allocate a slice +// from bucket 5, which has length 31. 
This is the bucket which produces the +// smallest slices that are at least length 19. +// +// When replacing a buffer of length 19, we calculate the number of bits +// required to represent 20 (5). And therefore place the slice into bucket 4, +// which has length 15. This is the bucket which produces the largest slices +// that a length 19 slice can be used for. +type BytesPool [intSize]sync.Pool + +func NewBytesPool() *BytesPool { + var p BytesPool + for i := range p { + // uint is used here to avoid overflowing int during the shift + size := uint(1)< 0; size-- { + p.Put(p.Get(size)) + } + } +} + +func BenchmarkBytesPool_Ascending(b *testing.B) { + p := NewBytesPool() + for i := 0; i < b.N; i++ { + for size := 0; size < 100_000; size++ { + p.Put(p.Get(size)) + } + } +} + +func BenchmarkBytesPool_Random(b *testing.B) { + p := NewBytesPool() + sizes := make([]int, 1_000) + for i := range sizes { + sizes[i] = rand.Intn(100_000) //#nosec G404 + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, size := range sizes { + p.Put(p.Get(size)) + } + } +} diff --git a/utils/compression/compressor_test.go b/utils/compression/compressor_test.go index fa8554a8a769..dbe5c2c7ee95 100644 --- a/utils/compression/compressor_test.go +++ b/utils/compression/compressor_test.go @@ -24,18 +24,13 @@ var ( TypeNone: func(int64) (Compressor, error) { //nolint:unparam // an error is needed to be returned to compile return NewNoCompressor(), nil }, - TypeGzip: NewGzipCompressor, TypeZstd: NewZstdCompressor, } - //go:embed gzip_zip_bomb.bin - gzipZipBomb []byte - //go:embed zstd_zip_bomb.bin zstdZipBomb []byte zipBombs = map[Type][]byte{ - TypeGzip: gzipZipBomb, TypeZstd: zstdZipBomb, } ) @@ -154,10 +149,6 @@ func TestNewCompressorWithInvalidLimit(t *testing.T) { } } -func FuzzGzipCompressor(f *testing.F) { - fuzzHelper(f, TypeGzip) -} - func FuzzZstdCompressor(f *testing.F) { fuzzHelper(f, TypeZstd) } @@ -168,9 +159,6 @@ func fuzzHelper(f *testing.F, compressionType Type) { err error 
) switch compressionType { - case TypeGzip: - compressor, err = NewGzipCompressor(maxMessageSize) - require.NoError(f, err) case TypeZstd: compressor, err = NewZstdCompressor(maxMessageSize) require.NoError(f, err) diff --git a/utils/compression/gzip_compressor.go b/utils/compression/gzip_compressor.go deleted file mode 100644 index da0b941a47a1..000000000000 --- a/utils/compression/gzip_compressor.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package compression - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "math" - "sync" -) - -var ( - _ Compressor = (*gzipCompressor)(nil) - - ErrInvalidMaxSizeCompressor = errors.New("invalid gzip compressor max size") - ErrDecompressedMsgTooLarge = errors.New("decompressed msg too large") - ErrMsgTooLarge = errors.New("msg too large to be compressed") -) - -// TODO: Remove once v1.11.x is out. -type gzipCompressor struct { - maxSize int64 - gzipWriterPool sync.Pool -} - -// Compress [msg] and returns the compressed bytes. -func (g *gzipCompressor) Compress(msg []byte) ([]byte, error) { - if int64(len(msg)) > g.maxSize { - return nil, fmt.Errorf("%w: (%d) > (%d)", ErrMsgTooLarge, len(msg), g.maxSize) - } - - var writeBuffer bytes.Buffer - gzipWriter := g.gzipWriterPool.Get().(*gzip.Writer) - gzipWriter.Reset(&writeBuffer) - defer g.gzipWriterPool.Put(gzipWriter) - - if _, err := gzipWriter.Write(msg); err != nil { - return nil, err - } - if err := gzipWriter.Close(); err != nil { - return nil, err - } - return writeBuffer.Bytes(), nil -} - -// Decompress decompresses [msg]. 
-func (g *gzipCompressor) Decompress(msg []byte) ([]byte, error) { - bytesReader := bytes.NewReader(msg) - gzipReader, err := gzip.NewReader(bytesReader) - if err != nil { - return nil, err - } - - // We allow [io.LimitReader] to read up to [g.maxSize + 1] bytes, so that if - // the decompressed payload is greater than the maximum size, this function - // will return the appropriate error instead of an incomplete byte slice. - limitedReader := io.LimitReader(gzipReader, g.maxSize+1) - - decompressed, err := io.ReadAll(limitedReader) - if err != nil { - return nil, err - } - if int64(len(decompressed)) > g.maxSize { - return nil, fmt.Errorf("%w: (%d) > (%d)", ErrDecompressedMsgTooLarge, len(decompressed), g.maxSize) - } - return decompressed, gzipReader.Close() -} - -// NewGzipCompressor returns a new gzip Compressor that compresses -func NewGzipCompressor(maxSize int64) (Compressor, error) { - if maxSize == math.MaxInt64 { - // "Decompress" creates "io.LimitReader" with max size + 1: - // if the max size + 1 overflows, "io.LimitReader" reads nothing - // returning 0 byte for the decompress call - // require max size 8Y1zu)1%J^>|z=ny(YsgO>YI+Qdt%Y%cnL);2EnS&5IxT;{LBKQ)T&EujS zoFh&yXHJ^QAzPiY2|3>FbDzF|&NtUHh=|Mg8bAERfB)aB|A z{BNImB|v}x0RjXF5FkK+009C75*8T$uSr%a_O-)jcd_&kAV7cs0RjXF5FkK+009E2 z3+!smf4dPlecW~W9s&di5FkK+009C72oNAZAa#MO;USNA5Bsobq#i!(BS3%v0RjXF z5FkK+009C7mdkZkDHkyyKd)xJ)XRr`1PBlyK!5-N0t5&UAV7dX|E+ajkB0%jqrp@h zAwYlt0RjXF5FkK+009C7k{7uBe}5(Lk9l~xc@#$o5FkK+009C72oNAZfB=Ey1)eLl zi{da~{P%SIo_zW+kN^P!1PBlyK!5-N0t5&UxO{kR#~%s&gzswd<-mm$(IiU2@oJafB*pk1PBlyK!5;&L7>BB$*1PBlyK!5-N0t5&UAVA=)bzhH%0l%ZcR2v~cfB*pk1PBlyK!5-N0tAv5 zxIJzsu?5V-%gv)ULVy4P0t5&UAV7cs0RjXFBrot>sa+H?VEp%V{hoaKFpvNN0t5&U zAV7cs0RjXF5V(AJZO6lapYUBxzI+%+fB*pk1PBlyK!5-N0t5*3zb09!hym^J*;0t5&UAV7cs0RjXF5J+C&YIw+F z3)qKEBl+@SAOQjd2oNAZfB*pk1PBlyuv}-AauEaa^J>;hK7ANSfB*pk1PBlyK!5-N z0t5)WweIWjFyMDIm}(;g2oNAZfB*pk1PBlyK!8B<0=LKQB({Kgc)5AhMhFlfK!5-N 
z0t5&UAV7csf#e0AE47Ov28{onuHTbS9|jU2K!5-N0t5&UAV7cs0Roo~ukCmk@DskP z$(IiU2@oJafB*pk1PBlyK!5;&{?{Zc6)~V4KD!HTga82o1PBlyK!5-N0t5&UNM2x9 zYgXf7!0F?zqm2+CK!5-N0t5&UAV7cs0RqVjTn!I-YytbQX(V4h3?x8+009C72oNAZ zfB*pk1eWWpQZ8aZeqPOb$)^tk2@oJafB*pk1PBlyK!5;&x7K|<9tQl522*W>009C7 z2oNAZfB*pk1PBmFUf}k)ox~O}4=*>5+6Vyx1PBlyK!5-N0t5&UAdtMkbES4s#DMYN z)Af7u>BB$*1PBlyK!5-N0t5&UAVA>q;k6wP1AfAHHTm*kAOQjd2oNAZfB*pk1PBly z(Eplbr6LBj!)JG)jSwI}fB*pk1PBlyK!5-N0?7;PYRzgq3^;wbx0Bcc z=Hcb$Q5zvZfB*pk1PBlyK!5-N0tAv5c&^kgiWo5dd%Av4K7ANSfB*pk1PBlyK!5-N z0t5(LKD@T$VZcxLt|nhT3?x8+009C72oNAZfB*pk1o~f-tW?B+cKGZrv=IUX2oNAZ zfB*pk1PBlyKp=U6U9DM-hXJRLyN)(OfB*pk1PBlyK!5-N0t5&oFK{(Hm{E)3?x8+009C72oNAZfB*pk1m0Tr z^>`TYI~q*25ds7V5FkK+009C72oNAZAbEk?<8~5Tz&yO%JZd8Z2oNAZfB*pk1PBly zK!8B<0?(D&MG*tWe^1x%$)^tk2@oJafB*pk1PBlyK!5;&%ZJx?JPh~=-__*Hhk*nL z5FkK+009C72oNAZfI$Cil9h@W&<>y7g*HNf009C72oNAZfB*pk1PCNAu&Xtz@i5@@ zao5pC2oNAZfB*pk1PBlyK!5;&yC`D7`0wfZJ^A!u zAOQjd2oNAZfB*pk1PBlyaQX1sj)wt1;k%lA`7n?G0RjXF5FkK+009C72oUIhO|nuE z1KQ!UyU<1m5FkK+009C72oNAZfB=Ey1$MP&H68|>KJGf&2mt~F2oNAZfB*pk1PBly zki5Xv@Q}wAun(I?^5w%o0t5&UAV7cs0RjXF5FkKcxy~x(A_nB=)vT9%`Y@0H0RjXF z5FkK+009C72oQK{-Phw`!0%`<)kX*qAV7cs0RjXF5FkK+0D$8zbBtQ3?x8+009C72oNAZfB*pk1TG(5 z+wm~qCwy0vFCPXHAV7cs0RjXF5FkK+009F1uSr%aVn925b{E5ds7V5FkK+009C72oNAZAbEl3 zO6{VE0pq`?>-Xf-hk*nL5FkK+009C72oNAZfWYO$YdanW{Dkjn^5w%o0t5&UAV7cs z0RjXF5FkLH|24@t1aQe9GXd?s& z5FkK+009C72oNAZfI#vBSHnXdTfjbS8p)Rr0|^iyK!5-N0t5&UAV7csf#o`@l#3XU zpI5V9^6A4s0t5&UAV7cs0RjXF5FkL{t#x0IhXKE%!BiU|K!5-N0t5&UAV7cs0RjY) z7q~rcC$R<0!^_R1HbQ^^0RjXF5FkK+009C72qZ7?T&Z0YF<|`nbp4)u`Y@0H0RjXF z5FkK+009C72oSh@cx}hSfS>SPO}=~>NPqwV0t5&UAV7cs0RjXF^uH!qsfYpX@Y!8x zBLoN#AV7cs0RjXF5FkK+K=J~+TC*At15O`z9c_dF0RjXF5FkK+009C72oOkK;A(it zV++`aO(Xg8VITnl1PBlyK!5-N0t5&UAh29#m2wdS^7CreOFn%VNPqwV0t5&UAV7cs z0RjXFytVG@@i5?bG?;251PBlyK!5-N0t5&UAV7dX@&dQV?IgBb)J6ypAV7cs z0RjXF5FkK+0D;0t5&UAV7cs0RjXF5J+BN zS8G<|VZiC*uA_|*AV7cs0RjXF5FkK+009EY3tSBkd29juuxTV;J`5y4fB*pk1PBly 
zK!5-N0tA-ptWqvwKz?4$dda5`0|^iyK!5-N0t5&UAV7csfw$IuJst-9js{b0ga82o z1PBlyK!5-N0t5&UNM7LfxShlnFb^*`kJ<}t(wJPbH}+;y}O0t5&UAV7cs z0RjXF5FkJxd4a3pA&)I!A2yBT%ZGsk2oNAZfB*pk1PBlyK!CtH>1PBly zK!5-N0t5&UAV45_fnBXxjfVlJkGqaGLVy4P0t5&UAV7cs0RjXFBrk9^Jmj$j?8By! zeEBet009C72oNAZfB*pk1PBmVuCq$HhynR|HR~mxJ`5y4fB*pk1PBlyK!5-N0tDV# z_w{%f@H-kzwGjdY2oNAZfB*pk1PBlyKp=U6+v9c;TfjWL+&pR{1PBlyK!5-N0t5&U zAV7dX@&eD5+C>oq#(z)O@5!eR0|^iyK!5-N0t5&UAV7csfy;;2c03ID3E$P^%ZGsk z2oNAZfB*pk1PBlyK!8C1Ym$|U7|;%%-Gw$nfB*pk1PBlyK!5-N0t5&oFR-gMtMM@4 z^l{hGMhFlfK!5-N0t5&UAV7csf#e0QhKD@1fPL6Bk}n?y5+Fc;009C72oNAZfB*pk z%XL;M7cn3|uV%gE(}#fs2oNAZfB*pk1PBlyK!Cto>%JZj1Aa$?sWw7@009C72oNAZ zfB*pk1PCNAaC_WNVhfmumzzgzga82o1PBlyK!5-N0t5&UNM7K%QoAT(!1(X!`aSvd zVITnl1PBlyK!5-N0t5&UAaMEc+Kz_-KjFKYeEBet009C72oNAZfB*pk1PBo5e@(Jd z5d+%cv%Ann2oNAZfB*pk1PBlyK!5;&g1CUB4%vJ`5y4fB*pk1PBlyK!5-N0t7A} zUfc07;3s@nlP@0z5+Fc;009C72oNAZfB*pk{jW(@Dq=u8e0CSw2mt~F2oNAZfB*pk z1PBlyki5XI)~v?EfYZlaM;jqPfB*pk1PBlyK!5-N0tAv5xEdbv*aG%p(@4I27)XEs z0RjXF5FkK+009C72rSoGrCh{-{Jfg=l20E75+Fc;009C72oNAZfB*pkZ>{@!JPi09 z4W`-%0RjXF5FkK+009C72oNBUyuj^oJBck|9$s!9wGjdY2oNAZfB*pk1PBlyKp=U6 z=SuCOhymljr|b9R(}#fs2oNAZfB*pk1PBlyK!CvI!)rSp2Ku4hc z2oNAZfB*pk1PBlyK!8B<0$0OB9$UaZY#Paz4+9AhAV7cs0RjXF5FkK+0D4~)=NHp7)XEs0RjXF5FkK+ z009C72)woK>+vw)cQlx4BLoN#AV7cs0RjXF5FkK+K=J~&$L%DxfO&YidDKP-5FkK+ z009C72oNAZfB=Ey1)eLliy{V$|DLYjlTRN85+Fc;009C72oNAZfB*pkmk+P)co^^# zzN^WX4+9AhAV7cs0RjXF5FkK+0D=D3Br6p$pdCKD3vGk|0RjXF5FkK+009C72oOkK zU{`Ba<6*$*`><&wUp@>ZK!5-N0t5&U zAV7cs0RjY;>#R~PVnBXg&3ehF4+9AhAV7cs0RjXF5FkK+0D-sGeLWrq{Eh}wZG->; z0t5&UAV7cs0RjXF5J+C&_PCwI7BCMlH;>u~0RjXF5FkK+009C72oNBUyufp%c2UHD z@!!++d-CbSKmr5^5FkK+009C72oNAZ;PT`P(fZx$zs*MmJK!5-N0t5&UAV7cs0RqVj+#a`+ z*aGI^<>pZvAwYlt0RjXF5FkK+009C7k{5Wc)Gmq`F#dbGeosDq7)XEs0RjXF5FkK+ z009C72wXnAw&P*IPx!7TUp@>ZK!5-N0t5&UAV7cs0Rja2Uz4m<#DI4A>@Kts0t5&U zAV7cs0RjXF5FkJxd4XN6S&fGQr;odiHbQ^^0RjXF5FkK+009C72qZ6XH9X|81?;0t5&UAV7cs0RjXF5J+C&xl+3*V!-(C>H0nS z^kE;n?~~G!$1NA2oNAZfB*pk1PBlyKw!DfD&-;uXfV}A2oNAZfB*pk1PBlyK!5;& 
zsErUHK!5-N0t5&UAV7cs0RqVjJXdNLMGP4KJzc*ipFRvEK!5-N0t5&UAV7cs0RjXr zA70z>FyJSASCcOv1`;4ZfB*pk1PBlyK!5-N0{yQ^Rw`mZJA8H*+6Vyx1PBlyK!5-N z0t5&UAdtMkuGXx^!+_JrT}K-sK!5-N0t5&UAV7cs0RjY)7q}W8^4J3QVbe&yd>BZ8 z009C72oNAZfB*pk1PCnGS*2XWfc(6g^^#8?1`;4ZfB*pk1PBlyK!5-N0&lJRdOQsH z9Sx@12mt~F2oNAZfB*pk1PBlyki5X{aXX1EU>;s>9<>ny1PBlyK!5-N0t5&UAV45_ zf#*u?qKEBZ8009C72oNAZfB*pk1PJuMCRwS70qyYF zU1%c&2oNAZfB*pk1PBlyK!8B<0=rtX8V>_bA9o#Xga82o1PBlyK!5-N0t5&UNM7J- zc*tW5*oRFc`SM{P0RjXF5FkK+009C72oNB!TxXSX5d-q`YSv3WeHci9009C72oNAZ zfB*pk1PHvf?(6X|;CD2bY9j;)5FkK+009C72oNAZfI#vBx5w=ywt#tfxp~w^2oNAZ zfB*pk1PBlyK!5;&Eo`WjSwI}fB*pk1PBlyK!5-N0?7+p4G(#20sF9NBws!ZBtU=w0RjXF z5FkK+009C7mg}riE@D7_Ud?*Rrw;=O5FkK+009C72oNAZfB=EF)_px52KwH*%we!_P(`SM{P0RjXF5FkK+009C7 z2oNC9|C(f_A_la>XLq5E5FkK+009C72oNAZfB*pk$qVdi&1yUhIDOo8v=IUX2oNAZ zfB*pk1PBlyKp=U6tKlJ!EnpuujpWORfdmK;AV7cs0RjXF5FkK+z;c~c%0&#w&#PH4 z`Sf8R0RjXF5FkK+009C72oNCf*1E69!+_t>V5*G}AV7cs0RjXF5FkK+009EY3)~*J zlh^|0;pOI08zDe|009C72oNAZfB*pk1dur!m5Lb94xinHHbQ^^0RjXF5FkK+009C72qZ7Ct2L|f zFyQoY*U?4@5FkK+009C72oNAZfB=Ey1+IpNJhp&+*ff$a9|jU2K!5-N0t5&UAV7cs z0Rqc)Rw)-TAV05Wz2wt}fdmK;AV7cs0RjXF5FkK+z+3CS9uEV4M}w(0LVy4P0t5&U zAV7cs0RjXFBrkA#+)iQ(n1`2}M{R@v0RjXF5FkK+009C72oOkK;JH$}C}P0)@9FwI z`Sf8R0RjXF5FkK+009C72oNA}`S99~hXFs~yPACYFpvNN0t5&UAV7cs0RjXF5a@qR zvQiNP+TpXi&_)OlAV7cs0RjXF5FkK+0Dgd{>h%9|jU2K!5-N0t5&UAV7cs0RsK6NmeRiKs$VP7upB`0t5&UAV7cs z0RjXF5Fn7ez^>M;#>0Tq$6ZGoAwYlt0RjXF5FkK+009C7k{7rd9`e`%_F>aVzI+%+ zfB*pk1PBlyK!5-N0t5&w*IA`p#DM&~n)Q-T9|jU2K!5-N0t5&UAV7cs0RnHW`+7VK z_#F+V+6Vyx1PBlyK!5-N0t5&UAdtMk?QuJaEnpsAZXUG}0t5&UAV7cs0RjXF5FkJx zd4cCj?V^YQmm$(IiU2@oJafB*pk1PBlyK!5;&L7>BB$*1PBlyK!5-N0t5&UAVA=)bzhH%0l%ZcR2v~cfB*pk1PBlyK!5-N z0tAv5xIJzsu?5V-%gv)ULVy4P0t5&UAV7cs0RjXFBrot>sa+H?VEp%V{hoaKFpvNN z0t5&UAV7cs0RjXF5V(AJZO6lapYUBxzI+%+fB*pk1PBlyK!5-N0t5*3zb09!hym^J z*;0t5&UAV7cs0RjXF5J+C& zYIw+F3)qKEBl+@SAOQjd2oNAZfB*pk1PBlyuv}-AauEaa^J>;hK7ANSfB*pk1PBly zK!5-N0t5)WweIWjFyMDIm}(;g2oNAZfB*pk1PBlyK!8B<0=LKQB({Kgc)5AhMhFlf 
zK!5-N0t5&UAV7csf#e0AE47Ov28{onuHTbS9|jU2K!5-N0t5&UAV7cs0Roo~ukCmk z@DskP$(IiU2@oJafB*pk1PBlyK!5;&{?{Zc6)~V4KD!HTga82o1PBlyK!5-N0t5&U zNM2x9YgXf7!0F?zqm2+CK!5-N0t5&UAV7cs0RqVjTn!I-YytbQX(V4h3?x8+009C7 z2oNAZfB*pk1eWWpQZ8aZeqPOb$)^tk2@oJafB*pk1PBlyK!5;&x7K|<9tQl522*W> z009C72oNAZfB*pk1PBmFUf}k)ox~O}4=*>5+6Vyx1PBlyK!5-N0t5&UAdtMkbES4s z#DMYN)Af7u>BB$*1PBlyK!5-N0t5&UAVA>q;k6wP1AfAHHTm*kAOQjd2oNAZfB*pk z1PBly(Eplbr6LBj!)JG)jSwI}fB*pk1PBlyK!5-N0?7;PYRzgq3^;wb zx0Bcc=Hcb$Q5zvZfB*pk1PBlyK!5-N0tAv5c&^kgiWo5dd%Av4K7ANSfB*pk1PBly zK!5-N0t5(LKD@T$VZcxLt|nhT3?x8+009C72oNAZfB*pk1o~f-tW?B+cKGZrv=IUX z2oNAZfB*pk1PBlyKp=U6U9DM-hXJRLyN)(OfB*pk1PBlyK!5-N0t5&oFK{(Hm{E)3?x8+009C72oNAZfB*pk z1m0Tr^>`TYI~q*25ds7V5FkK+009C72oNAZAbEk?<8~5Tz&yO%JZd8Z2oNAZfB*pk z1PBlyK!8B<0?(D&MG*tWe^1x%$)^tk2@oJafB*pk1PBlyK!5;&%ZJx?JPh~=-__*H zhk*nL5FkK+009C72oNAZfI$Cil9h@W&<>y7g*HNf009C72oNAZfB*pk1PCNAu&Xtz z@i5@@ao5pC2oNAZfB*pk1PBlyK!5;&yC`D7`0wfZ zJ^A!uAOQjd2oNAZfB*pk1PBlyaQX1sj)wt1;k%lA`7n?G0RjXF5FkK+009C72oUIh zO|nuE1KQ!UyU<1m5FkK+009C72oNAZfB=Ey1$MP&H68|>KJGf&2mt~F2oNAZfB*pk z1PBlyki5Xv@Q}wAun(I?^5w%o0t5&UAV7cs0RjXF5FkKcxy~x(A_nB=)vT9%`Y@0H z0RjXF5FkK+009C72oQK{-Phw`!0%`<)kX*qAV7cs0RjXF5FkK+0D$8zbBtQ3?x8+009C72oNAZfB*pk z1TG(5+wm~qCwy0vFCPXHAV7cs0RjXF5FkK+009F1uSr%aVn925b{E5ds7V5FkK+009C72oNAZ zAbEl3O6{VE0pq`?>-Xf-hk*nL5FkK+009C72oNAZfWYO$YdanW{Dkjn^5w%o0t5&U zAV7cs0RjXF5FkLH|24@t1aQe9G zXd?s&5FkK+009C72oNAZfI#vBSHnXdTfjbS8p)Rr0|^iyK!5-N0t5&UAV7csf#o`@ zl#3XUpI5V9^6A4s0t5&UAV7cs0RjXF5FkL{t#x0IhXKE%!BiU|K!5-N0t5&UAV7cs z0RjY)7q~rcC$R<0!^_R1HbQ^^0RjXF5FkK+009C72qZ7?T&Z0YF<|`nbp4)u`Y@0H z0RjXF5FkK+009C72oSh@cx}hSfS>SPO}=~>NPqwV0t5&UAV7cs0RjXF^uH!qsfYpX z@Y!8xBLoN#AV7cs0RjXF5FkK+K=J~+TC*At15O`z9c_dF0RjXF5FkK+009C72oOkK z;A(itV++`aO(Xg8VITnl1PBlyK!5-N0t5&UAh29#m2wdS^7CreOFn%VNPqwV0t5&U zAV7cs0RjXFytVG@@i5?bG?;251PBlyK!5-N0t5&UAV7dX@&dQV?IgBb)J6yp zAV7cs0RjXF5FkK+0D;0t5&UAV7cs0RjXF z5J+BNS8G<|VZiC*uA_|*AV7cs0RjXF5FkK+009EY3tSBkd29juuxTV;J`5y4fB*pk 
z1PBlyK!5-N0tA-ptWqvwKz?4$dda5`0|^iyK!5-N0t5&UAV7csfw$IuJst-9js{b0 zga82o1PBlyK!5-N0t5&UNM7LfxShlnFb^*`kJ<}t(wJPbH}+;y}O0t5&U zAV7cs0RjXF5FkJxd4a3pA&)I!A2yBT%ZGsk2oNAZfB*pk1PBlyK!CtH> z1PBlyK!5-N0t5&UAV45_fnBXxjfVlJkGqaGLVy4P0t5&UAV7cs0RjXFBrk9^Jmj$j z?8By!eEBet009C72oNAZfB*pk1PBmVuCq$HhynR|HR~mxJ`5y4fB*pk1PBlyK!5-N z0tDV#_w{%f@H-kzwGjdY2oNAZfB*pk1PBlyKp=U6+v9c;TfjWL+&pR{1PBlyK!5-N z0t5&UAV7dX@&eD5+C>oq#(z)O@5!eR0|^iyK!5-N0t5&UAV7csfy;;2c03ID3E$P^ z%ZGsk2oNAZfB*pk1PBlyK!8C1Ym$|U7|;%%-Gw$nfB*pk1PBlyK!5-N0t5&oFR-gM ztMM@4^l{hGMhFlfK!5-N0t5&UAV7csf#e0QhKD@1fPL6Bk}n?y5+Fc;009C72oNAZ zfB*pk%XL;M7cn3|uV%gE(}#fs2oNAZfB*pk1PBlyK!Cto>%JZj1Aa$?sWw7@009C7 z2oNAZfB*pk1PCNAaC_WNVhfmumzzgzga82o1PBlyK!5-N0t5&UNM7K%QoAT(!1(X! z`aSvdVITnl1PBlyK!5-N0t5&UAaMEc+Kz_-KjFKYeEBet009C72oNAZfB*pk1PBo5 ze@(Jd5d+%cv%Ann2oNAZfB*pk1PBlyK!5;&g1CUB4%vJ`5y4fB*pk1PBlyK!5-N z0t7A}Ufc07;3s@nlP@0z5+Fc;009C72oNAZfB*pk{jW(@Dq=u8e0CSw2mt~F2oNAZ zfB*pk1PBlyki5XI)~v?EfYZlaM;jqPfB*pk1PBlyK!5-N0tAv5xEdbv*aG%p(@4I2 z7)XEs0RjXF5FkK+009C72rSoGrCh{-{Jfg=l20E75+Fc;009C72oNAZfB*pkZ>{@! 
zJPi094W`-%0RjXF5FkK+009C72oNBUyuj^oJBck|9$s!9wGjdY2oNAZfB*pk1PBly zKp=U6=SuCOhymljr|b9R(}#fs2oNAZfB*pk1PBlyK!CvI!)rSp2Ku4hc2oNAZfB*pk1PBlyK!8B<0$0OB9$UaZY#Paz4+9AhAV7cs0RjXF5FkK+0D4~)=NHp7)XEs0RjXF z5FkK+009C72)woK>+vw)cQlx4BLoN#AV7cs0RjXF5FkK+K=J~&$L%DxfO&YidDKP- z5FkK+009C72oNAZfB=Ey1)eLliy{V$|DLYjlTRN85+Fc;009C72oNAZfB*pkmk+P) zco^^#zN^WX4+9AhAV7cs0RjXF5FkK+0D=D3Br6p$pdCKD3vGk|0RjXF5FkK+009C7 z2oOkKU{`Ba<6*$*`><&wUp@>ZK!5-N z0t5&UAV7cs0RjY;>#R~PVnBXg&3ehF4+9AhAV7cs0RjXF5FkK+0D-sGeLWrq{Eh}w zZG->;0t5&UAV7cs0RjXF5J+C&_PCwI7BCMlH;>u~0RjXF5FkK+009C72oNBUyufp% zc2UHD@!!++d-CbSKmr5^5FkK+009C72oNAZ;PT`P(fZx$zs*MmJK!5-N0t5&UAV7cs0RqVj z+#a`+*aGI^<>pZvAwYlt0RjXF5FkK+009C7k{5Wc)Gmq`F#dbGeosDq7)XEs0RjXF z5FkK+009C72wXnAw&P*IPx!7TUp@>ZK!5-N0t5&UAV7cs0Rja2Uz4m<#DI4A>@Kts z0t5&UAV7cs0RjXF5FkJxd4XN6S&fGQr;odiHbQ^^0RjXF5FkK+009C72qZ6XH9X|8 z1?;0t5&UAV7cs0RjXF5J+C&xl+3*V!-(C z>H0nS^kE;n?~~G!$1NA2oNAZfB*pk1PBlyKw!DfD&-;uXfV}A2oNAZfB*pk1PBlyK!5;&sErUHK!5-N0t5&UAV7cs0RqVjJXdNLMGP4KJzc*ipFRvEK!5-N0t5&UAV7cs z0RjXrA70z>FyJSASCcOv1`;4ZfB*pk1PBlyK!5-N0{yQ^Rw`mZJA8H*+6Vyx1PBly zK!5-N0t5&UAdtMkuGXx^!+_JrT}K-sK!5-N0t5&UAV7cs0RjY)7q}W8^4J3QVbe&y zd>BZ8009C72oNAZfB*pk1PCnGS*2XWfc(6g^^#8?1`;4ZfB*pk1PBlyK!5-N0&lJR zdOQsH9Sx@12mt~F2oNAZfB*pk1PBlyki5X{aXX1EU>;s>9<>ny1PBlyK!5-N0t5&U zAV45_f#*u?qKEBZ8009C72oNAZfB*pk1PJuMCRwS7 z0qyYFU1%c&2oNAZfB*pk1PBlyK!8B<0=rtX8V>_bA9o#Xga82o1PBlyK!5-N0t5&U zNM7J-c*tW5*oRFc`SM{P0RjXF5FkK+009C72oNB!TxXSX5d-q`YSv3WeHci9009C7 z2oNAZfB*pk1PHvf?(6X|;CD2bY9j;)5FkK+009C72oNAZfI#vBx5w=ywt#tfxp~w^ z2oNAZfB*pk1PBlyK!5;&Eo`WjSwI}fB*pk1PBlyK!5-N0?7+p4G(#20sF9NBws!ZBtU=w z0RjXF5FkK+009C7mg}riE@D7_Ud?*Rrw;=O5FkK+009C72oNAZfB=EF)_px52KwH*%we!_P(`SM{P0RjXF5FkK+ z009C72oNC9|C(f_A_la>XLq5E5FkK+009C72oNAZfB*pk$qVdi&1yUhIDOo8v=IUX z2oNAZfB*pk1PBlyKp=U6tKlJ!EnpuujpWORfdmK;AV7cs0RjXF5FkK+z;c~c%0&#w z&#PH4`Sf8R0RjXF5FkK+009C72oNCf*1E69!+_t>V5*G}AV7cs0RjXF5FkK+009EY z3)~*Jlh^|0;pOI08zDe|009C72oNAZfB*pk1dur!m5Lb94xinHHbQ^^0RjXF5FkK+009C72qZ7C 
zt2L|fFyQoY*U?4@5FkK+009C72oNAZfB=Ey1+IpNJhp&+*ff$a9|jU2K!5-N0t5&U zAV7cs0Rqc)Rw)-TAV05Wz2wt}fdmK;AV7cs0RjXF5FkK+z+3CS9uEV4M}w(0LVy4P z0t5&UAV7cs0RjXFBrkA#+)iQ(n1`2}M{R@v0RjXF5FkK+009C72oOkK;JH$}C}P0) z@9FwI`Sf8R0RjXF5FkK+009C72oNA}`S99~hXFs~yPACYFpvNN0t5&UAV7cs0RjXF z5a@qRvQiNP+TpXi&_)OlAV7cs0RjXF5FkK+0D:1234"}, - } - for _, tt := range tests { - t.Run(tt.result, func(t *testing.T) { - require.Equal(t, tt.result, tt.ipPort.String()) - }) - } -} - -func TestToIPPortError(t *testing.T) { - tests := []struct { - in string - out IPPort - expectedErr error - }{ - { - in: "", - out: IPPort{}, - expectedErr: errBadIP, - }, - { - in: ":", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: "abc:", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: ":abc", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: "abc:abc", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: "127.0.0.1:", - out: IPPort{}, - expectedErr: strconv.ErrSyntax, - }, - { - in: ":1", - out: IPPort{}, - expectedErr: errBadIP, - }, - { - in: "::1", - out: IPPort{}, - expectedErr: errBadIP, - }, - { - in: "::1:42", - out: IPPort{}, - expectedErr: errBadIP, - }, - } - for _, tt := range tests { - t.Run(tt.in, func(t *testing.T) { - require := require.New(t) - - result, err := ToIPPort(tt.in) - require.ErrorIs(err, tt.expectedErr) - require.Equal(tt.out, result) - }) - } -} - -func TestToIPPort(t *testing.T) { - tests := []struct { - in string - out IPPort - }{ - {"127.0.0.1:42", IPPort{net.ParseIP("127.0.0.1"), 42}}, - {"[::1]:42", IPPort{net.ParseIP("::1"), 42}}, - } - for _, tt := range tests { - t.Run(tt.in, func(t *testing.T) { - require := require.New(t) - - result, err := ToIPPort(tt.in) - require.NoError(err) - require.Equal(tt.out, result) - }) - } -} diff --git a/utils/ips/lookup.go b/utils/ips/lookup.go index cdf9176f9568..cf4158d233b3 100644 --- a/utils/ips/lookup.go +++ b/utils/ips/lookup.go @@ -6,6 +6,7 @@ package ips import ( "errors" 
"net" + "net/netip" ) var errNoIPsFound = errors.New("no IPs found") @@ -15,20 +16,22 @@ var errNoIPsFound = errors.New("no IPs found") // pick any of the IPs. // // Note: IPv4 is preferred because `net.Listen` prefers IPv4. -func Lookup(hostname string) (net.IP, error) { +func Lookup(hostname string) (netip.Addr, error) { ips, err := net.LookupIP(hostname) if err != nil { - return nil, err + return netip.Addr{}, err } if len(ips) == 0 { - return nil, errNoIPsFound + return netip.Addr{}, errNoIPsFound } for _, ip := range ips { ipv4 := ip.To4() if ipv4 != nil { - return ipv4, nil + addr, _ := AddrFromSlice(ipv4) + return addr, nil } } - return ips[0], nil + addr, _ := AddrFromSlice(ips[0]) + return addr, nil } diff --git a/utils/ips/lookup_test.go b/utils/ips/lookup_test.go index 9fecccc54593..4f5621dfce7b 100644 --- a/utils/ips/lookup_test.go +++ b/utils/ips/lookup_test.go @@ -4,7 +4,7 @@ package ips import ( - "net" + "net/netip" "testing" "github.com/stretchr/testify/require" @@ -13,23 +13,23 @@ import ( func TestLookup(t *testing.T) { tests := []struct { host string - ip net.IP + ip netip.Addr }{ { host: "127.0.0.1", - ip: net.ParseIP("127.0.0.1").To4(), + ip: netip.AddrFrom4([4]byte{127, 0, 0, 1}), }, { host: "localhost", - ip: net.ParseIP("127.0.0.1").To4(), + ip: netip.AddrFrom4([4]byte{127, 0, 0, 1}), }, { host: "::", - ip: net.IPv6zero, + ip: netip.IPv6Unspecified(), }, { host: "0.0.0.0", - ip: net.ParseIP("0.0.0.0").To4(), + ip: netip.IPv4Unspecified(), }, } for _, tt := range tests { diff --git a/utils/linked/hashmap.go b/utils/linked/hashmap.go new file mode 100644 index 000000000000..9e85e22f0081 --- /dev/null +++ b/utils/linked/hashmap.go @@ -0,0 +1,166 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package linked + +import "github.com/ava-labs/avalanchego/utils" + +type keyValue[K, V any] struct { + key K + value V +} + +// Hashmap provides an ordered O(1) mapping from keys to values. +// +// Entries are tracked by insertion order. +type Hashmap[K comparable, V any] struct { + entryMap map[K]*ListElement[keyValue[K, V]] + entryList *List[keyValue[K, V]] + freeList []*ListElement[keyValue[K, V]] +} + +func NewHashmap[K comparable, V any]() *Hashmap[K, V] { + return NewHashmapWithSize[K, V](0) +} + +func NewHashmapWithSize[K comparable, V any](initialSize int) *Hashmap[K, V] { + lh := &Hashmap[K, V]{ + entryMap: make(map[K]*ListElement[keyValue[K, V]], initialSize), + entryList: NewList[keyValue[K, V]](), + freeList: make([]*ListElement[keyValue[K, V]], initialSize), + } + for i := range lh.freeList { + lh.freeList[i] = &ListElement[keyValue[K, V]]{} + } + return lh +} + +func (lh *Hashmap[K, V]) Put(key K, value V) { + if e, ok := lh.entryMap[key]; ok { + lh.entryList.MoveToBack(e) + e.Value = keyValue[K, V]{ + key: key, + value: value, + } + return + } + + var e *ListElement[keyValue[K, V]] + if numFree := len(lh.freeList); numFree > 0 { + numFree-- + e = lh.freeList[numFree] + lh.freeList = lh.freeList[:numFree] + } else { + e = &ListElement[keyValue[K, V]]{} + } + + e.Value = keyValue[K, V]{ + key: key, + value: value, + } + lh.entryMap[key] = e + lh.entryList.PushBack(e) +} + +func (lh *Hashmap[K, V]) Get(key K) (V, bool) { + if e, ok := lh.entryMap[key]; ok { + return e.Value.value, true + } + return utils.Zero[V](), false +} + +func (lh *Hashmap[K, V]) Delete(key K) bool { + e, ok := lh.entryMap[key] + if ok { + lh.remove(e) + } + return ok +} + +func (lh *Hashmap[K, V]) Clear() { + for _, e := range lh.entryMap { + lh.remove(e) + } +} + +// remove assumes that [e] is currently in the Hashmap. 
+func (lh *Hashmap[K, V]) remove(e *ListElement[keyValue[K, V]]) { + delete(lh.entryMap, e.Value.key) + lh.entryList.Remove(e) + e.Value = keyValue[K, V]{} // Free the key value pair + lh.freeList = append(lh.freeList, e) +} + +func (lh *Hashmap[K, V]) Len() int { + return len(lh.entryMap) +} + +func (lh *Hashmap[K, V]) Oldest() (K, V, bool) { + if e := lh.entryList.Front(); e != nil { + return e.Value.key, e.Value.value, true + } + return utils.Zero[K](), utils.Zero[V](), false +} + +func (lh *Hashmap[K, V]) Newest() (K, V, bool) { + if e := lh.entryList.Back(); e != nil { + return e.Value.key, e.Value.value, true + } + return utils.Zero[K](), utils.Zero[V](), false +} + +func (lh *Hashmap[K, V]) NewIterator() *Iterator[K, V] { + return &Iterator[K, V]{lh: lh} +} + +// Iterates over the keys and values in a LinkedHashmap from oldest to newest. +// Assumes the underlying LinkedHashmap is not modified while the iterator is in +// use, except to delete elements that have already been iterated over. +type Iterator[K comparable, V any] struct { + lh *Hashmap[K, V] + key K + value V + next *ListElement[keyValue[K, V]] + initialized, exhausted bool +} + +func (it *Iterator[K, V]) Next() bool { + // If the iterator has been exhausted, there is no next value. + if it.exhausted { + it.key = utils.Zero[K]() + it.value = utils.Zero[V]() + it.next = nil + return false + } + + // If the iterator was not yet initialized, do it now. 
+ if !it.initialized { + it.initialized = true + oldest := it.lh.entryList.Front() + if oldest == nil { + it.exhausted = true + it.key = utils.Zero[K]() + it.value = utils.Zero[V]() + it.next = nil + return false + } + it.next = oldest + } + + // It's important to ensure that [it.next] is not nil + // by not deleting elements that have not yet been iterated + // over from [it.lh] + it.key = it.next.Value.key + it.value = it.next.Value.value + it.next = it.next.Next() // Next time, return next element + it.exhausted = it.next == nil + return true +} + +func (it *Iterator[K, V]) Key() K { + return it.key +} + +func (it *Iterator[K, V]) Value() V { + return it.value +} diff --git a/utils/linkedhashmap/linkedhashmap_test.go b/utils/linked/hashmap_test.go similarity index 82% rename from utils/linkedhashmap/linkedhashmap_test.go rename to utils/linked/hashmap_test.go index 372bd24baa4c..25131888dcbc 100644 --- a/utils/linkedhashmap/linkedhashmap_test.go +++ b/utils/linked/hashmap_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package linkedhashmap +package linked import ( "testing" @@ -11,10 +11,10 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestLinkedHashmap(t *testing.T) { +func TestHashmap(t *testing.T) { require := require.New(t) - lh := New[ids.ID, int]() + lh := NewHashmap[ids.ID, int]() require.Zero(lh.Len(), "a new hashmap should be empty") key0 := ids.GenerateTestID() @@ -95,13 +95,30 @@ func TestLinkedHashmap(t *testing.T) { require.Equal(1, val1, "wrong value") } +func TestHashmapClear(t *testing.T) { + require := require.New(t) + + lh := NewHashmap[int, int]() + lh.Put(1, 1) + lh.Put(2, 2) + + lh.Clear() + + require.Empty(lh.entryMap) + require.Zero(lh.entryList.Len()) + require.Len(lh.freeList, 2) + for _, e := range lh.freeList { + require.Zero(e.Value) // Make sure the value is cleared + } +} + func TestIterator(t *testing.T) { require := require.New(t) id1, id2, id3 := ids.GenerateTestID(), ids.GenerateTestID(), ids.GenerateTestID() // Case: No elements { - lh := New[ids.ID, int]() + lh := NewHashmap[ids.ID, int]() iter := lh.NewIterator() require.NotNil(iter) // Should immediately be exhausted @@ -114,7 +131,7 @@ func TestIterator(t *testing.T) { // Case: 1 element { - lh := New[ids.ID, int]() + lh := NewHashmap[ids.ID, int]() iter := lh.NewIterator() require.NotNil(iter) lh.Put(id1, 1) @@ -141,7 +158,7 @@ func TestIterator(t *testing.T) { // Case: Multiple elements { - lh := New[ids.ID, int]() + lh := NewHashmap[ids.ID, int]() lh.Put(id1, 1) lh.Put(id2, 2) lh.Put(id3, 3) @@ -162,7 +179,7 @@ func TestIterator(t *testing.T) { // Case: Delete element that has been iterated over { - lh := New[ids.ID, int]() + lh := NewHashmap[ids.ID, int]() lh.Put(id1, 1) lh.Put(id2, 2) lh.Put(id3, 3) @@ -178,3 +195,28 @@ func TestIterator(t *testing.T) { require.False(iter.Next()) } } + +func Benchmark_Hashmap_Put(b *testing.B) { + key := "hello" + value := "world" + + lh := NewHashmap[string, string]() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + lh.Put(key, value) + 
} +} + +func Benchmark_Hashmap_PutDelete(b *testing.B) { + key := "hello" + value := "world" + + lh := NewHashmap[string, string]() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + lh.Put(key, value) + lh.Delete(key) + } +} diff --git a/utils/linked/list.go b/utils/linked/list.go new file mode 100644 index 000000000000..4a7f3eb0a421 --- /dev/null +++ b/utils/linked/list.go @@ -0,0 +1,217 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package linked + +// ListElement is an element of a linked list. +type ListElement[T any] struct { + next, prev *ListElement[T] + list *List[T] + Value T +} + +// Next returns the next element or nil. +func (e *ListElement[T]) Next() *ListElement[T] { + if p := e.next; e.list != nil && p != &e.list.sentinel { + return p + } + return nil +} + +// Prev returns the previous element or nil. +func (e *ListElement[T]) Prev() *ListElement[T] { + if p := e.prev; e.list != nil && p != &e.list.sentinel { + return p + } + return nil +} + +// List implements a doubly linked list with a sentinel node. +// +// See: https://en.wikipedia.org/wiki/Doubly_linked_list +// +// This datastructure is designed to be an almost complete drop-in replacement +// for the standard library's "container/list". +// +// The primary design change is to remove all memory allocations from the list +// definition. This allows these lists to be used in performance critical paths. +// Additionally the zero value is not useful. Lists must be created with the +// NewList method. +type List[T any] struct { + // sentinel is only used as a placeholder to avoid complex nil checks. + // sentinel.Value is never used. + sentinel ListElement[T] + length int +} + +// NewList creates a new doubly linked list. +func NewList[T any]() *List[T] { + l := &List[T]{} + l.sentinel.next = &l.sentinel + l.sentinel.prev = &l.sentinel + l.sentinel.list = l + return l +} + +// Len returns the number of elements in l. 
+func (l *List[_]) Len() int { + return l.length +} + +// Front returns the element at the front of l. +// If l is empty, nil is returned. +func (l *List[T]) Front() *ListElement[T] { + if l.length == 0 { + return nil + } + return l.sentinel.next +} + +// Back returns the element at the back of l. +// If l is empty, nil is returned. +func (l *List[T]) Back() *ListElement[T] { + if l.length == 0 { + return nil + } + return l.sentinel.prev +} + +// Remove removes e from l if e is in l. +func (l *List[T]) Remove(e *ListElement[T]) { + if e.list != l { + return + } + + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil + e.prev = nil + e.list = nil + l.length-- +} + +// PushFront inserts e at the front of l. +// If e is already in a list, l is not modified. +func (l *List[T]) PushFront(e *ListElement[T]) { + l.insertAfter(e, &l.sentinel) +} + +// PushBack inserts e at the back of l. +// If e is already in a list, l is not modified. +func (l *List[T]) PushBack(e *ListElement[T]) { + l.insertAfter(e, l.sentinel.prev) +} + +// InsertBefore inserts e immediately before location. +// If e is already in a list, l is not modified. +// If location is not in l, l is not modified. +func (l *List[T]) InsertBefore(e *ListElement[T], location *ListElement[T]) { + if location.list == l { + l.insertAfter(e, location.prev) + } +} + +// InsertAfter inserts e immediately after location. +// If e is already in a list, l is not modified. +// If location is not in l, l is not modified. +func (l *List[T]) InsertAfter(e *ListElement[T], location *ListElement[T]) { + if location.list == l { + l.insertAfter(e, location) + } +} + +// MoveToFront moves e to the front of l. +// If e is not in l, l is not modified. +func (l *List[T]) MoveToFront(e *ListElement[T]) { + // If e is already at the front of l, there is nothing to do. + if e != l.sentinel.next { + l.moveAfter(e, &l.sentinel) + } +} + +// MoveToBack moves e to the back of l. +// If e is not in l, l is not modified. 
+func (l *List[T]) MoveToBack(e *ListElement[T]) { + l.moveAfter(e, l.sentinel.prev) +} + +// MoveBefore moves e immediately before location. +// If the elements are equal or not in l, the list is not modified. +func (l *List[T]) MoveBefore(e, location *ListElement[T]) { + // Don't introduce a cycle by moving an element before itself. + if e != location { + l.moveAfter(e, location.prev) + } +} + +// MoveAfter moves e immediately after location. +// If the elements are equal or not in l, the list is not modified. +func (l *List[T]) MoveAfter(e, location *ListElement[T]) { + l.moveAfter(e, location) +} + +func (l *List[T]) insertAfter(e, location *ListElement[T]) { + if e.list != nil { + // Don't insert an element that is already in a list + return + } + + e.prev = location + e.next = location.next + e.prev.next = e + e.next.prev = e + e.list = l + l.length++ +} + +func (l *List[T]) moveAfter(e, location *ListElement[T]) { + if e.list != l || location.list != l || e == location { + // Don't modify an element that is in a different list. + // Don't introduce a cycle by moving an element after itself. + return + } + + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = location + e.next = location.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts v into a new element at the front of l. +func PushFront[T any](l *List[T], v T) { + l.PushFront(&ListElement[T]{ + Value: v, + }) +} + +// PushBack inserts v into a new element at the back of l. +func PushBack[T any](l *List[T], v T) { + l.PushBack(&ListElement[T]{ + Value: v, + }) +} + +// InsertBefore inserts v into a new element immediately before location. +// If location is not in l, l is not modified. +func InsertBefore[T any](l *List[T], v T, location *ListElement[T]) { + l.InsertBefore( + &ListElement[T]{ + Value: v, + }, + location, + ) +} + +// InsertAfter inserts v into a new element immediately after location. +// If location is not in l, l is not modified. 
+func InsertAfter[T any](l *List[T], v T, location *ListElement[T]) { + l.InsertAfter( + &ListElement[T]{ + Value: v, + }, + location, + ) +} diff --git a/utils/linked/list_test.go b/utils/linked/list_test.go new file mode 100644 index 000000000000..9618ccb379d7 --- /dev/null +++ b/utils/linked/list_test.go @@ -0,0 +1,168 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package linked + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func flattenForwards[T any](l *List[T]) []T { + var s []T + for e := l.Front(); e != nil; e = e.Next() { + s = append(s, e.Value) + } + return s +} + +func flattenBackwards[T any](l *List[T]) []T { + var s []T + for e := l.Back(); e != nil; e = e.Prev() { + s = append(s, e.Value) + } + return s +} + +func TestList_Empty(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + require.Empty(flattenForwards(l)) + require.Empty(flattenBackwards(l)) + require.Zero(l.Len()) +} + +func TestList_PushBack(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + for i := 0; i < 5; i++ { + l.PushBack(&ListElement[int]{ + Value: i, + }) + } + + require.Equal([]int{0, 1, 2, 3, 4}, flattenForwards(l)) + require.Equal([]int{4, 3, 2, 1, 0}, flattenBackwards(l)) + require.Equal(5, l.Len()) +} + +func TestList_PushBack_Duplicate(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + e := &ListElement[int]{ + Value: 0, + } + l.PushBack(e) + l.PushBack(e) + + require.Equal([]int{0}, flattenForwards(l)) + require.Equal([]int{0}, flattenBackwards(l)) + require.Equal(1, l.Len()) +} + +func TestList_PushFront(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + for i := 0; i < 5; i++ { + l.PushFront(&ListElement[int]{ + Value: i, + }) + } + + require.Equal([]int{4, 3, 2, 1, 0}, flattenForwards(l)) + require.Equal([]int{0, 1, 2, 3, 4}, flattenBackwards(l)) + require.Equal(5, l.Len()) +} + +func 
TestList_PushFront_Duplicate(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + e := &ListElement[int]{ + Value: 0, + } + l.PushFront(e) + l.PushFront(e) + + require.Equal([]int{0}, flattenForwards(l)) + require.Equal([]int{0}, flattenBackwards(l)) + require.Equal(1, l.Len()) +} + +func TestList_Remove(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + e0 := &ListElement[int]{ + Value: 0, + } + e1 := &ListElement[int]{ + Value: 1, + } + e2 := &ListElement[int]{ + Value: 2, + } + l.PushBack(e0) + l.PushBack(e1) + l.PushBack(e2) + + l.Remove(e1) + + require.Equal([]int{0, 2}, flattenForwards(l)) + require.Equal([]int{2, 0}, flattenBackwards(l)) + require.Equal(2, l.Len()) + require.Nil(e1.next) + require.Nil(e1.prev) + require.Nil(e1.list) +} + +func TestList_MoveToFront(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + e0 := &ListElement[int]{ + Value: 0, + } + e1 := &ListElement[int]{ + Value: 1, + } + l.PushFront(e0) + l.PushFront(e1) + l.MoveToFront(e0) + + require.Equal([]int{0, 1}, flattenForwards(l)) + require.Equal([]int{1, 0}, flattenBackwards(l)) + require.Equal(2, l.Len()) +} + +func TestList_MoveToBack(t *testing.T) { + require := require.New(t) + + l := NewList[int]() + + e0 := &ListElement[int]{ + Value: 0, + } + e1 := &ListElement[int]{ + Value: 1, + } + l.PushFront(e0) + l.PushFront(e1) + l.MoveToBack(e1) + + require.Equal([]int{0, 1}, flattenForwards(l)) + require.Equal([]int{1, 0}, flattenBackwards(l)) + require.Equal(2, l.Len()) +} diff --git a/utils/linkedhashmap/iterator.go b/utils/linkedhashmap/iterator.go deleted file mode 100644 index a2869aac2a54..000000000000 --- a/utils/linkedhashmap/iterator.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package linkedhashmap - -import ( - "container/list" - - "github.com/ava-labs/avalanchego/utils" -) - -var _ Iter[int, struct{}] = (*iterator[int, struct{}])(nil) - -// Iterates over the keys and values in a LinkedHashmap -// from oldest to newest elements. -// Assumes the underlying LinkedHashmap is not modified while -// the iterator is in use, except to delete elements that -// have already been iterated over. -type Iter[K, V any] interface { - Next() bool - Key() K - Value() V -} - -type iterator[K comparable, V any] struct { - lh *linkedHashmap[K, V] - key K - value V - next *list.Element - initialized, exhausted bool -} - -func (it *iterator[K, V]) Next() bool { - // If the iterator has been exhausted, there is no next value. - if it.exhausted { - it.key = utils.Zero[K]() - it.value = utils.Zero[V]() - it.next = nil - return false - } - - it.lh.lock.RLock() - defer it.lh.lock.RUnlock() - - // If the iterator was not yet initialized, do it now. - if !it.initialized { - it.initialized = true - oldest := it.lh.entryList.Front() - if oldest == nil { - it.exhausted = true - it.key = utils.Zero[K]() - it.value = utils.Zero[V]() - it.next = nil - return false - } - it.next = oldest - } - - // It's important to ensure that [it.next] is not nil - // by not deleting elements that have not yet been iterated - // over from [it.lh] - kv := it.next.Value.(keyValue[K, V]) - it.key = kv.key - it.value = kv.value - it.next = it.next.Next() // Next time, return next element - it.exhausted = it.next == nil - return true -} - -func (it *iterator[K, V]) Key() K { - return it.key -} - -func (it *iterator[K, V]) Value() V { - return it.value -} diff --git a/utils/linkedhashmap/linkedhashmap.go b/utils/linkedhashmap/linkedhashmap.go deleted file mode 100644 index 9ae5b83ad7ae..000000000000 --- a/utils/linkedhashmap/linkedhashmap.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package linkedhashmap - -import ( - "container/list" - "sync" - - "github.com/ava-labs/avalanchego/utils" -) - -var _ LinkedHashmap[int, struct{}] = (*linkedHashmap[int, struct{}])(nil) - -// Hashmap provides an O(1) mapping from a comparable key to any value. -// Comparable is defined by https://golang.org/ref/spec#Comparison_operators. -type Hashmap[K, V any] interface { - Put(key K, val V) - Get(key K) (val V, exists bool) - Delete(key K) (deleted bool) - Len() int -} - -// LinkedHashmap is a hashmap that keeps track of the oldest pairing an the -// newest pairing. -type LinkedHashmap[K, V any] interface { - Hashmap[K, V] - - Oldest() (key K, val V, exists bool) - Newest() (key K, val V, exists bool) - NewIterator() Iter[K, V] -} - -type keyValue[K, V any] struct { - key K - value V -} - -type linkedHashmap[K comparable, V any] struct { - lock sync.RWMutex - entryMap map[K]*list.Element - entryList *list.List -} - -func New[K comparable, V any]() LinkedHashmap[K, V] { - return &linkedHashmap[K, V]{ - entryMap: make(map[K]*list.Element), - entryList: list.New(), - } -} - -func (lh *linkedHashmap[K, V]) Put(key K, val V) { - lh.lock.Lock() - defer lh.lock.Unlock() - - lh.put(key, val) -} - -func (lh *linkedHashmap[K, V]) Get(key K) (V, bool) { - lh.lock.RLock() - defer lh.lock.RUnlock() - - return lh.get(key) -} - -func (lh *linkedHashmap[K, V]) Delete(key K) bool { - lh.lock.Lock() - defer lh.lock.Unlock() - - return lh.delete(key) -} - -func (lh *linkedHashmap[K, V]) Len() int { - lh.lock.RLock() - defer lh.lock.RUnlock() - - return lh.len() -} - -func (lh *linkedHashmap[K, V]) Oldest() (K, V, bool) { - lh.lock.RLock() - defer lh.lock.RUnlock() - - return lh.oldest() -} - -func (lh *linkedHashmap[K, V]) Newest() (K, V, bool) { - lh.lock.RLock() - defer lh.lock.RUnlock() - - return lh.newest() -} - -func (lh *linkedHashmap[K, V]) put(key K, value V) { - if e, ok := lh.entryMap[key]; ok { - lh.entryList.MoveToBack(e) - e.Value = keyValue[K, V]{ - key: key, - 
value: value, - } - } else { - lh.entryMap[key] = lh.entryList.PushBack(keyValue[K, V]{ - key: key, - value: value, - }) - } -} - -func (lh *linkedHashmap[K, V]) get(key K) (V, bool) { - if e, ok := lh.entryMap[key]; ok { - kv := e.Value.(keyValue[K, V]) - return kv.value, true - } - return utils.Zero[V](), false -} - -func (lh *linkedHashmap[K, V]) delete(key K) bool { - e, ok := lh.entryMap[key] - if ok { - lh.entryList.Remove(e) - delete(lh.entryMap, key) - } - return ok -} - -func (lh *linkedHashmap[K, V]) len() int { - return len(lh.entryMap) -} - -func (lh *linkedHashmap[K, V]) oldest() (K, V, bool) { - if val := lh.entryList.Front(); val != nil { - kv := val.Value.(keyValue[K, V]) - return kv.key, kv.value, true - } - return utils.Zero[K](), utils.Zero[V](), false -} - -func (lh *linkedHashmap[K, V]) newest() (K, V, bool) { - if val := lh.entryList.Back(); val != nil { - kv := val.Value.(keyValue[K, V]) - return kv.key, kv.value, true - } - return utils.Zero[K](), utils.Zero[V](), false -} - -func (lh *linkedHashmap[K, V]) NewIterator() Iter[K, V] { - return &iterator[K, V]{lh: lh} -} diff --git a/utils/logging/logger.go b/utils/logging/logger.go index 2ca95bff104c..f6b3b66a77b8 100644 --- a/utils/logging/logger.go +++ b/utils/logging/logger.go @@ -9,6 +9,10 @@ import ( "go.uber.org/zap" ) +// Func defines the method signature used for all logging methods on the Logger +// interface. 
+type Func func(msg string, fields ...zap.Field) + // Logger defines the interface that is used to keep a record of all events that // happen to the program type Logger interface { diff --git a/utils/metric/api_interceptor.go b/utils/metric/api_interceptor.go index 7d970b22b833..50027fde1478 100644 --- a/utils/metric/api_interceptor.go +++ b/utils/metric/api_interceptor.go @@ -29,27 +29,24 @@ type apiInterceptor struct { requestErrors *prometheus.CounterVec } -func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APIInterceptor, error) { +func NewAPIInterceptor(registerer prometheus.Registerer) (APIInterceptor, error) { requestDurationCount := prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "request_duration_count", - Help: "Number of times this type of request was made", + Name: "request_duration_count", + Help: "Number of times this type of request was made", }, []string{"method"}, ) requestDurationSum := prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "request_duration_sum", - Help: "Amount of time in nanoseconds that has been spent handling this type of request", + Name: "request_duration_sum", + Help: "Amount of time in nanoseconds that has been spent handling this type of request", }, []string{"method"}, ) requestErrors := prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespace, - Name: "request_error_count", + Name: "request_error_count", }, []string{"method"}, ) diff --git a/utils/metric/averager.go b/utils/metric/averager.go index e63e0007c0b8..d84e7875276a 100644 --- a/utils/metric/averager.go +++ b/utils/metric/averager.go @@ -23,23 +23,21 @@ type averager struct { sum prometheus.Gauge } -func NewAverager(namespace, name, desc string, reg prometheus.Registerer) (Averager, error) { +func NewAverager(name, desc string, reg prometheus.Registerer) (Averager, error) { errs := wrappers.Errs{} - a := NewAveragerWithErrs(namespace, name, desc, reg, &errs) + a 
:= NewAveragerWithErrs(name, desc, reg, &errs) return a, errs.Err } -func NewAveragerWithErrs(namespace, name, desc string, reg prometheus.Registerer, errs *wrappers.Errs) Averager { +func NewAveragerWithErrs(name, desc string, reg prometheus.Registerer, errs *wrappers.Errs) Averager { a := averager{ count: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: name + "_count", - Help: "Total # of observations of " + desc, + Name: AppendNamespace(name, "count"), + Help: "Total # of observations of " + desc, }), sum: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: name + "_sum", - Help: "Sum of " + desc, + Name: AppendNamespace(name, "sum"), + Help: "Sum of " + desc, }), } diff --git a/utils/metric/namespace.go b/utils/metric/namespace.go index 4371bb1dc077..8d80a86266f4 100644 --- a/utils/metric/namespace.go +++ b/utils/metric/namespace.go @@ -5,6 +5,11 @@ package metric import "strings" +const ( + NamespaceSeparatorByte = '_' + NamespaceSeparator = string(NamespaceSeparatorByte) +) + func AppendNamespace(prefix, suffix string) string { switch { case len(prefix) == 0: @@ -12,6 +17,6 @@ func AppendNamespace(prefix, suffix string) string { case len(suffix) == 0: return prefix default: - return strings.Join([]string{prefix, suffix}, "_") + return strings.Join([]string{prefix, suffix}, NamespaceSeparator) } } diff --git a/utils/resource/metrics.go b/utils/resource/metrics.go index 3ce87ade258c..42d12f1ccc74 100644 --- a/utils/resource/metrics.go +++ b/utils/resource/metrics.go @@ -17,45 +17,40 @@ type metrics struct { numDiskWritesBytes *prometheus.GaugeVec } -func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { +func newMetrics(registerer prometheus.Registerer) (*metrics, error) { m := &metrics{ numCPUCycles: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_cpu_cycles", - Help: "Total number of CPU cycles", + Name: "num_cpu_cycles", + Help: "Total 
number of CPU cycles", }, []string{"processID"}, ), numDiskReads: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_reads", - Help: "Total number of disk reads", + Name: "num_disk_reads", + Help: "Total number of disk reads", }, []string{"processID"}, ), numDiskReadBytes: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_read_bytes", - Help: "Total number of disk read bytes", + Name: "num_disk_read_bytes", + Help: "Total number of disk read bytes", }, []string{"processID"}, ), numDiskWrites: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_writes", - Help: "Total number of disk writes", + Name: "num_disk_writes", + Help: "Total number of disk writes", }, []string{"processID"}, ), numDiskWritesBytes: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "num_disk_write_bytes", - Help: "Total number of disk write bytes", + Name: "num_disk_write_bytes", + Help: "Total number of disk write bytes", }, []string{"processID"}, ), diff --git a/utils/resource/usage.go b/utils/resource/usage.go index 32a9d1965c90..32ffbfe4aa85 100644 --- a/utils/resource/usage.go +++ b/utils/resource/usage.go @@ -94,7 +94,7 @@ func NewManager( diskHalflife time.Duration, metricsRegisterer prometheus.Registerer, ) (Manager, error) { - processMetrics, err := newMetrics("system_resources", metricsRegisterer) + processMetrics, err := newMetrics(metricsRegisterer) if err != nil { return nil, err } diff --git a/utils/sampler/uniform.go b/utils/sampler/uniform.go index 5ae9a21d8822..5cdf20bc5125 100644 --- a/utils/sampler/uniform.go +++ b/utils/sampler/uniform.go @@ -7,12 +7,12 @@ package sampler type Uniform interface { Initialize(sampleRange uint64) // Sample returns length numbers in the range [0,sampleRange). If there - // aren't enough numbers in the range, an error is returned. If length is + // aren't enough numbers in the range, false is returned. 
If length is // negative the implementation may panic. - Sample(length int) ([]uint64, error) + Sample(length int) ([]uint64, bool) + Next() (uint64, bool) Reset() - Next() (uint64, error) } // NewUniform returns a new sampler diff --git a/utils/sampler/uniform_best.go b/utils/sampler/uniform_best.go index 21f7870d5bdc..fda2579558f6 100644 --- a/utils/sampler/uniform_best.go +++ b/utils/sampler/uniform_best.go @@ -56,7 +56,7 @@ samplerLoop: start := s.clock.Time() for i := 0; i < s.benchmarkIterations; i++ { - if _, err := sampler.Sample(sampleSize); err != nil { + if _, ok := sampler.Sample(sampleSize); !ok { continue samplerLoop } } diff --git a/utils/sampler/uniform_replacer.go b/utils/sampler/uniform_replacer.go index 80666a238343..9d6f47c2d4d3 100644 --- a/utils/sampler/uniform_replacer.go +++ b/utils/sampler/uniform_replacer.go @@ -36,18 +36,18 @@ func (s *uniformReplacer) Initialize(length uint64) { s.drawsCount = 0 } -func (s *uniformReplacer) Sample(count int) ([]uint64, error) { +func (s *uniformReplacer) Sample(count int) ([]uint64, bool) { s.Reset() results := make([]uint64, count) for i := 0; i < count; i++ { - ret, err := s.Next() - if err != nil { - return nil, err + ret, hasNext := s.Next() + if !hasNext { + return nil, false } results[i] = ret } - return results, nil + return results, true } func (s *uniformReplacer) Reset() { @@ -55,9 +55,9 @@ func (s *uniformReplacer) Reset() { s.drawsCount = 0 } -func (s *uniformReplacer) Next() (uint64, error) { +func (s *uniformReplacer) Next() (uint64, bool) { if s.drawsCount >= s.length { - return 0, ErrOutOfRange + return 0, false } draw := s.rng.Uint64Inclusive(s.length-1-s.drawsCount) + s.drawsCount @@ -65,5 +65,5 @@ func (s *uniformReplacer) Next() (uint64, error) { s.drawn[draw] = s.drawn.get(s.drawsCount, s.drawsCount) s.drawsCount++ - return ret, nil + return ret, true } diff --git a/utils/sampler/uniform_resample.go b/utils/sampler/uniform_resample.go index b05ce62fe886..4325d759b1c0 100644 --- 
a/utils/sampler/uniform_resample.go +++ b/utils/sampler/uniform_resample.go @@ -23,28 +23,28 @@ func (s *uniformResample) Initialize(length uint64) { s.drawn = make(map[uint64]struct{}) } -func (s *uniformResample) Sample(count int) ([]uint64, error) { +func (s *uniformResample) Sample(count int) ([]uint64, bool) { s.Reset() results := make([]uint64, count) for i := 0; i < count; i++ { - ret, err := s.Next() - if err != nil { - return nil, err + ret, hasNext := s.Next() + if !hasNext { + return nil, false } results[i] = ret } - return results, nil + return results, true } func (s *uniformResample) Reset() { clear(s.drawn) } -func (s *uniformResample) Next() (uint64, error) { +func (s *uniformResample) Next() (uint64, bool) { i := uint64(len(s.drawn)) if i >= s.length { - return 0, ErrOutOfRange + return 0, false } for { @@ -53,6 +53,6 @@ func (s *uniformResample) Next() (uint64, error) { continue } s.drawn[draw] = struct{}{} - return draw, nil + return draw, true } } diff --git a/utils/sampler/uniform_test.go b/utils/sampler/uniform_test.go index eb9862e7656c..99334464b2d5 100644 --- a/utils/sampler/uniform_test.go +++ b/utils/sampler/uniform_test.go @@ -83,8 +83,8 @@ func UniformInitializeMaxUint64Test(t *testing.T, s Uniform) { s.Initialize(math.MaxUint64) for { - val, err := s.Next() - require.NoError(t, err) + val, hasNext := s.Next() + require.True(t, hasNext) if val > math.MaxInt64 { break @@ -95,8 +95,8 @@ func UniformInitializeMaxUint64Test(t *testing.T, s Uniform) { func UniformOutOfRangeTest(t *testing.T, s Uniform) { s.Initialize(0) - _, err := s.Sample(1) - require.ErrorIs(t, err, ErrOutOfRange) + _, ok := s.Sample(1) + require.False(t, ok) } func UniformEmptyTest(t *testing.T, s Uniform) { @@ -104,8 +104,8 @@ func UniformEmptyTest(t *testing.T, s Uniform) { s.Initialize(1) - val, err := s.Sample(0) - require.NoError(err) + val, ok := s.Sample(0) + require.True(ok) require.Empty(val) } @@ -114,8 +114,8 @@ func UniformSingletonTest(t *testing.T, s 
Uniform) { s.Initialize(1) - val, err := s.Sample(1) - require.NoError(err) + val, ok := s.Sample(1) + require.True(ok) require.Equal([]uint64{0}, val) } @@ -124,8 +124,8 @@ func UniformDistributionTest(t *testing.T, s Uniform) { s.Initialize(3) - val, err := s.Sample(3) - require.NoError(err) + val, ok := s.Sample(3) + require.True(ok) slices.Sort(val) require.Equal([]uint64{0, 1, 2}, val) @@ -134,8 +134,8 @@ func UniformDistributionTest(t *testing.T, s Uniform) { func UniformOverSampleTest(t *testing.T, s Uniform) { s.Initialize(3) - _, err := s.Sample(4) - require.ErrorIs(t, err, ErrOutOfRange) + _, ok := s.Sample(4) + require.False(t, ok) } func UniformLazilySample(t *testing.T, s Uniform) { @@ -146,15 +146,15 @@ func UniformLazilySample(t *testing.T, s Uniform) { for j := 0; j < 2; j++ { sampled := map[uint64]bool{} for i := 0; i < 3; i++ { - val, err := s.Next() - require.NoError(err) + val, hasNext := s.Next() + require.True(hasNext) require.False(sampled[val]) sampled[val] = true } - _, err := s.Next() - require.ErrorIs(err, ErrOutOfRange) + _, hasNext := s.Next() + require.False(hasNext) s.Reset() } diff --git a/utils/sampler/weighted.go b/utils/sampler/weighted.go index 2296da08e97a..64a6493ff860 100644 --- a/utils/sampler/weighted.go +++ b/utils/sampler/weighted.go @@ -3,15 +3,11 @@ package sampler -import "errors" - -var ErrOutOfRange = errors.New("out of range") - // Weighted defines how to sample a specified valued based on a provided // weighted distribution type Weighted interface { Initialize(weights []uint64) error - Sample(sampleValue uint64) (int, error) + Sample(sampleValue uint64) (int, bool) } // NewWeighted returns a new sampler diff --git a/utils/sampler/weighted_array.go b/utils/sampler/weighted_array.go index bbbf98914d68..faae08c0ccbf 100644 --- a/utils/sampler/weighted_array.go +++ b/utils/sampler/weighted_array.go @@ -81,9 +81,9 @@ func (s *weightedArray) Initialize(weights []uint64) error { return nil } -func (s *weightedArray) 
Sample(value uint64) (int, error) { +func (s *weightedArray) Sample(value uint64) (int, bool) { if len(s.arr) == 0 || s.arr[len(s.arr)-1].cumulativeWeight <= value { - return 0, ErrOutOfRange + return 0, false } minIndex := 0 maxIndex := len(s.arr) - 1 @@ -98,7 +98,7 @@ func (s *weightedArray) Sample(value uint64) (int, error) { currentElem := s.arr[index] currentWeight := currentElem.cumulativeWeight if previousWeight <= value && value < currentWeight { - return currentElem.index, nil + return currentElem.index, true } if value < previousWeight { diff --git a/utils/sampler/weighted_best.go b/utils/sampler/weighted_best.go index 59bf60019144..91ec2ae50135 100644 --- a/utils/sampler/weighted_best.go +++ b/utils/sampler/weighted_best.go @@ -60,7 +60,7 @@ samplerLoop: start := s.clock.Time() for _, sample := range samples { - if _, err := sampler.Sample(sample); err != nil { + if _, ok := sampler.Sample(sample); !ok { continue samplerLoop } } diff --git a/utils/sampler/weighted_heap.go b/utils/sampler/weighted_heap.go index f4002a857e4a..96971657c569 100644 --- a/utils/sampler/weighted_heap.go +++ b/utils/sampler/weighted_heap.go @@ -80,9 +80,9 @@ func (s *weightedHeap) Initialize(weights []uint64) error { return nil } -func (s *weightedHeap) Sample(value uint64) (int, error) { +func (s *weightedHeap) Sample(value uint64) (int, bool) { if len(s.heap) == 0 || s.heap[0].cumulativeWeight <= value { - return 0, ErrOutOfRange + return 0, false } index := 0 @@ -90,7 +90,7 @@ func (s *weightedHeap) Sample(value uint64) (int, error) { currentElement := s.heap[index] currentWeight := currentElement.weight if value < currentWeight { - return currentElement.index, nil + return currentElement.index, true } value -= currentWeight diff --git a/utils/sampler/weighted_linear.go b/utils/sampler/weighted_linear.go index d6f0c5d74fba..c66bd442ab55 100644 --- a/utils/sampler/weighted_linear.go +++ b/utils/sampler/weighted_linear.go @@ -68,15 +68,15 @@ func (s *weightedLinear) 
Initialize(weights []uint64) error { return nil } -func (s *weightedLinear) Sample(value uint64) (int, error) { +func (s *weightedLinear) Sample(value uint64) (int, bool) { if len(s.arr) == 0 || s.arr[len(s.arr)-1].cumulativeWeight <= value { - return 0, ErrOutOfRange + return 0, false } index := 0 for { if elem := s.arr[index]; value < elem.cumulativeWeight { - return elem.index, nil + return elem.index, true } index++ } diff --git a/utils/sampler/weighted_test.go b/utils/sampler/weighted_test.go index ea08230d175a..286b7f4823a6 100644 --- a/utils/sampler/weighted_test.go +++ b/utils/sampler/weighted_test.go @@ -97,8 +97,8 @@ func WeightedOutOfRangeTest(t *testing.T, s Weighted) { require.NoError(s.Initialize([]uint64{1})) - _, err := s.Sample(1) - require.ErrorIs(err, ErrOutOfRange) + _, ok := s.Sample(1) + require.False(ok) } func WeightedSingletonTest(t *testing.T, s Weighted) { @@ -106,8 +106,8 @@ func WeightedSingletonTest(t *testing.T, s Weighted) { require.NoError(s.Initialize([]uint64{1})) - index, err := s.Sample(0) - require.NoError(err) + index, ok := s.Sample(0) + require.True(ok) require.Zero(index) } @@ -116,8 +116,8 @@ func WeightedWithZeroTest(t *testing.T, s Weighted) { require.NoError(s.Initialize([]uint64{0, 1})) - index, err := s.Sample(0) - require.NoError(err) + index, ok := s.Sample(0) + require.True(ok) require.Equal(1, index) } @@ -128,8 +128,8 @@ func WeightedDistributionTest(t *testing.T, s Weighted) { counts := make([]int, 5) for i := uint64(0); i < 11; i++ { - index, err := s.Sample(i) - require.NoError(err) + index, ok := s.Sample(i) + require.True(ok) counts[index]++ } require.Equal([]int{1, 1, 2, 3, 4}, counts) diff --git a/utils/sampler/weighted_uniform.go b/utils/sampler/weighted_uniform.go index 22dbb6b5ebd5..44836450b3b8 100644 --- a/utils/sampler/weighted_uniform.go +++ b/utils/sampler/weighted_uniform.go @@ -61,9 +61,9 @@ func (s *weightedUniform) Initialize(weights []uint64) error { return nil } -func (s *weightedUniform) 
Sample(value uint64) (int, error) { +func (s *weightedUniform) Sample(value uint64) (int, bool) { if uint64(len(s.indices)) <= value { - return 0, ErrOutOfRange + return 0, false } - return s.indices[int(value)], nil + return s.indices[int(value)], true } diff --git a/utils/sampler/weighted_without_replacement.go b/utils/sampler/weighted_without_replacement.go index a6b619928742..a5585f0e4300 100644 --- a/utils/sampler/weighted_without_replacement.go +++ b/utils/sampler/weighted_without_replacement.go @@ -8,10 +8,10 @@ package sampler // indices. So duplicate indices can be returned. type WeightedWithoutReplacement interface { Initialize(weights []uint64) error - Sample(count int) ([]int, error) + Sample(count int) ([]int, bool) } -// NewWeightedWithoutReplacement returns a new sampler +// NewDeterministicWeightedWithoutReplacement returns a new sampler func NewDeterministicWeightedWithoutReplacement(source Source) WeightedWithoutReplacement { return &weightedWithoutReplacementGeneric{ u: NewDeterministicUniform(source), diff --git a/utils/sampler/weighted_without_replacement_generic.go b/utils/sampler/weighted_without_replacement_generic.go index c45d64d0b2b0..004ff797b90f 100644 --- a/utils/sampler/weighted_without_replacement_generic.go +++ b/utils/sampler/weighted_without_replacement_generic.go @@ -25,19 +25,20 @@ func (s *weightedWithoutReplacementGeneric) Initialize(weights []uint64) error { return s.w.Initialize(weights) } -func (s *weightedWithoutReplacementGeneric) Sample(count int) ([]int, error) { +func (s *weightedWithoutReplacementGeneric) Sample(count int) ([]int, bool) { s.u.Reset() indices := make([]int, count) for i := 0; i < count; i++ { - weight, err := s.u.Next() - if err != nil { - return nil, err + weight, ok := s.u.Next() + if !ok { + return nil, false } - indices[i], err = s.w.Sample(weight) - if err != nil { - return nil, err + + indices[i], ok = s.w.Sample(weight) + if !ok { + return nil, false } } - return indices, nil + return indices, 
true } diff --git a/utils/sampler/weighted_without_replacement_test.go b/utils/sampler/weighted_without_replacement_test.go index 8d3469141da1..9edbd8b9bf3b 100644 --- a/utils/sampler/weighted_without_replacement_test.go +++ b/utils/sampler/weighted_without_replacement_test.go @@ -99,8 +99,8 @@ func WeightedWithoutReplacementOutOfRangeTest( require.NoError(s.Initialize([]uint64{1})) - _, err := s.Sample(2) - require.ErrorIs(err, ErrOutOfRange) + _, ok := s.Sample(2) + require.False(ok) } func WeightedWithoutReplacementEmptyWithoutWeightTest( @@ -111,8 +111,8 @@ func WeightedWithoutReplacementEmptyWithoutWeightTest( require.NoError(s.Initialize(nil)) - indices, err := s.Sample(0) - require.NoError(err) + indices, ok := s.Sample(0) + require.True(ok) require.Empty(indices) } @@ -124,8 +124,8 @@ func WeightedWithoutReplacementEmptyTest( require.NoError(s.Initialize([]uint64{1})) - indices, err := s.Sample(0) - require.NoError(err) + indices, ok := s.Sample(0) + require.True(ok) require.Empty(indices) } @@ -137,8 +137,8 @@ func WeightedWithoutReplacementSingletonTest( require.NoError(s.Initialize([]uint64{1})) - indices, err := s.Sample(1) - require.NoError(err) + indices, ok := s.Sample(1) + require.True(ok) require.Equal([]int{0}, indices) } @@ -150,8 +150,8 @@ func WeightedWithoutReplacementWithZeroTest( require.NoError(s.Initialize([]uint64{0, 1})) - indices, err := s.Sample(1) - require.NoError(err) + indices, ok := s.Sample(1) + require.True(ok) require.Equal([]int{1}, indices) } @@ -163,8 +163,8 @@ func WeightedWithoutReplacementDistributionTest( require.NoError(s.Initialize([]uint64{1, 1, 2})) - indices, err := s.Sample(4) - require.NoError(err) + indices, ok := s.Sample(4) + require.True(ok) slices.Sort(indices) require.Equal([]int{0, 1, 2, 2}, indices) diff --git a/utils/timer/adaptive_timeout_manager.go b/utils/timer/adaptive_timeout_manager.go index 493769018ba2..5d8670bb56e2 100644 --- a/utils/timer/adaptive_timeout_manager.go +++ 
b/utils/timer/adaptive_timeout_manager.go @@ -92,8 +92,7 @@ type adaptiveTimeoutManager struct { func NewAdaptiveTimeoutManager( config *AdaptiveTimeoutConfig, - metricsNamespace string, - metricsRegister prometheus.Registerer, + reg prometheus.Registerer, ) (AdaptiveTimeoutManager, error) { switch { case config.InitialTimeout > config.MaximumTimeout: @@ -108,24 +107,20 @@ func NewAdaptiveTimeoutManager( tm := &adaptiveTimeoutManager{ networkTimeoutMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "current_timeout", - Help: "Duration of current network timeout in nanoseconds", + Name: "current_timeout", + Help: "Duration of current network timeout in nanoseconds", }), avgLatency: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "average_latency", - Help: "Average network latency in nanoseconds", + Name: "average_latency", + Help: "Average network latency in nanoseconds", }), numTimeouts: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Name: "timeouts", - Help: "Number of timed out requests", + Name: "timeouts", + Help: "Number of timed out requests", }), numPendingTimeouts: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "pending_timeouts", - Help: "Number of pending timeouts", + Name: "pending_timeouts", + Help: "Number of pending timeouts", }), minimumTimeout: config.MinimumTimeout, maximumTimeout: config.MaximumTimeout, @@ -139,10 +134,10 @@ func NewAdaptiveTimeoutManager( tm.averager = math.NewAverager(float64(config.InitialTimeout), config.TimeoutHalflife, tm.clock.Time()) err := utils.Err( - metricsRegister.Register(tm.networkTimeoutMetric), - metricsRegister.Register(tm.avgLatency), - metricsRegister.Register(tm.numTimeouts), - metricsRegister.Register(tm.numPendingTimeouts), + reg.Register(tm.networkTimeoutMetric), + reg.Register(tm.avgLatency), + reg.Register(tm.numTimeouts), + reg.Register(tm.numPendingTimeouts), ) return 
tm, err } diff --git a/utils/timer/adaptive_timeout_manager_test.go b/utils/timer/adaptive_timeout_manager_test.go index 5b725303f385..e522b525272e 100644 --- a/utils/timer/adaptive_timeout_manager_test.go +++ b/utils/timer/adaptive_timeout_manager_test.go @@ -83,7 +83,7 @@ func TestAdaptiveTimeoutManagerInit(t *testing.T) { } for _, test := range tests { - _, err := NewAdaptiveTimeoutManager(&test.config, "", prometheus.NewRegistry()) + _, err := NewAdaptiveTimeoutManager(&test.config, prometheus.NewRegistry()) require.ErrorIs(t, err, test.expectedErr) } } @@ -97,7 +97,6 @@ func TestAdaptiveTimeoutManager(t *testing.T) { TimeoutHalflife: 5 * time.Minute, TimeoutCoefficient: 1.25, }, - "", prometheus.NewRegistry(), ) require.NoError(t, err) diff --git a/utils/window/window_test.go b/utils/window/window_test.go index 332d20b3b329..43bfad82997d 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -127,7 +127,7 @@ func TestTTLAdd(t *testing.T) { require.Equal(4, oldest) } -// TestTTLReadOnly tests that elements are not evicted on Length +// TestTTLLength tests that elements are not evicted on Length func TestTTLLength(t *testing.T) { require := require.New(t) @@ -159,7 +159,7 @@ func TestTTLLength(t *testing.T) { require.Equal(3, window.Length()) } -// TestTTLReadOnly tests that stale elements are not evicted on calling Oldest +// TestTTLOldest tests that stale elements are not evicted on calling Oldest func TestTTLOldest(t *testing.T) { require := require.New(t) diff --git a/version/application.go b/version/application.go index 2be9d838a89e..f3740df362c5 100644 --- a/version/application.go +++ b/version/application.go @@ -9,12 +9,10 @@ import ( "sync" ) -const LegacyAppName = "avalanche" - var ( errDifferentMajor = errors.New("different major version") - _ fmt.Stringer = (*Semantic)(nil) + _ fmt.Stringer = (*Application)(nil) ) type Application struct { diff --git a/version/application_test.go b/version/application_test.go index 
deade1816e22..57a9ab92747d 100644 --- a/version/application_test.go +++ b/version/application_test.go @@ -14,13 +14,13 @@ func TestNewDefaultApplication(t *testing.T) { require := require.New(t) v := &Application{ - Name: LegacyAppName, + Name: Client, Major: 1, Minor: 2, Patch: 3, } - require.Equal("avalanche/1.2.3", v.String()) + require.Equal("avalanchego/1.2.3", v.String()) require.NoError(v.Compatible(v)) require.False(v.Before(v)) } diff --git a/version/compatibility.go b/version/compatibility.go index 1cde189954a8..998b6a9e32bc 100644 --- a/version/compatibility.go +++ b/version/compatibility.go @@ -30,9 +30,9 @@ type Compatibility interface { type compatibility struct { version *Application - minCompatable *Application - minCompatableTime time.Time - prevMinCompatable *Application + minCompatible *Application + minCompatibleTime time.Time + prevMinCompatible *Application clock mockable.Clock } @@ -40,15 +40,15 @@ type compatibility struct { // NewCompatibility returns a compatibility checker with the provided options func NewCompatibility( version *Application, - minCompatable *Application, - minCompatableTime time.Time, - prevMinCompatable *Application, + minCompatible *Application, + minCompatibleTime time.Time, + prevMinCompatible *Application, ) Compatibility { return &compatibility{ version: version, - minCompatable: minCompatable, - minCompatableTime: minCompatableTime, - prevMinCompatable: prevMinCompatable, + minCompatible: minCompatible, + minCompatibleTime: minCompatibleTime, + prevMinCompatible: prevMinCompatible, } } @@ -61,19 +61,19 @@ func (c *compatibility) Compatible(peer *Application) error { return err } - if !peer.Before(c.minCompatable) { + if !peer.Before(c.minCompatible) { // The peer is at least the minimum compatible version. return nil } - // The peer is going to be marked as incompatible at [c.minCompatableTime]. + // The peer is going to be marked as incompatible at [c.minCompatibleTime]. 
now := c.clock.Time() - if !now.Before(c.minCompatableTime) { + if !now.Before(c.minCompatibleTime) { return errIncompatible } - // The minCompatable check isn't being enforced yet. - if !peer.Before(c.prevMinCompatable) { + // The minCompatible check isn't being enforced yet. + if !peer.Before(c.prevMinCompatible) { // The peer is at least the previous minimum compatible version. return nil } diff --git a/version/compatibility.json b/version/compatibility.json index 896b312e3e20..a1596351f373 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,4 +1,18 @@ { + "35": [ + "v1.11.3", + "v1.11.4", + "v1.11.5", + "v1.11.6", + "v1.11.7", + "v1.11.8" + ], + "34": [ + "v1.11.2" + ], + "33": [ + "v1.11.0" + ], "31": [ "v1.10.18", "v1.10.19" diff --git a/version/compatibility_test.go b/version/compatibility_test.go index 09d3cdcb3336..f3051941dc4d 100644 --- a/version/compatibility_test.go +++ b/version/compatibility_test.go @@ -18,14 +18,14 @@ func TestCompatibility(t *testing.T) { Minor: 4, Patch: 3, } - minCompatable := &Application{ + minCompatible := &Application{ Name: Client, Major: 1, Minor: 4, Patch: 0, } - minCompatableTime := time.Unix(9000, 0) - prevMinCompatable := &Application{ + minCompatibleTime := time.Unix(9000, 0) + prevMinCompatible := &Application{ Name: Client, Major: 1, Minor: 3, @@ -34,9 +34,9 @@ func TestCompatibility(t *testing.T) { compatibility := NewCompatibility( v, - minCompatable, - minCompatableTime, - prevMinCompatable, + minCompatible, + minCompatibleTime, + prevMinCompatible, ).(*compatibility) require.Equal(t, v, compatibility.Version()) @@ -45,15 +45,6 @@ func TestCompatibility(t *testing.T) { time time.Time expectedErr error }{ - { - peer: &Application{ - Name: LegacyAppName, - Major: 1, - Minor: 5, - Patch: 0, - }, - time: minCompatableTime, - }, { peer: &Application{ Name: Client, @@ -61,7 +52,7 @@ func TestCompatibility(t *testing.T) { Minor: 5, Patch: 0, }, - time: minCompatableTime, + time: 
minCompatibleTime, }, { peer: &Application{ @@ -79,7 +70,7 @@ func TestCompatibility(t *testing.T) { Minor: 1, Patch: 0, }, - time: minCompatableTime, + time: minCompatibleTime, expectedErr: errDifferentMajor, }, { @@ -89,7 +80,7 @@ func TestCompatibility(t *testing.T) { Minor: 3, Patch: 5, }, - time: minCompatableTime, + time: minCompatibleTime, expectedErr: errIncompatible, }, { diff --git a/version/constants.go b/version/constants.go index 238ae5d8c98b..2899b37fac6b 100644 --- a/version/constants.go +++ b/version/constants.go @@ -18,15 +18,15 @@ const ( // RPCChainVMProtocol should be bumped anytime changes are made which // require the plugin vm to upgrade to latest avalanchego release to be // compatible. - RPCChainVMProtocol uint = 31 + RPCChainVMProtocol uint = 35 ) // These are globals that describe network upgrades and node versions var ( Current = &Semantic{ Major: 1, - Minor: 10, - Patch: 19, + Minor: 11, + Patch: 8, } CurrentApp = &Application{ Name: Client, @@ -37,13 +37,13 @@ var ( MinimumCompatibleVersion = &Application{ Name: Client, Major: 1, - Minor: 10, + Minor: 11, Patch: 0, } PrevMinimumCompatibleVersion = &Application{ Name: Client, Major: 1, - Minor: 9, + Minor: 10, Patch: 0, } @@ -136,8 +136,12 @@ var ( constants.FujiID: ids.FromStringOrPanic("2D1cmbiG36BqQMRyHt4kFhWarmatA1ighSpND3FeFgz3vFVtCZ"), } - // TODO: update this before release DurangoTimes = map[uint32]time.Time{ + constants.MainnetID: time.Date(2024, time.March, 6, 16, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2024, time.February, 13, 16, 0, 0, 0, time.UTC), + } + + EUpgradeTimes = map[uint32]time.Time{ constants.MainnetID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), constants.FujiID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), } @@ -241,11 +245,18 @@ func GetDurangoTime(networkID uint32) time.Time { return DefaultUpgradeTime } +func GetEUpgradeTime(networkID uint32) time.Time { + if upgradeTime, exists := EUpgradeTimes[networkID]; exists { + 
return upgradeTime + } + return DefaultUpgradeTime +} + func GetCompatibility(networkID uint32) Compatibility { return NewCompatibility( CurrentApp, MinimumCompatibleVersion, - GetCortinaTime(networkID), + GetDurangoTime(networkID), PrevMinimumCompatibleVersion, ) } diff --git a/version/parser.go b/version/parser.go index 32cf153bb55c..68fc5fb5eb44 100644 --- a/version/parser.go +++ b/version/parser.go @@ -11,9 +11,8 @@ import ( ) var ( - errMissingVersionPrefix = errors.New("missing required version prefix") - errMissingApplicationPrefix = errors.New("missing required application prefix") - errMissingVersions = errors.New("missing version numbers") + errMissingVersionPrefix = errors.New("missing required version prefix") + errMissingVersions = errors.New("missing version numbers") ) func Parse(s string) (*Semantic, error) { @@ -34,27 +33,6 @@ func Parse(s string) (*Semantic, error) { }, nil } -// TODO: Remove after v1.11.x is activated -func ParseLegacyApplication(s string) (*Application, error) { - prefix := LegacyAppName + "/" - if !strings.HasPrefix(s, prefix) { - return nil, fmt.Errorf("%w: %q", errMissingApplicationPrefix, s) - } - - s = s[len(prefix):] - major, minor, patch, err := parseVersions(s) - if err != nil { - return nil, err - } - - return &Application{ - Name: Client, // Convert the legacy name to the current client name - Major: major, - Minor: minor, - Patch: patch, - }, nil -} - func parseVersions(s string) (int, int, int, error) { splitVersion := strings.SplitN(s, ".", 3) if numSeperators := len(splitVersion); numSeperators != 3 { diff --git a/version/parser_test.go b/version/parser_test.go index 42adb764c9c2..4cf0bc985e0b 100644 --- a/version/parser_test.go +++ b/version/parser_test.go @@ -64,57 +64,3 @@ func TestParse(t *testing.T) { }) } } - -func TestParseLegacyApplication(t *testing.T) { - v, err := ParseLegacyApplication("avalanche/1.2.3") - - require.NoError(t, err) - require.NotNil(t, v) - require.Equal(t, "avalanchego/1.2.3", 
v.String()) - require.Equal(t, "avalanchego", v.Name) - require.Equal(t, 1, v.Major) - require.Equal(t, 2, v.Minor) - require.Equal(t, 3, v.Patch) - require.NoError(t, v.Compatible(v)) - require.False(t, v.Before(v)) - - tests := []struct { - version string - expectedErr error - }{ - { - version: "", - expectedErr: errMissingApplicationPrefix, - }, - { - version: "avalanchego/v1.2.3", - expectedErr: errMissingApplicationPrefix, - }, - { - version: "avalanche/", - expectedErr: errMissingVersions, - }, - { - version: "avalanche/z.0.0", - expectedErr: strconv.ErrSyntax, - }, - { - version: "avalanche/0.z.0", - expectedErr: strconv.ErrSyntax, - }, - { - version: "avalanche/0.0.z", - expectedErr: strconv.ErrSyntax, - }, - { - version: "avalanche/0.0.0.0", - expectedErr: strconv.ErrSyntax, - }, - } - for _, test := range tests { - t.Run(test.version, func(t *testing.T) { - _, err := ParseLegacyApplication(test.version) - require.ErrorIs(t, err, test.expectedErr) - }) - } -} diff --git a/version/string.go b/version/string.go index 9abe555bcebb..80df9bea697a 100644 --- a/version/string.go +++ b/version/string.go @@ -9,32 +9,37 @@ import ( "strings" ) -var ( - // String is displayed when CLI arg --version is used - String string +// GitCommit is set in the build script at compile time +var GitCommit string - // GitCommit is set in the build script at compile time - GitCommit string -) +// Versions contains the versions relevant to a build of avalanchego. In +// addition to supporting construction of the string displayed by +// --version, it is used to produce the output of --version-json and can +// be used to unmarshal that output. 
+type Versions struct { + Application string `json:"application"` + Database string `json:"database"` + RPCChainVM uint64 `json:"rpcchainvm"` + // Commit may be empty if GitCommit was not set at compile time + Commit string `json:"commit"` + Go string `json:"go"` +} -func init() { - format := "%s [database=%s, rpcchainvm=%d" - args := []interface{}{ - CurrentApp, - CurrentDatabase, - RPCChainVMProtocol, - } - if GitCommit != "" { - format += ", commit=%s" - args = append(args, GitCommit) +func GetVersions() *Versions { + return &Versions{ + Application: CurrentApp.String(), + Database: CurrentDatabase.String(), + RPCChainVM: uint64(RPCChainVMProtocol), + Commit: GitCommit, + Go: strings.TrimPrefix(runtime.Version(), "go"), } +} - // add golang version - goVersion := runtime.Version() - goVersionNumber := strings.TrimPrefix(goVersion, "go") - format += ", go=%s" - args = append(args, goVersionNumber) - - format += "]\n" - String = fmt.Sprintf(format, args...) +func (v *Versions) String() string { + // This format maintains consistency with previous --version output + versionString := fmt.Sprintf("%s [database=%s, rpcchainvm=%d, ", v.Application, v.Database, v.RPCChainVM) + if len(v.Commit) > 0 { + versionString += fmt.Sprintf("commit=%s, ", v.Commit) + } + return versionString + fmt.Sprintf("go=%s]", v.Go) } diff --git a/version/string_test.go b/version/string_test.go new file mode 100644 index 000000000000..58f44668b3e0 --- /dev/null +++ b/version/string_test.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package version + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVersionsGetString(t *testing.T) { + versions := Versions{ + Application: "1", + Database: "2", + RPCChainVM: 3, + Commit: "4", + Go: "5", + } + require.Equal(t, "1 [database=2, rpcchainvm=3, commit=4, go=5]", versions.String()) + versions.Commit = "" + require.Equal(t, "1 [database=2, rpcchainvm=3, go=5]", versions.String()) +} diff --git a/vms/avm/block/block_test.go b/vms/avm/block/block_test.go index 6100f1d6d987..37f0e5f5e54b 100644 --- a/vms/avm/block/block_test.go +++ b/vms/avm/block/block_test.go @@ -29,7 +29,6 @@ func TestInvalidBlock(t *testing.T) { require := require.New(t) parser, err := NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, @@ -45,7 +44,6 @@ func TestStandardBlocks(t *testing.T) { require := require.New(t) parser, err := NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, diff --git a/vms/avm/block/builder/builder_test.go b/vms/avm/block/builder/builder_test.go index d28507671cf6..36159598b706 100644 --- a/vms/avm/block/builder/builder_test.go +++ b/vms/avm/block/builder/builder_test.go @@ -513,7 +513,6 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { require.True(ok) parser, err := block.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, @@ -545,7 +544,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { state.AddBlock(parentBlk) state.SetLastAccepted(parentBlk.ID()) - metrics, err := metrics.New("", registerer) + metrics, err := metrics.New(registerer) require.NoError(err) manager := blkexecutor.NewManager(mempool, metrics, state, backend, clk, onAccept) diff --git a/vms/avm/block/executor/block_test.go b/vms/avm/block/executor/block_test.go index 988f02be315e..8e72f4c21ebe 100644 --- a/vms/avm/block/executor/block_test.go +++ b/vms/avm/block/executor/block_test.go @@ -22,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" 
"github.com/ava-labs/avalanchego/vms/avm/block" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/metrics" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -45,6 +46,7 @@ func TestBlockVerify(t *testing.T) { b := &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{}, }, } @@ -62,8 +64,10 @@ func TestBlockVerify(t *testing.T) { mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() mockBlock.EXPECT().MerkleRoot().Return(ids.GenerateTestID()).AnyTimes() return &Block{ - Block: mockBlock, - manager: &manager{}, + Block: mockBlock, + manager: &manager{ + backend: defaultTestBackend(false, nil), + }, } }, expectedErr: ErrUnexpectedMerkleRoot, @@ -82,7 +86,8 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ - clk: clk, + backend: defaultTestBackend(false, nil), + clk: clk, }, } }, @@ -99,6 +104,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{}, clk: &mockable.Clock{}, }, @@ -125,6 +131,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), mempool: mempool, metrics: metrics.NewMockMetrics(ctrl), blkIDToState: map[ids.ID]*blockState{}, @@ -157,6 +164,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), state: mockState, blkIDToState: map[ids.ID]*blockState{}, clk: &mockable.Clock{}, @@ -193,6 +201,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), state: mockState, blkIDToState: map[ids.ID]*blockState{}, clk: &mockable.Clock{}, @@ -232,6 +241,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ 
Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ parentID: { onAcceptState: mockParentState, @@ -279,6 +289,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), mempool: mempool, metrics: metrics.NewMockMetrics(ctrl), blkIDToState: map[ids.ID]*blockState{ @@ -331,7 +342,7 @@ func TestBlockVerify(t *testing.T) { manager: &manager{ mempool: mempool, metrics: metrics.NewMockMetrics(ctrl), - backend: &executor.Backend{}, + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ parentID: { onAcceptState: mockParentState, @@ -409,7 +420,7 @@ func TestBlockVerify(t *testing.T) { manager: &manager{ mempool: mempool, metrics: metrics.NewMockMetrics(ctrl), - backend: &executor.Backend{}, + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ parentID: { onAcceptState: mockParentState, @@ -467,7 +478,7 @@ func TestBlockVerify(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ - backend: &executor.Backend{}, + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ parentID: { onAcceptState: mockParentState, @@ -519,7 +530,7 @@ func TestBlockVerify(t *testing.T) { manager: &manager{ mempool: mockMempool, metrics: metrics.NewMockMetrics(ctrl), - backend: &executor.Backend{}, + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ parentID: { onAcceptState: mockParentState, @@ -591,13 +602,9 @@ func TestBlockAccept(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ - mempool: mempool, - metrics: metrics.NewMockMetrics(ctrl), - backend: &executor.Backend{ - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - }, + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{}, }, } @@ -627,11 +634,7 @@ func TestBlockAccept(t 
*testing.T) { manager: &manager{ state: mockManagerState, mempool: mempool, - backend: &executor.Backend{ - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - }, + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ blockID: { onAcceptState: mockOnAcceptState, @@ -670,12 +673,7 @@ func TestBlockAccept(t *testing.T) { manager: &manager{ state: mockManagerState, mempool: mempool, - backend: &executor.Backend{ - Ctx: &snow.Context{ - SharedMemory: mockSharedMemory, - Log: logging.NoLog{}, - }, - }, + backend: defaultTestBackend(false, mockSharedMemory), blkIDToState: map[ids.ID]*blockState{ blockID: { onAcceptState: mockOnAcceptState, @@ -718,12 +716,7 @@ func TestBlockAccept(t *testing.T) { state: mockManagerState, mempool: mempool, metrics: metrics, - backend: &executor.Backend{ - Ctx: &snow.Context{ - SharedMemory: mockSharedMemory, - Log: logging.NoLog{}, - }, - }, + backend: defaultTestBackend(false, mockSharedMemory), blkIDToState: map[ids.ID]*blockState{ blockID: { onAcceptState: mockOnAcceptState, @@ -769,12 +762,7 @@ func TestBlockAccept(t *testing.T) { state: mockManagerState, mempool: mempool, metrics: metrics, - backend: &executor.Backend{ - Ctx: &snow.Context{ - SharedMemory: mockSharedMemory, - Log: logging.NoLog{}, - }, - }, + backend: defaultTestBackend(false, mockSharedMemory), blkIDToState: map[ids.ID]*blockState{ blockID: { onAcceptState: mockOnAcceptState, @@ -869,13 +857,8 @@ func TestBlockReject(t *testing.T) { lastAccepted: lastAcceptedID, mempool: mempool, metrics: metrics.NewMockMetrics(ctrl), - backend: &executor.Backend{ - Bootstrapped: true, - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - }, - state: mockState, + backend: defaultTestBackend(true, nil), + state: mockState, blkIDToState: map[ids.ID]*blockState{ blockID: {}, }, @@ -927,13 +910,8 @@ func TestBlockReject(t *testing.T) { lastAccepted: lastAcceptedID, mempool: mempool, metrics: metrics.NewMockMetrics(ctrl), - backend: &executor.Backend{ - 
Bootstrapped: true, - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - }, - state: mockState, + backend: defaultTestBackend(true, nil), + state: mockState, blkIDToState: map[ids.ID]*blockState{ blockID: {}, }, @@ -981,6 +959,7 @@ func TestBlockStatus(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), lastAccepted: blockID, }, } @@ -996,6 +975,7 @@ func TestBlockStatus(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{ blockID: {}, }, @@ -1017,6 +997,7 @@ func TestBlockStatus(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{}, state: mockState, }, @@ -1037,6 +1018,7 @@ func TestBlockStatus(t *testing.T) { return &Block{ Block: mockBlock, manager: &manager{ + backend: defaultTestBackend(false, nil), blkIDToState: map[ids.ID]*blockState{}, state: mockState, }, @@ -1055,3 +1037,18 @@ func TestBlockStatus(t *testing.T) { }) } } + +func defaultTestBackend(bootstrapped bool, sharedMemory atomic.SharedMemory) *executor.Backend { + return &executor.Backend{ + Bootstrapped: bootstrapped, + Ctx: &snow.Context{ + SharedMemory: sharedMemory, + Log: logging.NoLog{}, + }, + Config: &config.Config{ + EUpgradeTime: mockable.MaxTime, + TxFee: 0, + CreateAssetTxFee: 0, + }, + } +} diff --git a/vms/avm/block/executor/manager_test.go b/vms/avm/block/executor/manager_test.go index 275bbabc9bac..14484fcbb559 100644 --- a/vms/avm/block/executor/manager_test.go +++ b/vms/avm/block/executor/manager_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) var ( @@ -123,7 +122,7 @@ func TestManagerVerifyTx(t *testing.T) { }, managerF: func(*gomock.Controller) 
*manager { return &manager{ - backend: &executor.Backend{}, + backend: defaultTestBackend(false, nil), } }, expectedErr: ErrChainNotSynced, @@ -139,9 +138,7 @@ func TestManagerVerifyTx(t *testing.T) { }, managerF: func(*gomock.Controller) *manager { return &manager{ - backend: &executor.Backend{ - Bootstrapped: true, - }, + backend: defaultTestBackend(true, nil), } }, expectedErr: errTestSyntacticVerifyFail, @@ -167,9 +164,7 @@ func TestManagerVerifyTx(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ - backend: &executor.Backend{ - Bootstrapped: true, - }, + backend: defaultTestBackend(true, nil), state: state, lastAccepted: lastAcceptedID, } @@ -199,9 +194,7 @@ func TestManagerVerifyTx(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ - backend: &executor.Backend{ - Bootstrapped: true, - }, + backend: defaultTestBackend(true, nil), state: state, lastAccepted: lastAcceptedID, } @@ -231,9 +224,7 @@ func TestManagerVerifyTx(t *testing.T) { state.EXPECT().GetTimestamp().Return(time.Time{}) return &manager{ - backend: &executor.Backend{ - Bootstrapped: true, - }, + backend: defaultTestBackend(true, nil), state: state, lastAccepted: lastAcceptedID, } diff --git a/vms/avm/block/parser.go b/vms/avm/block/parser.go index f0c359a513b0..bfae841093d1 100644 --- a/vms/avm/block/parser.go +++ b/vms/avm/block/parser.go @@ -5,7 +5,6 @@ package block import ( "reflect" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/utils" @@ -31,8 +30,8 @@ type parser struct { txs.Parser } -func NewParser(durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { - p, err := txs.NewParser(durangoTime, fxs) +func NewParser(fxs []fxs.Fx) (Parser, error) { + p, err := txs.NewParser(fxs) if err != nil { return nil, err } @@ -49,13 +48,12 @@ func NewParser(durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { } func NewCustomParser( - durangoTime time.Time, typeToFxIndex map[reflect.Type]int, clock 
*mockable.Clock, log logging.Logger, fxs []fxs.Fx, ) (Parser, error) { - p, err := txs.NewCustomParser(durangoTime, typeToFxIndex, clock, log, fxs) + p, err := txs.NewCustomParser(typeToFxIndex, clock, log, fxs) if err != nil { return nil, err } diff --git a/vms/avm/client.go b/vms/avm/client.go index 63df6543446e..d53ed9388c7a 100644 --- a/vms/avm/client.go +++ b/vms/avm/client.go @@ -5,6 +5,7 @@ package avm import ( "context" + "errors" "fmt" "time" @@ -19,7 +20,11 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = (*client)(nil) +var ( + _ Client = (*client)(nil) + + ErrRejected = errors.New("rejected") +) // Client for interacting with an AVM (X-Chain) instance type Client interface { @@ -35,12 +40,6 @@ type Client interface { // Deprecated: GetTxStatus only returns Accepted or Unknown, GetTx should be // used instead to determine if the tx was accepted. GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (choices.Status, error) - // ConfirmTx attempts to confirm [txID] by repeatedly checking its status. - // Note: ConfirmTx will block until either the context is done or the client - // returns a decided status. - // TODO: Move this function off of the Client interface into a utility - // function. - ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (choices.Status, error) // GetTx returns the byte representation of [txID] GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) // GetUTXOs returns the byte representation of the UTXOs controlled by [addrs] @@ -285,26 +284,6 @@ func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Op return res.Status, err } -func (c *client) ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (choices.Status, error) { - ticker := time.NewTicker(freq) - defer ticker.Stop() - - for { - status, err := c.GetTxStatus(ctx, txID, options...) 
- if err == nil { - if status.Decided() { - return status, nil - } - } - - select { - case <-ticker.C: - case <-ctx.Done(): - return status, ctx.Err() - } - } -} - func (c *client) GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) { res := &api.FormattedTx{} err := c.requester.SendRequest(ctx, "avm.getTx", &api.GetTxArgs{ @@ -766,3 +745,34 @@ func (c *client) Export( }, res, options...) return res.TxID, err } + +func AwaitTxAccepted( + c Client, + ctx context.Context, + txID ids.ID, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) + defer ticker.Stop() + + for { + status, err := c.GetTxStatus(ctx, txID, options...) + if err != nil { + return err + } + + switch status { + case choices.Accepted: + return nil + case choices.Rejected: + return ErrRejected + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/vms/avm/config.md b/vms/avm/config.md new file mode 100644 index 000000000000..58cbe1435c26 --- /dev/null +++ b/vms/avm/config.md @@ -0,0 +1,61 @@ +--- +tags: [Nodes, AvalancheGo] +description: Reference for all available X-chain config options and flags. +pagination_label: X-Chain Configs +sidebar_position: 2 +--- + +# X-Chain + +In order to specify a config for the X-Chain, a JSON config file should be +placed at `{chain-config-dir}/X/config.json`. + +For example if `chain-config-dir` has the default value which is +`$HOME/.avalanchego/configs/chains`, then `config.json` can be placed at +`$HOME/.avalanchego/configs/chains/X/config.json`. + +This allows you to specify a config to be passed into the X-Chain. The default +values for this config are: + +```json +{ + "index-transactions": false, + "index-allow-incomplete": false, + "checksums-enabled": false +} +``` + +Default values are overridden only if explicitly specified in the config. 
+ +The parameters are as follows: + +## Transaction Indexing + +### `index-transactions` + +_Boolean_ + +Enables AVM transaction indexing if set to `true`. +When set to `true`, AVM transactions are indexed against the `address` and +`assetID` involved. This data is available via `avm.getAddressTxs` +[API](/reference/avalanchego/x-chain/api.md#avmgetaddresstxs). + +:::note +If `index-transactions` is set to true, it must always be set to true +for the node's lifetime. If set to `false` after having been set to `true`, the +node will refuse to start unless `index-allow-incomplete` is also set to `true` +(see below). +::: + +### `index-allow-incomplete` + +_Boolean_ + +Allows incomplete indices. This config value is ignored if there is no X-Chain indexed data in the DB and +`index-transactions` is set to `false`. + +### `checksums-enabled` + +_Boolean_ + +Enables checksums if set to `true`. diff --git a/vms/avm/config/config.go b/vms/avm/config/config.go index df6e4f7de2ae..0d026eb05dbb 100644 --- a/vms/avm/config/config.go +++ b/vms/avm/config/config.go @@ -13,6 +13,10 @@ type Config struct { // Fee that must be burned by every asset creating transaction CreateAssetTxFee uint64 - // Time of the Durango network upgrade - DurangoTime time.Time + // Time of the E network upgrade + EUpgradeTime time.Time +} + +func (c *Config) IsEActivated(timestamp time.Time) bool { + return !timestamp.Before(c.EUpgradeTime) } diff --git a/vms/avm/config_test.go b/vms/avm/config_test.go index 27481d78b901..4f04833c77b8 100644 --- a/vms/avm/config_test.go +++ b/vms/avm/config_test.go @@ -34,12 +34,20 @@ func TestParseConfig(t *testing.T) { }, }, { - name: "manually specified checksums enabled", + name: "manually specified network value", configBytes: []byte(`{"network":{"max-validator-set-staleness":1}}`), expectedConfig: Config{ Network: network.Config{ MaxValidatorSetStaleness: time.Nanosecond, TargetGossipSize: network.DefaultConfig.TargetGossipSize, + PushGossipPercentStake: 
network.DefaultConfig.PushGossipPercentStake, + PushGossipNumValidators: network.DefaultConfig.PushGossipNumValidators, + PushGossipNumPeers: network.DefaultConfig.PushGossipNumPeers, + PushRegossipNumValidators: network.DefaultConfig.PushRegossipNumValidators, + PushRegossipNumPeers: network.DefaultConfig.PushRegossipNumPeers, + PushGossipDiscardedCacheSize: network.DefaultConfig.PushGossipDiscardedCacheSize, + PushGossipMaxRegossipFrequency: network.DefaultConfig.PushGossipMaxRegossipFrequency, + PushGossipFrequency: network.DefaultConfig.PushGossipFrequency, PullGossipPollSize: network.DefaultConfig.PullGossipPollSize, PullGossipFrequency: network.DefaultConfig.PullGossipFrequency, PullGossipThrottlingPeriod: network.DefaultConfig.PullGossipThrottlingPeriod, @@ -47,7 +55,6 @@ func TestParseConfig(t *testing.T) { ExpectedBloomFilterElements: network.DefaultConfig.ExpectedBloomFilterElements, ExpectedBloomFilterFalsePositiveProbability: network.DefaultConfig.ExpectedBloomFilterFalsePositiveProbability, MaxBloomFilterFalsePositiveProbability: network.DefaultConfig.MaxBloomFilterFalsePositiveProbability, - LegacyPushGossipCacheSize: network.DefaultConfig.LegacyPushGossipCacheSize, }, IndexTransactions: DefaultConfig.IndexTransactions, IndexAllowIncomplete: DefaultConfig.IndexAllowIncomplete, diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index 7b8ec8902078..d4375aa092d1 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -24,13 +24,14 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/block/executor" 
"github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/txstest" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -39,7 +40,14 @@ import ( keystoreutils "github.com/ava-labs/avalanchego/vms/components/keystore" ) +type fork uint8 + const ( + durango fork = iota + eUpgrade + + latest = durango + testTxFee uint64 = 1000 startBalance uint64 = 50000 @@ -85,6 +93,7 @@ type user struct { } type envConfig struct { + fork fork isCustomFeeAsset bool keystoreUsers []*user vmStaticConfig *config.Config @@ -95,13 +104,12 @@ type envConfig struct { } type environment struct { - genesisBytes []byte - genesisTx *txs.Tx - sharedMemory *atomic.Memory - issuer chan common.Message - vm *VM - service *Service - walletService *WalletService + genesisBytes []byte + genesisTx *txs.Tx + sharedMemory *atomic.Memory + issuer chan common.Message + vm *VM + txBuilder *txstest.Builder } // setup the testing environment @@ -145,10 +153,7 @@ func setup(tb testing.TB, c *envConfig) *environment { require.NoError(keystoreUser.Close()) } - vmStaticConfig := config.Config{ - TxFee: testTxFee, - CreateAssetTxFee: testTxFee, - } + vmStaticConfig := staticConfig(tb, c.fork) if c.vmStaticConfig != nil { vmStaticConfig = *c.vmStaticConfig } @@ -198,13 +203,7 @@ func setup(tb testing.TB, c *envConfig) *environment { sharedMemory: m, issuer: issuer, vm: vm, - service: &Service{ - vm: vm, - }, - walletService: &WalletService{ - vm: vm, - pendingTxs: linkedhashmap.New[ids.ID, *txs.Tx](), - }, + txBuilder: txstest.New(vm.parser.Codec(), vm.ctx, &vm.Config, vm.feeAssetID, vm.state), } require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -218,9 +217,35 @@ func setup(tb testing.TB, c *envConfig) *environment { } require.NoError(vm.SetState(context.Background(), 
snow.NormalOp)) + + tb.Cleanup(func() { + env.vm.ctx.Lock.Lock() + defer env.vm.ctx.Lock.Unlock() + + require.NoError(env.vm.Shutdown(context.Background())) + }) + return env } +func staticConfig(tb testing.TB, f fork) config.Config { + c := config.Config{ + TxFee: testTxFee, + CreateAssetTxFee: testTxFee, + EUpgradeTime: mockable.MaxTime, + } + + switch f { + case eUpgrade: + c.EUpgradeTime = time.Time{} + case durango: + default: + require.FailNow(tb, "unhandled fork", f) + } + + return c +} + // Returns: // // 1. tx in genesis that creates asset @@ -229,7 +254,6 @@ func getCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName st require := require.New(tb) parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, @@ -320,8 +344,8 @@ func sampleAddrs(tb testing.TB, addressFormatter avax.AddressManager, addrs []id sampler.Initialize(uint64(len(addrs))) numAddrs := 1 + rand.Intn(len(addrs)) // #nosec G404 - indices, err := sampler.Sample(numAddrs) - require.NoError(err) + indices, ok := sampler.Sample(numAddrs) + require.True(ok) for _, index := range indices { addr := addrs[index] addrStr, err := addressFormatter.FormatLocalAddress(addr) @@ -489,7 +513,7 @@ func issueAndAccept( issuer <-chan common.Message, tx *txs.Tx, ) { - txID, err := vm.issueTx(tx) + txID, err := vm.issueTxFromRPC(tx) require.NoError(err) require.Equal(tx.ID(), txID) diff --git a/vms/avm/fxs/fx.go b/vms/avm/fxs/fx.go index 2749ee4500a3..7dec9fdfb531 100644 --- a/vms/avm/fxs/fx.go +++ b/vms/avm/fxs/fx.go @@ -46,7 +46,7 @@ type Fx interface { // VerifyOperation verifies that the specified transaction can spend the // provided utxos conditioned on the result being restricted to the provided // outputs. If the transaction can't spend the output based on the input and - // credential, a non-nil error should be returned. + // credential, a non-nil error should be returned. 
VerifyOperation(tx, op, cred interface{}, utxos []interface{}) error } diff --git a/vms/avm/index_test.go b/vms/avm/index_test.go index d5978e0ed37c..3d8614d5ee6d 100644 --- a/vms/avm/index_test.go +++ b/vms/avm/index_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "testing" "github.com/prometheus/client_golang/prometheus" @@ -19,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" @@ -29,13 +27,8 @@ import ( func TestIndexTransaction_Ordered(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() key := keys[0] addr := key.PublicKey().Address() @@ -73,13 +66,8 @@ func TestIndexTransaction_Ordered(t *testing.T) { func TestIndexTransaction_MultipleTransactions(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() addressTxMap := map[ids.ShortID]*txs.Tx{} txAssetID := avax.Asset{ID: env.genesisTx.ID()} @@ -121,13 +109,8 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { func TestIndexTransaction_MultipleAddresses(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, - }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := 
setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() addrs := make([]ids.ShortID, len(keys)) for i, key := range keys { @@ -164,11 +147,8 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) { func TestIndexer_Read(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() // generate test address and asset IDs assetID := ids.GenerateTestID() @@ -258,7 +238,7 @@ func buildUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortID) *avax UTXOID: utxoID, Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ - Amt: 1000, + Amt: startBalance, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, @@ -276,14 +256,14 @@ func buildTX(chainID ids.ID, utxoID avax.UTXOID, txAssetID avax.Asset, address . UTXOID: utxoID, Asset: txAssetID, In: &secp256k1fx.TransferInput{ - Amt: 1000, + Amt: startBalance, Input: secp256k1fx.Input{SigIndices: []uint32{0}}, }, }}, Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ - Amt: 1000, + Amt: startBalance - testTxFee, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: address, diff --git a/vms/avm/metrics/metrics.go b/vms/avm/metrics/metrics.go index 9e4053e1fcc6..7d122ce6e42e 100644 --- a/vms/avm/metrics/metrics.go +++ b/vms/avm/metrics/metrics.go @@ -66,32 +66,26 @@ func (m *metrics) MarkTxAccepted(tx *txs.Tx) error { return tx.Unsigned.Visit(m.txMetrics) } -func New( - namespace string, - registerer prometheus.Registerer, -) (Metrics, error) { - txMetrics, err := newTxMetrics(namespace, registerer) +func New(registerer prometheus.Registerer) (Metrics, error) { + txMetrics, err := newTxMetrics(registerer) errs := wrappers.Errs{Err: err} m := &metrics{txMetrics: txMetrics} m.numTxRefreshes = prometheus.NewCounter(prometheus.CounterOpts{ - 
Namespace: namespace, - Name: "tx_refreshes", - Help: "Number of times unique txs have been refreshed", + Name: "tx_refreshes", + Help: "Number of times unique txs have been refreshed", }) m.numTxRefreshHits = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refresh_hits", - Help: "Number of times unique txs have not been unique, but were cached", + Name: "tx_refresh_hits", + Help: "Number of times unique txs have not been unique, but were cached", }) m.numTxRefreshMisses = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refresh_misses", - Help: "Number of times unique txs have not been unique and weren't cached", + Name: "tx_refresh_misses", + Help: "Number of times unique txs have not been unique and weren't cached", }) - apiRequestMetric, err := metric.NewAPIInterceptor(namespace, registerer) + apiRequestMetric, err := metric.NewAPIInterceptor(registerer) m.APIInterceptor = apiRequestMetric errs.Add( err, diff --git a/vms/avm/metrics/tx_metrics.go b/vms/avm/metrics/tx_metrics.go index 0e5cd184cc4b..3c8d1bac79ad 100644 --- a/vms/avm/metrics/tx_metrics.go +++ b/vms/avm/metrics/tx_metrics.go @@ -4,75 +4,67 @@ package metrics import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/avm/txs" ) -var _ txs.Visitor = (*txMetrics)(nil) +const txLabel = "tx" + +var ( + _ txs.Visitor = (*txMetrics)(nil) + + txLabels = []string{txLabel} +) type txMetrics struct { - numBaseTxs, - numCreateAssetTxs, - numOperationTxs, - numImportTxs, - numExportTxs prometheus.Counter + numTxs *prometheus.CounterVec } -func newTxMetrics( - namespace string, - registerer prometheus.Registerer, -) (*txMetrics, error) { - errs := wrappers.Errs{} +func newTxMetrics(registerer prometheus.Registerer) (*txMetrics, error) { m := &txMetrics{ - numBaseTxs: newTxMetric(namespace, "base", registerer, &errs), - numCreateAssetTxs: 
newTxMetric(namespace, "create_asset", registerer, &errs), - numOperationTxs: newTxMetric(namespace, "operation", registerer, &errs), - numImportTxs: newTxMetric(namespace, "import", registerer, &errs), - numExportTxs: newTxMetric(namespace, "export", registerer, &errs), + numTxs: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "txs_accepted", + Help: "number of transactions accepted", + }, + txLabels, + ), } - return m, errs.Err -} - -func newTxMetric( - namespace string, - txName string, - registerer prometheus.Registerer, - errs *wrappers.Errs, -) prometheus.Counter { - txMetric := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: txName + "_txs_accepted", - Help: fmt.Sprintf("Number of %s transactions accepted", txName), - }) - errs.Add(registerer.Register(txMetric)) - return txMetric + return m, registerer.Register(m.numTxs) } func (m *txMetrics) BaseTx(*txs.BaseTx) error { - m.numBaseTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "base", + }).Inc() return nil } func (m *txMetrics) CreateAssetTx(*txs.CreateAssetTx) error { - m.numCreateAssetTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "create_asset", + }).Inc() return nil } func (m *txMetrics) OperationTx(*txs.OperationTx) error { - m.numOperationTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "operation", + }).Inc() return nil } func (m *txMetrics) ImportTx(*txs.ImportTx) error { - m.numImportTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "import", + }).Inc() return nil } func (m *txMetrics) ExportTx(*txs.ExportTx) error { - m.numExportTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "export", + }).Inc() return nil } diff --git a/vms/avm/network/config.go b/vms/avm/network/config.go index 8536504d8383..797138ab93f9 100644 --- a/vms/avm/network/config.go +++ b/vms/avm/network/config.go @@ -12,6 +12,14 @@ import ( var DefaultConfig = Config{ MaxValidatorSetStaleness: time.Minute, TargetGossipSize: 20 * units.KiB, + 
PushGossipPercentStake: .9, + PushGossipNumValidators: 100, + PushGossipNumPeers: 0, + PushRegossipNumValidators: 10, + PushRegossipNumPeers: 0, + PushGossipDiscardedCacheSize: 16384, + PushGossipMaxRegossipFrequency: 30 * time.Second, + PushGossipFrequency: 500 * time.Millisecond, PullGossipPollSize: 1, PullGossipFrequency: 1500 * time.Millisecond, PullGossipThrottlingPeriod: 10 * time.Second, @@ -19,7 +27,6 @@ var DefaultConfig = Config{ ExpectedBloomFilterElements: 8 * 1024, ExpectedBloomFilterFalsePositiveProbability: .01, MaxBloomFilterFalsePositiveProbability: .05, - LegacyPushGossipCacheSize: 512, } type Config struct { @@ -30,6 +37,32 @@ type Config struct { // sent when pushing transactions and when responded to transaction pull // requests. TargetGossipSize int `json:"target-gossip-size"` + // PushGossipPercentStake is the percentage of total stake to push + // transactions to in the first round of gossip. Nodes with higher stake are + // preferred over nodes with less stake to minimize the number of messages + // sent over the p2p network. + PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` + // PushGossipNumValidators is the number of validators to push transactions + // to in the first round of gossip. + PushGossipNumValidators int `json:"push-gossip-num-validators"` + // PushGossipNumPeers is the number of peers to push transactions to in the + // first round of gossip. + PushGossipNumPeers int `json:"push-gossip-num-peers"` + // PushRegossipNumValidators is the number of validators to push + // transactions to after the first round of gossip. + PushRegossipNumValidators int `json:"push-regossip-num-validators"` + // PushRegossipNumPeers is the number of peers to push transactions to after + // the first round of gossip. + PushRegossipNumPeers int `json:"push-regossip-num-peers"` + // PushGossipDiscardedCacheSize is the number of txIDs to cache to avoid + // pushing transactions that were recently dropped from the mempool. 
+ PushGossipDiscardedCacheSize int `json:"push-gossip-discarded-cache-size"` + // PushGossipMaxRegossipFrequency is the limit for how frequently a + // transaction will be push gossiped. + PushGossipMaxRegossipFrequency time.Duration `json:"push-gossip-max-regossip-frequency"` + // PushGossipFrequency is how frequently rounds of push gossip are + // performed. + PushGossipFrequency time.Duration `json:"push-gossip-frequency"` // PullGossipPollSize is the number of validators to sample when performing // a round of pull gossip. PullGossipPollSize int `json:"pull-gossip-poll-size"` @@ -57,10 +90,4 @@ type Config struct { // The smaller this number is, the more frequently that the bloom filter // will be regenerated. MaxBloomFilterFalsePositiveProbability float64 `json:"max-bloom-filter-false-positive-probability"` - // LegacyPushGossipCacheSize tracks the most recently received transactions - // and ensures to only gossip them once. - // - // Deprecated: The legacy push gossip mechanism is deprecated in favor of - // the p2p SDK's push gossip mechanism. 
- LegacyPushGossipCacheSize int `json:"legacy-push-gossip-cache-size"` } diff --git a/vms/avm/network/gossip.go b/vms/avm/network/gossip.go index 8dcfc53ec4f6..131cc51688fa 100644 --- a/vms/avm/network/gossip.go +++ b/vms/avm/network/gossip.go @@ -17,7 +17,9 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/txs/mempool" + + xmempool "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" ) var ( @@ -67,7 +69,7 @@ func (g *txParser) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { } func newGossipMempool( - mempool mempool.Mempool, + mempool xmempool.Mempool, registerer prometheus.Registerer, log logging.Logger, txVerifier TxVerifier, @@ -87,7 +89,7 @@ func newGossipMempool( } type gossipMempool struct { - mempool.Mempool + xmempool.Mempool log logging.Logger txVerifier TxVerifier parser txs.Parser @@ -120,10 +122,15 @@ func (g *gossipMempool) Add(tx *txs.Tx) error { return err } - return g.AddVerified(tx) + return g.AddWithoutVerification(tx) +} + +func (g *gossipMempool) Has(txID ids.ID) bool { + _, ok := g.Mempool.Get(txID) + return ok } -func (g *gossipMempool) AddVerified(tx *txs.Tx) error { +func (g *gossipMempool) AddWithoutVerification(tx *txs.Tx) error { if err := g.Mempool.Add(tx); err != nil { g.Mempool.MarkDropped(tx.ID(), err) return err diff --git a/vms/avm/network/gossip_test.go b/vms/avm/network/gossip_test.go index 0a19dccc1d73..e84f259cbe37 100644 --- a/vms/avm/network/gossip_test.go +++ b/vms/avm/network/gossip_test.go @@ -5,7 +5,6 @@ package network import ( "testing" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -34,7 +33,6 @@ func TestMarshaller(t *testing.T) { require := require.New(t) parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, @@ -65,7 +63,7 @@ func 
TestGossipMempoolAdd(t *testing.T) { baseMempool, err := mempool.New("", metrics, toEngine) require.NoError(err) - parser, err := txs.NewParser(time.Time{}, nil) + parser, err := txs.NewParser(nil) require.NoError(err) mempool, err := newGossipMempool( @@ -102,7 +100,7 @@ func TestGossipMempoolAddVerified(t *testing.T) { baseMempool, err := mempool.New("", metrics, toEngine) require.NoError(err) - parser, err := txs.NewParser(time.Time{}, nil) + parser, err := txs.NewParser(nil) require.NoError(err) mempool, err := newGossipMempool( @@ -128,6 +126,6 @@ func TestGossipMempoolAddVerified(t *testing.T) { TxID: ids.GenerateTestID(), } - require.NoError(mempool.AddVerified(tx)) + require.NoError(mempool.AddWithoutVerification(tx)) require.True(mempool.bloom.Has(tx)) } diff --git a/vms/avm/network/network.go b/vms/avm/network/network.go index 9cad3cb9aa63..ed565b1bd578 100644 --- a/vms/avm/network/network.go +++ b/vms/avm/network/network.go @@ -5,22 +5,18 @@ package network import ( "context" - "sync" "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/components/message" ) const txGossipHandlerID = 0 @@ -33,22 +29,22 @@ var ( type Network struct { *p2p.Network - txPushGossiper gossip.Accumulator[*txs.Tx] - txPullGossiper gossip.Gossiper - txPullGossipFrequency time.Duration - - ctx *snow.Context + log logging.Logger parser txs.Parser mempool *gossipMempool appSender common.AppSender - // gossip related attributes - recentTxsLock sync.Mutex - 
recentTxs *cache.LRU[ids.ID, struct{}] + txPushGossiper *gossip.PushGossiper[*txs.Tx] + txPushGossipFrequency time.Duration + txPullGossiper gossip.Gossiper + txPullGossipFrequency time.Duration } func New( - ctx *snow.Context, + log logging.Logger, + nodeID ids.NodeID, + subnetID ids.ID, + vdrs validators.State, parser txs.Parser, txVerifier TxVerifier, mempool mempool.Mempool, @@ -56,7 +52,7 @@ func New( registerer prometheus.Registerer, config Config, ) (*Network, error) { - p2pNetwork, err := p2p.NewNetwork(ctx.Log, appSender, registerer, "p2p") + p2pNetwork, err := p2p.NewNetwork(log, appSender, registerer, "p2p") if err != nil { return nil, err } @@ -66,9 +62,9 @@ func New( } validators := p2p.NewValidators( p2pNetwork.Peers, - ctx.Log, - ctx.SubnetID, - ctx.ValidatorState, + log, + subnetID, + vdrs, config.MaxValidatorSetStaleness, ) txGossipClient := p2pNetwork.NewClient( @@ -80,17 +76,10 @@ func New( return nil, err } - txPushGossiper := gossip.NewPushGossiper[*txs.Tx]( - marshaller, - txGossipClient, - txGossipMetrics, - config.TargetGossipSize, - ) - gossipMempool, err := newGossipMempool( mempool, registerer, - ctx.Log, + log, txVerifier, parser, config.ExpectedBloomFilterElements, @@ -101,9 +90,31 @@ func New( return nil, err } - var txPullGossiper gossip.Gossiper - txPullGossiper = gossip.NewPullGossiper[*txs.Tx]( - ctx.Log, + txPushGossiper, err := gossip.NewPushGossiper[*txs.Tx]( + marshaller, + gossipMempool, + validators, + txGossipClient, + txGossipMetrics, + gossip.BranchingFactor{ + StakePercentage: config.PushGossipPercentStake, + Validators: config.PushGossipNumValidators, + Peers: config.PushGossipNumPeers, + }, + gossip.BranchingFactor{ + Validators: config.PushRegossipNumValidators, + Peers: config.PushRegossipNumPeers, + }, + config.PushGossipDiscardedCacheSize, + config.TargetGossipSize, + config.PushGossipMaxRegossipFrequency, + ) + if err != nil { + return nil, err + } + + var txPullGossiper gossip.Gossiper = 
gossip.NewPullGossiper[*txs.Tx]( + log, marshaller, gossipMempool, txGossipClient, @@ -114,14 +125,13 @@ func New( // Gossip requests are only served if a node is a validator txPullGossiper = gossip.ValidatorGossiper{ Gossiper: txPullGossiper, - NodeID: ctx.NodeID, + NodeID: nodeID, Validators: validators, } handler := gossip.NewHandler[*txs.Tx]( - ctx.Log, + log, marshaller, - txPushGossiper, gossipMempool, txGossipMetrics, config.TargetGossipSize, @@ -134,10 +144,10 @@ func New( config.PullGossipThrottlingPeriod, config.PullGossipThrottlingLimit, ), - ctx.Log, + log, ), validators, - ctx.Log, + log, ) // We allow pushing txs between all peers, but only serve gossip requests @@ -153,144 +163,51 @@ func New( return &Network{ Network: p2pNetwork, - txPushGossiper: txPushGossiper, - txPullGossiper: txPullGossiper, - txPullGossipFrequency: config.PullGossipFrequency, - ctx: ctx, + log: log, parser: parser, mempool: gossipMempool, appSender: appSender, - - recentTxs: &cache.LRU[ids.ID, struct{}]{ - Size: config.LegacyPushGossipCacheSize, - }, + txPushGossiper: txPushGossiper, + txPushGossipFrequency: config.PushGossipFrequency, + txPullGossiper: txPullGossiper, + txPullGossipFrequency: config.PullGossipFrequency, }, nil } -func (n *Network) Gossip(ctx context.Context) { - gossip.Every(ctx, n.ctx.Log, n.txPullGossiper, n.txPullGossipFrequency) +func (n *Network) PushGossip(ctx context.Context) { + gossip.Every(ctx, n.log, n.txPushGossiper, n.txPushGossipFrequency) } -func (n *Network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { - n.ctx.Log.Debug("called AppGossip message handler", - zap.Stringer("nodeID", nodeID), - zap.Int("messageLen", len(msgBytes)), - ) - - msgIntf, err := message.Parse(msgBytes) - if err != nil { - n.ctx.Log.Debug("forwarding AppGossip message to SDK network", - zap.String("reason", "failed to parse message"), - ) - - return n.Network.AppGossip(ctx, nodeID, msgBytes) - } - - msg, ok := msgIntf.(*message.Tx) - if !ok { 
- n.ctx.Log.Debug("dropping unexpected message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - tx, err := n.parser.ParseTx(msg.Tx) - if err != nil { - n.ctx.Log.Verbo("received invalid tx", - zap.Stringer("nodeID", nodeID), - zap.Binary("tx", msg.Tx), - zap.Error(err), - ) - return nil - } - - if err := n.mempool.Add(tx); err == nil { - txID := tx.ID() - n.txPushGossiper.Add(tx) - if err := n.txPushGossiper.Gossip(ctx); err != nil { - n.ctx.Log.Error("failed to gossip tx", - zap.Stringer("txID", tx.ID()), - zap.Error(err), - ) - } - n.gossipTxMessage(ctx, txID, msgBytes) - } - return nil +func (n *Network) PullGossip(ctx context.Context) { + gossip.Every(ctx, n.log, n.txPullGossiper, n.txPullGossipFrequency) } -// IssueTx attempts to add a tx to the mempool, after verifying it. If the tx is -// added to the mempool, it will attempt to push gossip the tx to random peers -// in the network using both the legacy and p2p SDK. +// IssueTxFromRPC attempts to add a tx to the mempool, after verifying it. If +// the tx is added to the mempool, it will attempt to push gossip the tx to +// random peers in the network. // // If the tx is already in the mempool, mempool.ErrDuplicateTx will be // returned. // If the tx is not added to the mempool, an error will be returned. -func (n *Network) IssueTx(ctx context.Context, tx *txs.Tx) error { +func (n *Network) IssueTxFromRPC(tx *txs.Tx) error { if err := n.mempool.Add(tx); err != nil { return err } - return n.gossipTx(ctx, tx) + n.txPushGossiper.Add(tx) + return nil } -// IssueVerifiedTx attempts to add a tx to the mempool, without first verifying -// it. If the tx is added to the mempool, it will attempt to push gossip the tx -// to random peers in the network using both the legacy and p2p SDK. +// IssueTxFromRPCWithoutVerification attempts to add a tx to the mempool, +// without first verifying it. If the tx is added to the mempool, it will +// attempt to push gossip the tx to random peers in the network. 
// // If the tx is already in the mempool, mempool.ErrDuplicateTx will be // returned. // If the tx is not added to the mempool, an error will be returned. -func (n *Network) IssueVerifiedTx(ctx context.Context, tx *txs.Tx) error { - if err := n.mempool.AddVerified(tx); err != nil { +func (n *Network) IssueTxFromRPCWithoutVerification(tx *txs.Tx) error { + if err := n.mempool.AddWithoutVerification(tx); err != nil { return err } - return n.gossipTx(ctx, tx) -} - -// gossipTx pushes the tx to peers using both the legacy and p2p SDK. -func (n *Network) gossipTx(ctx context.Context, tx *txs.Tx) error { n.txPushGossiper.Add(tx) - if err := n.txPushGossiper.Gossip(ctx); err != nil { - n.ctx.Log.Error("failed to gossip tx", - zap.Stringer("txID", tx.ID()), - zap.Error(err), - ) - } - - txBytes := tx.Bytes() - msg := &message.Tx{ - Tx: txBytes, - } - msgBytes, err := message.Build(msg) - if err != nil { - return err - } - - txID := tx.ID() - n.gossipTxMessage(ctx, txID, msgBytes) return nil } - -// gossipTxMessage pushes the tx message to peers using the legacy format. -// If the tx was recently gossiped, this function does nothing. -func (n *Network) gossipTxMessage(ctx context.Context, txID ids.ID, msgBytes []byte) { - n.recentTxsLock.Lock() - _, has := n.recentTxs.Get(txID) - n.recentTxs.Put(txID, struct{}{}) - n.recentTxsLock.Unlock() - - // Don't gossip a transaction if it has been recently gossiped. 
- if has { - return - } - - n.ctx.Log.Debug("gossiping tx", - zap.Stringer("txID", txID), - ) - - if err := n.appSender.SendAppGossip(ctx, msgBytes); err != nil { - n.ctx.Log.Error("failed to gossip tx", - zap.Stringer("txID", txID), - zap.Error(err), - ) - } -} diff --git a/vms/avm/network/network_test.go b/vms/avm/network/network_test.go index 0e4ff2990b6d..bdcfbda6a136 100644 --- a/vms/avm/network/network_test.go +++ b/vms/avm/network/network_test.go @@ -14,24 +14,31 @@ import ( "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm/block/executor" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/message" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/txs/mempool" + + xmempool "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" ) var ( testConfig = Config{ MaxValidatorSetStaleness: time.Second, TargetGossipSize: 1, + PushGossipNumValidators: 1, + PushGossipNumPeers: 0, + PushRegossipNumValidators: 1, + PushRegossipNumPeers: 0, + PushGossipDiscardedCacheSize: 1, + PushGossipMaxRegossipFrequency: time.Second, + PushGossipFrequency: time.Second, PullGossipPollSize: 1, PullGossipFrequency: time.Second, PullGossipThrottlingPeriod: time.Second, @@ -39,204 +46,15 @@ var ( ExpectedBloomFilterElements: 10, ExpectedBloomFilterFalsePositiveProbability: .1, MaxBloomFilterFalsePositiveProbability: .5, - LegacyPushGossipCacheSize: 512, } errTest = errors.New("test error") ) -func 
TestNetworkAppGossip(t *testing.T) { - testTx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: 1, - BlockchainID: ids.GenerateTestID(), - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - } - - parser, err := txs.NewParser( - time.Time{}, - []fxs.Fx{ - &secp256k1fx.Fx{}, - }, - ) - require.NoError(t, err) - require.NoError(t, testTx.Initialize(parser.Codec())) - - type test struct { - name string - msgBytesFunc func() []byte - mempoolFunc func(*gomock.Controller) mempool.Mempool - txVerifierFunc func(*gomock.Controller) TxVerifier - appSenderFunc func(*gomock.Controller) common.AppSender - } - - tests := []test{ - { - name: "invalid message bytes", - msgBytesFunc: func() []byte { - return []byte{0x00} - }, - }, - { - name: "invalid tx bytes", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: []byte{0x00}, - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - }, - { - name: "tx already in mempool", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Get(gomock.Any()).Return(testTx, true) - return mempool - }, - }, - { - name: "tx previously dropped", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Get(gomock.Any()).Return(nil, false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) - return mempool - }, - }, - { - name: "transaction invalid", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - 
require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Get(gomock.Any()).Return(nil, false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) - mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) - return mempool - }, - txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { - txVerifier := executor.NewMockManager(ctrl) - txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(errTest) - return txVerifier - }, - }, - { - name: "happy path", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Get(gomock.Any()).Return(nil, false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) - mempool.EXPECT().Add(gomock.Any()).Return(nil) - mempool.EXPECT().Len().Return(0) - mempool.EXPECT().RequestBuildBlock() - return mempool - }, - txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { - txVerifier := executor.NewMockManager(ctrl) - txVerifier.EXPECT().VerifyTx(gomock.Any()).Return(nil) - return txVerifier - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) - return appSender - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - parser, err := txs.NewParser( - time.Time{}, - []fxs.Fx{ - &secp256k1fx.Fx{}, - &nftfx.Fx{}, - &propertyfx.Fx{}, - }, - ) - require.NoError(err) - - mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { - return mempool.NewMockMempool(ctrl) - } - if tt.mempoolFunc != nil { - mempoolFunc = tt.mempoolFunc - } - - txVerifierFunc := 
func(ctrl *gomock.Controller) TxVerifier { - return executor.NewMockManager(ctrl) - } - if tt.txVerifierFunc != nil { - txVerifierFunc = tt.txVerifierFunc - } - - appSenderFunc := func(ctrl *gomock.Controller) common.AppSender { - return common.NewMockSender(ctrl) - } - if tt.appSenderFunc != nil { - appSenderFunc = tt.appSenderFunc - } - - n, err := New( - &snow.Context{ - Log: logging.NoLog{}, - }, - parser, - txVerifierFunc(ctrl), - mempoolFunc(ctrl), - appSenderFunc(ctrl), - prometheus.NewRegistry(), - testConfig, - ) - require.NoError(err) - require.NoError(n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc())) - }) - } -} - -func TestNetworkIssueTx(t *testing.T) { +func TestNetworkIssueTxFromRPC(t *testing.T) { type test struct { name string - mempoolFunc func(*gomock.Controller) mempool.Mempool + mempoolFunc func(*gomock.Controller) xmempool.Mempool txVerifierFunc func(*gomock.Controller) TxVerifier appSenderFunc func(*gomock.Controller) common.AppSender expectedErr error @@ -245,8 +63,8 @@ func TestNetworkIssueTx(t *testing.T) { tests := []test{ { name: "mempool has transaction", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, true) return mempool }, @@ -254,8 +72,8 @@ func TestNetworkIssueTx(t *testing.T) { }, { name: "transaction marked as dropped in mempool", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) return mempool @@ -264,8 +82,8 @@ func TestNetworkIssueTx(t *testing.T) { }, { name: "transaction invalid", - mempoolFunc: func(ctrl 
*gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) @@ -280,8 +98,8 @@ func TestNetworkIssueTx(t *testing.T) { }, { name: "can't add transaction to mempool", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(errTest) @@ -297,13 +115,14 @@ func TestNetworkIssueTx(t *testing.T) { }, { name: "happy path", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(nil) mempool.EXPECT().Len().Return(0) mempool.EXPECT().RequestBuildBlock() + mempool.EXPECT().Get(gomock.Any()).Return(nil, true).Times(2) return mempool }, txVerifierFunc: func(ctrl *gomock.Controller) TxVerifier { @@ -313,7 +132,7 @@ func TestNetworkIssueTx(t *testing.T) { }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) return appSender }, expectedErr: nil, @@ -326,7 +145,6 @@ func TestNetworkIssueTx(t *testing.T) { ctrl := gomock.NewController(t) parser, err := 
txs.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, &nftfx.Fx{}, @@ -335,8 +153,8 @@ func TestNetworkIssueTx(t *testing.T) { ) require.NoError(err) - mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { - return mempool.NewMockMempool(ctrl) + mempoolFunc := func(ctrl *gomock.Controller) xmempool.Mempool { + return xmempool.NewMockMempool(ctrl) } if tt.mempoolFunc != nil { mempoolFunc = tt.mempoolFunc @@ -357,8 +175,16 @@ func TestNetworkIssueTx(t *testing.T) { } n, err := New( - &snow.Context{ - Log: logging.NoLog{}, + logging.NoLog{}, + ids.EmptyNodeID, + ids.Empty, + &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return nil, nil + }, }, parser, txVerifierFunc(ctrl), @@ -368,16 +194,18 @@ func TestNetworkIssueTx(t *testing.T) { testConfig, ) require.NoError(err) - err = n.IssueTx(context.Background(), &txs.Tx{}) + err = n.IssueTxFromRPC(&txs.Tx{}) require.ErrorIs(err, tt.expectedErr) + + require.NoError(n.txPushGossiper.Gossip(context.Background())) }) } } -func TestNetworkIssueVerifiedTx(t *testing.T) { +func TestNetworkIssueTxFromRPCWithoutVerification(t *testing.T) { type test struct { name string - mempoolFunc func(*gomock.Controller) mempool.Mempool + mempoolFunc func(*gomock.Controller) xmempool.Mempool appSenderFunc func(*gomock.Controller) common.AppSender expectedErr error } @@ -385,8 +213,8 @@ func TestNetworkIssueVerifiedTx(t *testing.T) { tests := []test{ { name: "can't add transaction to mempool", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) mempool.EXPECT().Add(gomock.Any()).Return(errTest) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) return mempool @@ -395,8 +223,9 @@ func 
TestNetworkIssueVerifiedTx(t *testing.T) { }, { name: "happy path", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) xmempool.Mempool { + mempool := xmempool.NewMockMempool(ctrl) + mempool.EXPECT().Get(gomock.Any()).Return(nil, true).Times(2) mempool.EXPECT().Add(gomock.Any()).Return(nil) mempool.EXPECT().Len().Return(0) mempool.EXPECT().RequestBuildBlock() @@ -404,7 +233,7 @@ func TestNetworkIssueVerifiedTx(t *testing.T) { }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) return appSender }, expectedErr: nil, @@ -417,7 +246,6 @@ func TestNetworkIssueVerifiedTx(t *testing.T) { ctrl := gomock.NewController(t) parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, &nftfx.Fx{}, @@ -426,8 +254,8 @@ func TestNetworkIssueVerifiedTx(t *testing.T) { ) require.NoError(err) - mempoolFunc := func(ctrl *gomock.Controller) mempool.Mempool { - return mempool.NewMockMempool(ctrl) + mempoolFunc := func(ctrl *gomock.Controller) xmempool.Mempool { + return xmempool.NewMockMempool(ctrl) } if tt.mempoolFunc != nil { mempoolFunc = tt.mempoolFunc @@ -441,8 +269,16 @@ func TestNetworkIssueVerifiedTx(t *testing.T) { } n, err := New( - &snow.Context{ - Log: logging.NoLog{}, + logging.NoLog{}, + ids.EmptyNodeID, + ids.Empty, + &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return nil, nil + }, }, parser, executor.NewMockManager(ctrl), // Should never verify a tx @@ -452,48 +288,10 @@ func TestNetworkIssueVerifiedTx(t *testing.T) { testConfig, ) require.NoError(err) - err = 
n.IssueVerifiedTx(context.Background(), &txs.Tx{}) + err = n.IssueTxFromRPCWithoutVerification(&txs.Tx{}) require.ErrorIs(err, tt.expectedErr) + + require.NoError(n.txPushGossiper.Gossip(context.Background())) }) } } - -func TestNetworkGossipTx(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - parser, err := txs.NewParser( - time.Time{}, - []fxs.Fx{ - &secp256k1fx.Fx{}, - }, - ) - require.NoError(err) - - appSender := common.NewMockSender(ctrl) - - n, err := New( - &snow.Context{ - Log: logging.NoLog{}, - }, - parser, - executor.NewMockManager(ctrl), - mempool.NewMockMempool(ctrl), - appSender, - prometheus.NewRegistry(), - testConfig, - ) - require.NoError(err) - - // Case: Tx was recently gossiped - txID := ids.GenerateTestID() - n.recentTxs.Put(txID, struct{}{}) - n.gossipTxMessage(context.Background(), txID, []byte{}) - // Didn't make a call to SendAppGossip - - // Case: Tx was not recently gossiped - msgBytes := []byte{1, 2, 3} - appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) - n.gossipTxMessage(context.Background(), ids.GenerateTestID(), msgBytes) - // Did make a call to SendAppGossip -} diff --git a/vms/avm/service.go b/vms/avm/service.go index 4dcc210df813..bc6dadd8705f 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -213,7 +213,7 @@ func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, reply *api.JSO return err } - reply.TxID, err = s.vm.issueTx(tx) + reply.TxID, err = s.vm.issueTxFromRPC(tx) return err } @@ -433,7 +433,9 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, reply *api.G limit, ) } else { - utxos, endAddr, endUTXOID, err = s.vm.GetAtomicUTXOs( + utxos, endAddr, endUTXOID, err = avax.GetAtomicUTXOs( + s.vm.ctx.SharedMemory, + s.vm.parser.Codec(), sourceChain, addrSet, startAddr, @@ -714,7 +716,7 @@ func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *Ass return err } - assetID, err := s.vm.issueTx(tx) + assetID, err := 
s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -879,7 +881,7 @@ func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, repl return err } - assetID, err := s.vm.issueTx(tx) + assetID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1199,7 +1201,7 @@ func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *a return err } - txID, err := s.vm.issueTx(tx) + txID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1361,7 +1363,7 @@ func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChang return err } - txID, err := s.vm.issueTx(tx) + txID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1494,7 +1496,7 @@ func (s *Service) SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxI return err } - txID, err := s.vm.issueTx(tx) + txID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1617,7 +1619,7 @@ func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxI return err } - txID, err := s.vm.issueTx(tx) + txID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1754,7 +1756,7 @@ func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) return err } - txID, err := s.vm.issueTx(tx) + txID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1782,7 +1784,15 @@ func (s *Service) buildImport(args *ImportArgs) (*txs.Tx, error) { return nil, err } - atomicUTXOs, _, _, err := s.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, int(maxPageSize)) + atomicUTXOs, _, _, err := avax.GetAtomicUTXOs( + s.vm.ctx.SharedMemory, + s.vm.parser.Codec(), 
+ chainID, + kc.Addrs, + ids.ShortEmpty, + ids.Empty, + int(maxPageSize), + ) if err != nil { return nil, fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) } @@ -1885,7 +1895,7 @@ func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDC return err } - txID, err := s.vm.issueTx(tx) + txID, err := s.vm.issueTxFromRPC(tx) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } diff --git a/vms/avm/service.md b/vms/avm/service.md new file mode 100644 index 000000000000..dfba13b05f0e --- /dev/null +++ b/vms/avm/service.md @@ -0,0 +1,2319 @@ +--- +tags: [X-Chain, AvalancheGo APIs] +description: This page is an overview of the Exchange Chain (X-Chain) API associated with AvalancheGo. +sidebar_label: API +pagination_label: X-Chain API +--- + +# X-Chain API + +The [X-Chain](/learn/avalanche/avalanche-platform.md#x-chain), +Avalanche’s native platform for creating and trading assets, is an instance of the Avalanche Virtual +Machine (AVM). This API allows clients to create and trade assets on the X-Chain and other instances +of the AVM. + +## Format + +This API uses the `json 2.0` RPC format. For more information on making JSON RPC calls, see +[here](/reference/standards/guides/issuing-api-calls.md). + +## Endpoints + +`/ext/bc/X` to interact with the X-Chain. + +`/ext/bc/blockchainID` to interact with other AVM instances, where `blockchainID` is the ID of a +blockchain running the AVM. + +## Methods + +### `avm.buildGenesis` + +Given a JSON representation of this Virtual Machine’s genesis state, create the byte representation +of that state. + +#### **Endpoint** + +This call is made to the AVM’s static API endpoint: + +`/ext/vm/avm` + +Note: addresses should not include a chain prefix (that is `X-`) in calls to the static API endpoint +because these prefixes refer to a specific chain. 
+ +**Signature:** + +```sh +avm.buildGenesis({ + networkID: int, + genesisData: JSON, + encoding: string, //optional +}) -> { + bytes: string, + encoding: string, +} +``` + +Encoding specifies the encoding format to use for arbitrary bytes, that is the genesis bytes that are +returned. Can only be `hex` when a value is provided. + +`genesisData` has this form: + +```json +{ +"genesisData" : + { + "assetAlias1": { // Each object defines an asset + "name": "human readable name", + "symbol":"AVAL", // Symbol is between 0 and 4 characters + "initialState": { + "fixedCap" : [ // Choose the asset type. + { // Can be "fixedCap", "variableCap", "limitedTransfer", "nonFungible" + "amount":1000, // At genesis, address A has + "address":"A" // 1000 units of asset + }, + { + "amount":5000, // At genesis, address B has + "address":"B" // 1000 units of asset + }, + ... // Can have many initial holders + ] + } + }, + "assetAliasCanBeAnythingUnique": { // Asset alias can be used in place of assetID in calls + "name": "human readable name", // names need not be unique + "symbol": "AVAL", // symbols need not be unique + "initialState": { + "variableCap" : [ // No units of the asset exist at genesis + { + "minters": [ // The signature of A or B can mint more of + "A", // the asset. + "B" + ], + "threshold":1 + }, + { + "minters": [ // The signatures of 2 of A, B and C can mint + "A", // more of the asset + "B", + "C" + ], + "threshold":2 + }, + ... // Can have many minter sets + ] + } + }, + ... 
// Can list more assets + } +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "id" : 1, + "method" : "avm.buildGenesis", + "params" : { + "networkId": 16, + "genesisData": { + "asset1": { + "name": "myFixedCapAsset", + "symbol":"MFCA", + "initialState": { + "fixedCap" : [ + { + "amount":100000, + "address": "avax13ery2kvdrkd2nkquvs892gl8hg7mq4a6ufnrn6" + }, + { + "amount":100000, + "address": "avax1rvks3vpe4cm9yc0rrk8d5855nd6yxxutfc2h2r" + }, + { + "amount":50000, + "address": "avax1ntj922dj4crc4pre4e0xt3dyj0t5rsw9uw0tus" + }, + { + "amount":50000, + "address": "avax1yk0xzmqyyaxn26sqceuky2tc2fh2q327vcwvda" + } + ] + } + }, + "asset2": { + "name": "myVarCapAsset", + "symbol":"MVCA", + "initialState": { + "variableCap" : [ + { + "minters": [ + "avax1kcfg6avc94ct3qh2mtdg47thsk8nrflnrgwjqr", + "avax14e2s22wxvf3c7309txxpqs0qe9tjwwtk0dme8e" + ], + "threshold":1 + }, + { + "minters": [ + "avax1y8pveyn82gjyqr7kqzp72pqym6xlch9gt5grck", + "avax1c5cmm0gem70rd8dcnpel63apzfnfxye9kd4wwe", + "avax12euam2lwtwa8apvfdl700ckhg86euag2hlhmyw" + ], + "threshold":2 + } + ] + } + } + }, + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/vm/avm +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "bytes": "0x0000000000010006617373657431000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f6d794669786564436170417373657400044d464341000000000100000000000000010000000700000000000186a10000000000000000000000010000000152b219bc1b9ab0a9f2e3f9216e4460bd5db8d153bfa57c3c", + "encoding": "hex" + }, + "id": 1 +} +``` + +### `avm.createAddress` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Create a new address controlled by the given user. 
+ +**Signature:** + +```sh +avm.createAddress({ + username: string, + password: string +}) -> {address: string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "avm.createAddress", + "params": { + "username": "myUsername", + "password": "myPassword" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5" + }, + "id": 1 +} +``` + + + +### `avm.createFixedCapAsset` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Create a new fixed-cap, fungible asset. A quantity of it is created at initialization and then no +more is ever created. The asset can be sent with `avm.send`. + +**Signature:** + +```sh +avm.createFixedCapAsset({ + name: string, + symbol: string, + denomination: int, //optional + initialHolders: []{ + address: string, + amount: int + }, + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> +{ + assetID: string, + changeAddr: string +} +``` + +- `name` is a human-readable name for the asset. Not necessarily unique. +- `symbol` is a shorthand symbol for the asset. Between 0 and 4 characters. Not necessarily unique. + May be omitted. +- `denomination` determines how balances of this asset are displayed by user interfaces. If + `denomination` is 0, 100 units of this asset are displayed as 100. If `denomination` is 1, 100 + units of this asset are displayed as 10.0. If `denomination` is 2, 100 units of this asset are + displayed as 1.00, etc. Defaults to 0. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. 
+- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- `username` and `password` denote the user paying the transaction fee. +- Each element in `initialHolders` specifies that `address` holds `amount` units of the asset at + genesis. +- `assetID` is the ID of the new asset. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.createFixedCapAsset", + "params" :{ + "name": "myFixedCapAsset", + "symbol":"MFCA", + "initialHolders": [ + { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "amount": 10000 + }, + { + "address":"X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "amount":50000 + } + ], + "from":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr":"X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "assetID": "ZiKfqRXCZgHLgZ4rxGU9Qbycdzuq5DRY4tdSNS9ku8kcNxNLD", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `avm.createNFTAsset` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Create a new non-fungible asset. No units of the asset exist at initialization. Minters can mint +units of this asset using `avm.mintNFT`. 
+ +**Signature:** + +```sh +avm.createNFTAsset({ + name: string, + symbol: string, + minterSets: []{ + minters: []string, + threshold: int + }, + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> + { + assetID: string, + changeAddr: string, +} +``` + +- `name` is a human-readable name for the asset. Not necessarily unique. +- `symbol` is a shorthand symbol for the asset. Between 0 and 4 characters. Not necessarily unique. + May be omitted. +- `minterSets` is a list where each element specifies that `threshold` of the addresses in `minters` + may together mint more of the asset by signing a minting transaction. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- `username` pays the transaction fee. +- `assetID` is the ID of the new asset. +- `changeAddr` in the result is the address where any change was sent. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.createNFTAsset", + "params" :{ + "name":"Coincert", + "symbol":"TIXX", + "minterSets":[ + { + "minters":[ + "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + ], + "threshold": 1 + } + ], + "from": ["X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8"], + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "assetID": "2KGdt2HpFKpTH5CtGZjYt5XPWs6Pv9DLoRBhiFfntbezdRvZWP", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + }, + "id": 1 +} +``` + +### `avm.createVariableCapAsset` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). 
+ +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Create a new variable-cap, fungible asset. No units of the asset exist at initialization. Minters +can mint units of this asset using `avm.mint`. + +**Signature:** + +```sh +avm.createVariableCapAsset({ + name: string, + symbol: string, + denomination: int, //optional + minterSets: []{ + minters: []string, + threshold: int + }, + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> +{ + assetID: string, + changeAddr: string, +} +``` + +- `name` is a human-readable name for the asset. Not necessarily unique. +- `symbol` is a shorthand symbol for the asset. Between 0 and 4 characters. Not necessarily unique. + May be omitted. +- `denomination` determines how balances of this asset are displayed by user interfaces. If + denomination is 0, 100 units of this asset are displayed as 100. If denomination is 1, 100 units + of this asset are displayed as 10.0. If denomination is 2, 100 units of this asset are displays as + .100, etc. +- `minterSets` is a list where each element specifies that `threshold` of the addresses in `minters` + may together mint more of the asset by signing a minting transaction. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- `username` pays the transaction fee. +- `assetID` is the ID of the new asset. +- `changeAddr` in the result is the address where any change was sent. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.createVariableCapAsset", + "params" :{ + "name":"myVariableCapAsset", + "symbol":"MFCA", + "minterSets":[ + { + "minters":[ + "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5" + ], + "threshold": 1 + }, + { + "minters": [ + "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5" + ], + "threshold": 2 + } + ], + "from":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr":"X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "assetID": "2QbZFE7J4MAny9iXHUwq8Pz8SpFhWk3maCw4SkinVPv6wPmAbK", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `avm.export` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +::: +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Send an asset from the X-Chain to the P-Chain or C-Chain. + +**Signature:** + +```sh +avm.export({ + to: string, + amount: int, + assetID: string, + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string, +}) -> +{ + txID: string, + changeAddr: string, +} +``` + +- `to` is the P-Chain or C-Chain address the asset is sent to. +- `amount` is the amount of the asset to send. +- `assetID` is the asset id of the asset which is sent. Use `AVAX` for AVAX exports. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. 
If omitted, change is sent to one of the + addresses controlled by the user. +- The asset is sent from addresses controlled by `username` +- `password` is `username`‘s password. + +- `txID` is this transaction’s ID. +- `changeAddr` in the result is the address where any change was sent. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.export", + "params" :{ + "to":"C-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "amount": 10, + "assetID": "AVAX", + "from":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr":"X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "txID": "2Eu16yNaepP57XrrJgjKGpiEDandpiGWW8xbUm6wcTYny3fejj", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + }, + "id": 1 +} +``` + + + +### `avm.exportKey` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Get the private key that controls a given address. The returned private key can be added to a user +with [`avm.importKey`](/reference/avalanchego/x-chain/api.md#avmimportkey). + +**Signature:** + +```sh +avm.exportKey({ + username: string, + password: string, + address: string +}) -> {privateKey: string} +``` + +- `username` must control `address`. +- `privateKey` is the string representation of the private key that controls `address`. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.exportKey", + "params" :{ + "username":"myUsername", + "password":"myPassword", + "address":"X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "privateKey": "PrivateKey-2w4XiXxPfQK4TypYqnohRL8DRNTz9cGiGmwQ1zmgEqD9c9KWLq" + } +} +``` + +### `avm.getAddressTxs` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Returns all transactions that change the balance of the given address. A transaction is said to +change an address's balance if either is true: + +- A UTXO that the transaction consumes was at least partially owned by the address. +- A UTXO that the transaction produces is at least partially owned by the address. + +:::tip +Note: Indexing (`index-transactions`) must be enabled in the X-chain config. +::: + +**Signature:** + +```sh +avm.getAddressTxs({ + address: string, + cursor: uint64, // optional, leave empty to get the first page + assetID: string, + pageSize: uint64 // optional, defaults to 1024 +}) -> { + txIDs: []string, + cursor: uint64, +} +``` + +**Request Parameters:** + +- `address`: The address for which we're fetching related transactions +- `assetID`: Only return transactions that changed the balance of this asset. Must be an ID or an + alias for an asset. +- `pageSize`: Number of items to return per page. Optional. Defaults to 1024. + +**Response Parameter:** + +- `txIDs`: List of transaction IDs that affected the balance of this address. +- `cursor`: Page number or offset. Use this in request to get the next page. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.getAddressTxs", + "params" :{ + "address":"X-local1kpprmfpzzm5lxyene32f6lr7j0aj7gxsu6hp9y", + "assetID":"AVAX", + "pageSize":20 + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "txIDs": ["SsJF7KKwxiUJkczygwmgLqo3XVRotmpKP8rMp74cpLuNLfwf6"], + "cursor": "1" + }, + "id": 1 +} +``` + +### `avm.getAllBalances` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Get the balances of all assets controlled by a given address. + +**Signature:** + +```sh +avm.getAllBalances({address:string}) -> { + balances: []{ + asset: string, + balance: int + } +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.getAllBalances", + "params" :{ + "address":"X-avax1c79e0dd0susp7dc8udq34jgk2yvve7hapvdyht" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "balances": [ + { + "asset": "AVAX", + "balance": "102" + }, + { + "asset": "2sdnziCz37Jov3QSNMXcFRGFJ1tgauaj6L7qfk7yUcRPfQMC79", + "balance": "10000" + } + ] + }, + "id": 1 +} +``` + +### `avm.getAssetDescription` + +Get information about an asset. + +**Signature:** + +```sh +avm.getAssetDescription({assetID: string}) -> { + assetId: string, + name: string, + symbol: string, + denomination: int +} +``` + +- `assetID` is the id of the asset for which the information is requested. +- `name` is the asset’s human-readable, not necessarily unique name. +- `symbol` is the asset’s symbol. +- `denomination` determines how balances of this asset are displayed by user interfaces. If + denomination is 0, 100 units of this asset are displayed as 100. If denomination is 1, 100 units + of this asset are displayed as 10.0. 
If denomination is 2, 100 units of this asset are displayed as
+1.00, etc.
+ +**Signature:** + +```sh +avm.getBlock({ + blockID: string + encoding: string // optional +}) -> { + block: string, + encoding: string +} +``` + +**Request:** + +- `blockID` is the block ID. It should be in cb58 format. +- `encoding` is the encoding format to use. Can be either `hex` or `json`. Defaults to `hex`. + +**Response:** + +- `block` is the transaction encoded to `encoding`. +- `encoding` is the `encoding`. + +#### Hex Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "avm.getBlock", + "params": { + "blockID": "tXJ4xwmR8soHE6DzRNMQPtiwQvuYsHn6eLLBzo2moDqBquqy6", + "encoding": "hex" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "block": "0x00000000002000000000641ad33ede17f652512193721df87994f783ec806bb5640c39ee73676caffcc3215e0651000000000049a80a000000010000000e0000000100000000000000000000000000000000000000000000000000000000000000000000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000070000002e1a2a3910000000000000000000000001000000015cf998275803a7277926912defdf177b2e97b0b400000001e0d825c5069a7336671dd27eaa5c7851d2cf449e7e1cdc469c5c9e5a953955950000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000008908223b680000000100000000000000005e45d02fcc9e585544008f1df7ae5c94bf7f0f2600000000641ad3b600000000642d48b60000005aedf802580000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000070000005aedf80258000000000000000000000001000000015cf998275803a7277926912defdf177b2e97b0b40000000b000000000000000000000001000000012892441ba9a160bcdc596dcd2cc3ad83c3493589000000010000000900000001adf2237a5fe2dfd906265e8e14274aa7a7b2ee60c66213110598ba34fb4824d74f7760321c0c8fb1e8d3c5e86909248e48a7ae02e641da5559351693a8a1939800286d4fa2", + "encoding": "hex" + }, + "id": 1 +} +``` + +### `avm.getBlockByHeight` + +Returns block at the given height. 
+ +**Signature:** + +```sh +avm.getBlockByHeight({ + height: string + encoding: string // optional +}) -> { + block: string, + encoding: string +} +``` + +**Request:** + +- `blockHeight` is the block height. It should be in `string` format. +- `encoding` is the encoding format to use. Can be either `hex` or `json`. Defaults to `hex`. + +**Response:** + +- `block` is the transaction encoded to `encoding`. +- `encoding` is the `encoding`. + +#### Hex Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "avm.getBlockByHeight”, + "params": { + “height”: “275686313486”, + "encoding": “hex” + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "block": "0x00000000002000000000642f6739d4efcdd07e4d4919a7fc2020b8a0f081dd64c262aaace5a6dad22be0b55fec0700000000004db9e100000001000000110000000100000000000000000000000000000000000000000000000000000000000000000000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000070000005c6ece390000000000000000000000000100000001930ab7bf5018bfc6f9435c8b15ba2fe1e619c0230000000000000000ed5f38341e436e5d46e2bb00b45d62ae97d1b050c64bc634ae10626739e35c4b00000001c6dda861341665c3b555b46227fb5e56dc0a870c5482809349f04b00348af2a80000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000005c6edd7b40000000010000000000000001000000090000000178688f4d5055bd8733801f9b52793da885bef424c90526c18e4dd97f7514bf6f0c3d2a0e9a5ea8b761bc41902eb4902c34ef034c4d18c3db7c83c64ffeadd93600731676de", + "encoding": "hex" + }, + "id": 1 +} +``` + +### `avm.getHeight` + +Returns the height of the last accepted block. 
+ +**Signature:** + +```sh +avm.getHeight() -> +{ + height: uint64, +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "avm.getHeight", + "params": {}, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "height": "5094088" + }, + "id": 1 +} +``` + +### `avm.getTx` + +Returns the specified transaction. The `encoding` parameter sets the format of the returned +transaction. Can be either `"hex"` or `"json"`. Defaults to `"hex"`. + +**Signature:** + +```sh +avm.getTx({ + txID: string, + encoding: string, //optional +}) -> { + tx: string, + encoding: string, +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.getTx", + "params" :{ + "txID":"2oJCbb8pfdxEHAf9A8CdN4Afj9VSR3xzyzNkf8tDv7aM1sfNFL", + "encoding": "json" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "tx": { + "unsignedTx": { + "networkID": 1, + "blockchainID": "2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM", + "outputs": [], + "inputs": [ + { + "txID": "2jbZUvi6nHy3Pgmk8xcMpSg5cW6epkPqdKkHSCweb4eRXtq4k9", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 2570382395, + "signatureIndices": [0] + } + } + ], + "memo": "0x", + "destinationChain": "11111111111111111111111111111111LpoYY", + "exportedOutputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": ["X-avax1tnuesf6cqwnjw7fxjyk7lhch0vhf0v95wj5jvy"], + "amount": 2569382395, + "locktime": 0, + "threshold": 1 + } + } + ] + }, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + 
"credential": { + "signatures": [ + "0x46ebcbcfbee3ece1fd15015204045cf3cb77f42c48d0201fc150341f91f086f177cfca8894ca9b4a0c55d6950218e4ea8c01d5c4aefb85cd7264b47bd57d224400" + ] + } + } + ], + "id": "2oJCbb8pfdxEHAf9A8CdN4Afj9VSR3xzyzNkf8tDv7aM1sfNFL" + }, + "encoding": "json" + }, + "id": 1 +} +``` + +Where: + +- `credentials` is a list of this transaction's credentials. Each credential proves that this + transaction's creator is allowed to consume one of this transaction's inputs. Each credential is a + list of signatures. +- `unsignedTx` is the non-signature portion of the transaction. +- `networkID` is the ID of the network this transaction happened on. (Avalanche Mainnet is `1`.) +- `blockchainID` is the ID of the blockchain this transaction happened on. (Avalanche Mainnet + X-Chain is `2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM`.) +- Each element of `outputs` is an output (UTXO) of this transaction that is not being exported to + another chain. +- Each element of `inputs` is an input of this transaction which has not been imported from another + chain. +- Import Transactions have additional fields `sourceChain` and `importedInputs`, which specify the + blockchain ID that assets are being imported from, and the inputs that are being imported. +- Export Transactions have additional fields `destinationChain` and `exportedOutputs`, which specify + the blockchain ID that assets are being exported to, and the UTXOs that are being exported. + +An output contains: + +- `assetID`: The ID of the asset being transferred. (The Mainnet Avax ID is + `FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z`.) +- `fxID`: The ID of the FX this output uses. +- `output`: The FX-specific contents of this output. 
Most outputs use the secp256k1 FX and look like this:
+ +**Signature:** + +```sh +avm.getUTXOs({ + addresses: []string, + limit: int, //optional + startIndex: { //optional + address: string, + utxo: string + }, + sourceChain: string, //optional + encoding: string //optional +}) -> { + numFetched: int, + utxos: []string, + endIndex: { + address: string, + utxo: string + }, + sourceChain: string, //optional + encoding: string +} +``` + +- `utxos` is a list of UTXOs such that each UTXO references at least one address in `addresses`. +- At most `limit` UTXOs are returned. If `limit` is omitted or greater than 1024, it is set to 1024. +- This method supports pagination. `endIndex` denotes the last UTXO returned. To get the next set of + UTXOs, use the value of `endIndex` as `startIndex` in the next call. +- If `startIndex` is omitted, will fetch all UTXOs up to `limit`. +- When using pagination (when `startIndex` is provided), UTXOs are not guaranteed to be unique + across multiple calls. That is, a UTXO may appear in the result of the first call, and then again + in the second call. +- When using pagination, consistency is not guaranteed across multiple calls. That is, the UTXO set + of the addresses may have changed between calls. +- `encoding` sets the format for the returned UTXOs. Can only be `hex` when a value is provided. + +#### **Example** + +Suppose we want all UTXOs that reference at least one of +`X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5` and `X-avax1d09qn852zcy03sfc9hay2llmn9hsgnw4tp3dv6`. 
+ +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.getUTXOs", + "params" :{ + "addresses":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", "X-avax1d09qn852zcy03sfc9hay2llmn9hsgnw4tp3dv6"], + "limit":5, + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +This gives response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "5", + "utxos": [ + "0x0000a195046108a85e60f7a864bb567745a37f50c6af282103e47cc62f036cee404700000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216c1f01765", + "0x0000ae8b1b94444eed8de9a81b1222f00f1b4133330add23d8ac288bffa98b85271100000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216473d042a", + "0x0000731ce04b1feefa9f4291d869adc30a33463f315491e164d89be7d6d2d7890cfc00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f21600dd3047", + "0x0000b462030cc4734f24c0bc224cf0d16ee452ea6b67615517caffead123ab4fbf1500000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216c71b387e", + "0x000054f6826c39bc957c0c6d44b70f961a994898999179cc32d21eb09c1908d7167b00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f2166290e79d" + ], + "endIndex": { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "kbUThAUfmBXUmRgTpgD6r3nLj7rJUGho6xyht5nouNNypH45j" + }, + "encoding": "hex" + }, + "id": 1 +} +``` + +Since `numFetched` is the same as `limit`, we can tell that there may be more UTXOs that were not +fetched. 
We call the method again, this time with `startIndex`: + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :2, + "method" :"avm.getUTXOs", + "params" :{ + "addresses":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "limit":5, + "startIndex": { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "kbUThAUfmBXUmRgTpgD6r3nLj7rJUGho6xyht5nouNNypH45j" + }, + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +This gives response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "4", + "utxos": [ + "0x000020e182dd51ee4dcd31909fddd75bb3438d9431f8e4efce86a88a684f5c7fa09300000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f21662861d59", + "0x0000a71ba36c475c18eb65dc90f6e85c4fd4a462d51c5de3ac2cbddf47db4d99284e00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f21665f6f83f", + "0x0000925424f61cb13e0fbdecc66e1270de68de9667b85baa3fdc84741d048daa69fa00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216afecf76a", + "0x000082f30327514f819da6009fad92b5dba24d27db01e29ad7541aa8e6b6b554615c00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216779c2d59" + ], + "endIndex": { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "21jG2RfqyHUUgkTLe2tUp6ETGLriSDTW3th8JXFbPRNiSZ11jK" + }, + "encoding": "hex" + }, + "id": 1 +} +``` + +Since `numFetched` is less than `limit`, we know that we are done fetching UTXOs and don’t need to +call this method again. 
+ +Suppose we want to fetch the UTXOs exported from the P Chain to the X Chain in order to build an +ImportTx. Then we need to call GetUTXOs with the `sourceChain` argument in order to retrieve the +atomic UTXOs: + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.getUTXOs", + "params" :{ + "addresses":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", "X-avax1d09qn852zcy03sfc9hay2llmn9hsgnw4tp3dv6"], + "limit":5, + "sourceChain": "P", + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +This gives response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "1", + "utxos": [ + "0x00001f989ffaf18a18a59bdfbf209342aa61c6a62a67e8639d02bb3c8ddab315c6fa0000000039c33a499ce4c33a3b09cdd2cfa01ae70dbf2d18b2d7d168524440e55d550088000000070011c304cd7eb5c0000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29c83497819" + ], + "endIndex": { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "2Sz2XwRYqUHwPeiKoRnZ6ht88YqzAF1SQjMYZQQaB5wBFkAqST" + }, + "encoding": "hex" + }, + "id": 1 +} +``` + +### `avm.import` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Finalize a transfer of an asset from the P-Chain or C-Chain to the X-Chain. + +**Signature:** + +```sh +avm.import({ + to: string, + sourceChain: string, + username: string, + password: string, +}) -> {txID: string} +``` + +- `to` is the address the AVAX is sent to. This must be the same as the `to` argument in the + corresponding call to the P-Chain’s `exportAVAX` or C-Chain's `export`. +- `sourceChain` is the ID or alias of the chain the AVAX is being imported from. To import funds + from the C-Chain, use `"C"`. +- `username` is the user that controls `to`. 
+- `txID` is the ID of the newly created atomic transaction. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.import", + "params" :{ + "to":"X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "sourceChain":"C", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "txID": "2gXpf4jFoMAWQ3rxBfavgFfSdLkL2eFUYprKsUQuEdB5H6Jo1H" + }, + "id": 1 +} +``` + + + +### `avm.importKey` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Give a user control over an address by providing the private key that controls the address. + +**Signature:** + +```sh +avm.importKey({ + username: string, + password: string, + privateKey: string +}) -> {address: string} +``` + +- Add `privateKey` to `username`‘s set of private keys. `address` is the address `username` now + controls with the private key. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.importKey", + "params" :{ + "username":"myUsername", + "password":"myPassword", + "privateKey":"PrivateKey-2w4XiXxPfQK4TypYqnohRL8DRNTz9cGiGmwQ1zmgEqD9c9KWLq" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "address": "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5" + } +} +``` + +### `avm.issueTx` + +Send a signed transaction to the network. `encoding` specifies the format of the signed transaction. +Can only be `hex` when a value is provided. 
+ +**Signature:** + +```sh +avm.issueTx({ + tx: string, + encoding: string, //optional +}) -> { + txID: string +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.issueTx", + "params" :{ + "tx":"0x00000009de31b4d8b22991d51aa6aa1fc733f23a851a8c9400000000000186a0000000005f041280000000005f9ca900000030390000000000000001fceda8f90fcb5d30614b99d79fc4baa29307762668f16eb0259a57c2d3b78c875c86ec2045792d4df2d926c40f829196e0bb97ee697af71f5b0a966dabff749634c8b729855e937715b0e44303fd1014daedc752006011b730", + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "NUPLwbt2hsYxpQg4H2o451hmTWQ4JZx2zMzM4SinwtHgAdX1JLPHXvWSXEnpecStLj" + } +} +``` + +### `avm.listAddresses` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +List addresses controlled by the given user. + +**Signature:** + +```sh +avm.listAddresses({ + username: string, + password: string +}) -> {addresses: []string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "avm.listAddresses", + "params": { + "username":"myUsername", + "password":"myPassword" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "addresses": ["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"] + }, + "id": 1 +} +``` + +### `avm.mint` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). 
+::: + +Mint units of a variable-cap asset created with +[`avm.createVariableCapAsset`](/reference/avalanchego/x-chain/api.md#avmcreatevariablecapasset). + +**Signature:** + +```sh +avm.mint({ + amount: int, + assetID: string, + to: string, + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> +{ + txID: string, + changeAddr: string, +} +``` + +- `amount` units of `assetID` will be created and controlled by address `to`. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- `username` is the user that pays the transaction fee. `username` must hold keys giving it + permission to mint more of this asset. That is, it must control at least _threshold_ keys for one + of the minter sets. +- `txID` is this transaction’s ID. +- `changeAddr` in the result is the address where any change was sent. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.mint", + "params" :{ + "amount":10000000, + "assetID":"i1EqsthjiFTxunrj8WD2xFSrQ5p2siEKQacmCCB5qBFVqfSL2", + "to":"X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "from":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr":"X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "2oGdPdfw2qcNUHeqjw8sU2hPVrFyNUTgn6A8HenDra7oLCDtja", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `avm.mintNFT` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. 
See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Mint non-fungible tokens which were created with +[`avm.createNFTAsset`](/reference/avalanchego/x-chain/api.md#avmcreatenftasset). + +**Signature:** + +```sh +avm.mintNFT({ + assetID: string, + payload: string, + to: string, + encoding: string, //optional + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> +{ + txID: string, + changeAddr: string, +} +``` + +- `assetID` is the assetID of the newly created NFT asset. +- `payload` is an arbitrary payload of up to 1024 bytes. Its encoding format is specified by the + `encoding` argument. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- `username` is the user that pays the transaction fee. `username` must hold keys giving it + permission to mint more of this asset. That is, it must control at least _threshold_ keys for one + of the minter sets. +- `txID` is this transaction’s ID. +- `changeAddr` in the result is the address where any change was sent. +- `encoding` is the encoding format to use for the payload argument. Can only be `hex` when a value + is provided. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"avm.mintNFT", + "params" :{ + "assetID":"2KGdt2HpFKpTH5CtGZjYt5XPWs6Pv9DLoRBhiFfntbezdRvZWP", + "payload":"0x415641204c61627338259aed", + "to":"X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "from":["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr":"X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username":"myUsername", + "password":"myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "2oGdPdfw2qcNUHeqjw8sU2hPVrFyNUTgn6A8HenDra7oLCDtja", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `avm.send` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Send a quantity of an asset to an address. + +**Signature:** + +```sh +avm.send({ + amount: int, + assetID: string, + to: string, + memo: string, //optional + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> {txID: string, changeAddr: string} +``` + +- Sends `amount` units of asset with ID `assetID` to address `to`. `amount` is denominated in the + smallest increment of the asset. For AVAX this is 1 nAVAX (one billionth of 1 AVAX.) +- `to` is the X-Chain address the asset is sent to. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- You can attach a `memo`, whose length can be up to 256 bytes. +- The asset is sent from addresses controlled by user `username`. 
(Of course, that user will need to + hold at least the balance of the asset being sent.) + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.send", + "params" :{ + "assetID" : "AVAX", + "amount" : 10000, + "to" : "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "from" : ["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "memo" : "hi, mom!", + "username" : "userThatControlsAtLeast10000OfThisAsset", + "password" : "myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "2iXSVLPNVdnFqn65rRvLrsu8WneTFqBJRMqkBJx5vZTwAQb8c1", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `avm.sendMultiple` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Sends multiple transfers of `amount` of `assetID`, to a specified address from a list of owned +addresses. + +**Signature:** + +```sh +avm.sendMultiple({ + outputs: []{ + assetID: string, + amount: int, + to: string + }, + from: []string, //optional + changeAddr: string, //optional + memo: string, //optional + username: string, + password: string +}) -> {txID: string, changeAddr: string} +``` + +- `outputs` is an array of object literals which each contain an `assetID`, `amount` and `to`. +- `memo` is an optional message, whose length can be up to 256 bytes. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. 
+- The asset is sent from addresses controlled by user `username`. (Of course, that user will need to + hold at least the balance of the asset being sent.) + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"avm.sendMultiple", + "params" :{ + "outputs": [ + { + "assetID" : "AVAX", + "to" : "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "amount" : 1000000000 + }, + { + "assetID" : "26aqSTpZuWDAVtRmo44fjCx4zW6PDEx3zy9Qtp2ts1MuMFn9FB", + "to" : "X-avax18knvhxx8uhc0mwlgrfyzjcm2wrd6e60w37xrjq", + "amount" : 10 + } + ], + "memo" : "hi, mom!", + "from" : ["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username" : "username", + "password" : "myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "2iXSVLPNVdnFqn65rRvLrsu8WneTFqBJRMqkBJx5vZTwAQb8c1", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `avm.sendNFT` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Send a non-fungible token. + +**Signature:** + +```sh +avm.sendNFT({ + assetID: string, + groupID: number, + to: string, + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> {txID: string} +``` + +- `assetID` is the asset ID of the NFT being sent. +- `groupID` is the NFT group from which to send the NFT. NFT creation allows multiple groups under + each NFT ID. You can issue multiple NFTs to each group. +- `to` is the X-Chain address the NFT is sent to. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. 
+- `changeAddr` is the address any change will be sent to. If omitted, change is
+  sent to one of the addresses controlled by the user.
+ +::: + +**Signature:** + +```sh +wallet.issueTx({ + tx: string, + encoding: string, //optional +}) -> { + txID: string +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"wallet.issueTx", + "params" :{ + "tx":"0x00000009de31b4d8b22991d51aa6aa1fc733f23a851a8c9400000000000186a0000000005f041280000000005f9ca900000030390000000000000001fceda8f90fcb5d30614b99d79fc4baa29307762668f16eb0259a57c2d3b78c875c86ec2045792d4df2d926c40f829196e0bb97ee697af71f5b0a966dabff749634c8b729855e937715b0e44303fd1014daedc752006011b730", + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X/wallet +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "NUPLwbt2hsYxpQg4H2o451hmTWQ4JZx2zMzM4SinwtHgAdX1JLPHXvWSXEnpecStLj" + } +} +``` + +### `wallet.send` + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Send a quantity of an asset to an address and assume the TX will be accepted so that future calls +can use the modified UTXO set. + +This call is made to the wallet API endpoint: + +`/ext/bc/X/wallet` + +:::caution + +Endpoint deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +**Signature:** + +```sh +wallet.send({ + amount: int, + assetID: string, + to: string, + memo: string, //optional + from: []string, //optional + changeAddr: string, //optional + username: string, + password: string +}) -> {txID: string, changeAddr: string} +``` + +- Sends `amount` units of asset with ID `assetID` to address `to`. `amount` is denominated in the + smallest increment of the asset. For AVAX this is 1 nAVAX (one billionth of 1 AVAX.) +- `to` is the X-Chain address the asset is sent to. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. 
+- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- You can attach a `memo`, whose length can be up to 256 bytes. +- The asset is sent from addresses controlled by user `username`. (Of course, that user will need to + hold at least the balance of the asset being sent.) + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"wallet.send", + "params" :{ + "assetID" : "AVAX", + "amount" : 10000, + "to" : "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "memo" : "hi, mom!", + "from" : ["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username" : "userThatControlsAtLeast10000OfThisAsset", + "password" : "myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X/wallet +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "2iXSVLPNVdnFqn65rRvLrsu8WneTFqBJRMqkBJx5vZTwAQb8c1", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### `wallet.sendMultiple` + +:::warning +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). +::: + +Send multiple transfers of `amount` of `assetID`, to a specified address from a list of owned of +addresses and assume the TX will be accepted so that future calls can use the modified UTXO set. + +This call is made to the wallet API endpoint: + +`/ext/bc/X/wallet` + +:::caution + +Endpoint deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). 
+ +::: + +**Signature:** + +```sh +wallet.sendMultiple({ + outputs: []{ + assetID: string, + amount: int, + to: string + }, + from: []string, //optional + changeAddr: string, //optional + memo: string, //optional + username: string, + password: string +}) -> {txID: string, changeAddr: string} +``` + +- `outputs` is an array of object literals which each contain an `assetID`, `amount` and `to`. +- `from` are the addresses that you want to use for this operation. If omitted, uses any of your + addresses as needed. +- `changeAddr` is the address any change will be sent to. If omitted, change is sent to one of the + addresses controlled by the user. +- You can attach a `memo`, whose length can be up to 256 bytes. +- The asset is sent from addresses controlled by user `username`. (Of course, that user will need to + hold at least the balance of the asset being sent.) + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"wallet.sendMultiple", + "params" :{ + "outputs": [ + { + "assetID" : "AVAX", + "to" : "X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "amount" : 1000000000 + }, + { + "assetID" : "26aqSTpZuWDAVtRmo44fjCx4zW6PDEx3zy9Qtp2ts1MuMFn9FB", + "to" : "X-avax18knvhxx8uhc0mwlgrfyzjcm2wrd6e60w37xrjq", + "amount" : 10 + } + ], + "memo" : "hi, mom!", + "from" : ["X-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8", + "username" : "username", + "password" : "myPassword" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/X/wallet +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "txID": "2iXSVLPNVdnFqn65rRvLrsu8WneTFqBJRMqkBJx5vZTwAQb8c1", + "changeAddr": "X-avax1turszjwn05lflpewurw96rfrd3h6x8flgs5uf8" + } +} +``` + +### Events + +Listen for transactions on a specified address. 
+ +This call is made to the events API endpoint: + +`/ext/bc/X/events` + +:::caution + +Endpoint deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +#### **Golang Example** + +```go +package main + +import ( + "encoding/json" + "log" + "net" + "net/http" + "sync" + + "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/pubsub" + "github.com/gorilla/websocket" +) + +func main() { + dialer := websocket.Dialer{ + NetDial: func(netw, addr string) (net.Conn, error) { + return net.Dial(netw, addr) + }, + } + + httpHeader := http.Header{} + conn, _, err := dialer.Dial("ws://localhost:9650/ext/bc/X/events", httpHeader) + if err != nil { + panic(err) + } + + waitGroup := &sync.WaitGroup{} + waitGroup.Add(1) + + readMsg := func() { + defer waitGroup.Done() + + for { + mt, msg, err := conn.ReadMessage() + if err != nil { + log.Println(err) + return + } + switch mt { + case websocket.TextMessage: + log.Println(string(msg)) + default: + log.Println(mt, string(msg)) + } + } + } + + go readMsg() + + cmd := &pubsub.Command{NewSet: &pubsub.NewSet{}} + cmdmsg, err := json.Marshal(cmd) + if err != nil { + panic(err) + } + err = conn.WriteMessage(websocket.TextMessage, cmdmsg) + if err != nil { + panic(err) + } + + var addresses []string + addresses = append(addresses, " X-fuji....") + cmd = &pubsub.Command{AddAddresses: &pubsub.AddAddresses{JSONAddresses: api.JSONAddresses{Addresses: addresses}}} + cmdmsg, err = json.Marshal(cmd) + if err != nil { + panic(err) + } + + err = conn.WriteMessage(websocket.TextMessage, cmdmsg) + if err != nil { + panic(err) + } + + waitGroup.Wait() +} +``` + +**Operations:** + +| Command | Description | Example | Arguments | +| :--------------- | :--------------------------- | :------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------- | +| 
**NewSet** | create a new address map set | `{"newSet":{}}` | | +| **NewBloom** | create a new bloom set. | `{"newBloom":{"maxElements":"1000","collisionProb":"0.0100"}}` | `maxElements` - number of elements in filter must be > 0 `collisionProb` - allowed collision probability must be > 0 and <= 1 | +| **AddAddresses** | add an address to the set | `{"addAddresses":{"addresses":\["X-fuji..."\]}}` | addresses - list of addresses to match | + +Calling **NewSet** or **NewBloom** resets the filter, and must be followed with **AddAddresses**. +**AddAddresses** can be called multiple times. + +**Set details:** + +- **NewSet** performs absolute address matches, if the address is in the set you will be sent the + transaction. +- **NewBloom** [Bloom filtering](https://en.wikipedia.org/wiki/Bloom_filter) can produce false + positives, but can allow a greater number of addresses to be filtered. If the addresses is in the + filter, you will be sent the transaction. + +**Example Response:** + +```json +2021/05/11 15:59:35 {"txID":"22HWKHrREyXyAiDnVmGp3TQQ79tHSSVxA9h26VfDEzoxvwveyk"} +``` diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 9d1879871551..d9dbb8db6f12 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "encoding/json" "strings" "testing" @@ -28,6 +27,8 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/block/executor" "github.com/ava-labs/avalanchego/vms/avm/config" @@ -46,18 +47,15 @@ import ( func TestServiceIssueTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} 
env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - txArgs := &api.FormattedTx{} txReply := &api.JSONTxID{} - err := env.service.IssueTx(nil, txArgs, txReply) + err := service.IssueTx(nil, txArgs, txReply) require.ErrorIs(err, codec.ErrCantUnpackVersion) tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") @@ -65,41 +63,38 @@ func TestServiceIssueTx(t *testing.T) { require.NoError(err) txArgs.Encoding = formatting.Hex txReply = &api.JSONTxID{} - require.NoError(env.service.IssueTx(nil, txArgs, txReply)) + require.NoError(service.IssueTx(nil, txArgs, txReply)) require.Equal(tx.ID(), txReply.TxID) } func TestServiceGetTxStatus(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - statusArgs := &api.JSONTxID{} statusReply := &GetTxStatusReply{} - err := env.service.GetTxStatus(nil, statusArgs, statusReply) + err := service.GetTxStatus(nil, statusArgs, statusReply) require.ErrorIs(err, errNilTxID) - newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + newTx := newAvaxBaseTxWithOutputs(t, env) txID := newTx.ID() statusArgs = &api.JSONTxID{ TxID: txID, } statusReply = &GetTxStatusReply{} - require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) + require.NoError(service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Unknown, statusReply.Status) issueAndAccept(require, env.vm, env.issuer, newTx) statusReply = &GetTxStatusReply{} - require.NoError(env.service.GetTxStatus(nil, statusArgs, statusReply)) + require.NoError(service.GetTxStatus(nil, statusArgs, statusReply)) require.Equal(choices.Accepted, 
statusReply.Status) } @@ -107,12 +102,10 @@ func TestServiceGetTxStatus(t *testing.T) { func TestServiceGetBalanceStrict(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() @@ -148,7 +141,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply := &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(uint64(1337), uint64(balanceReply.Balance)) require.Len(balanceReply.UTXOIDs, 1) @@ -159,7 +152,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(balanceReply.Balance) require.Empty(balanceReply.UTXOIDs) @@ -195,7 +188,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(uint64(1337+1337), uint64(balanceReply.Balance)) require.Len(balanceReply.UTXOIDs, 2) @@ -206,7 +199,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) 
+ require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(balanceReply.Balance) require.Empty(balanceReply.UTXOIDs) @@ -244,7 +237,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(uint64(1337*3), uint64(balanceReply.Balance)) require.Len(balanceReply.UTXOIDs, 3) @@ -255,7 +248,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(service.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(balanceReply.Balance) require.Empty(balanceReply.UTXOIDs) @@ -263,15 +256,14 @@ func TestServiceGetBalanceStrict(t *testing.T) { func TestServiceGetTxs(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} + var err error env.vm.addressTxsIndexer, err = index.NewIndexer(env.vm.db, env.vm.ctx.Log, "", prometheus.NewRegistry(), false) require.NoError(err) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() @@ -290,14 +282,14 @@ func TestServiceGetTxs(t *testing.T) { AssetID: assetID.String(), } getTxsReply := &GetAddressTxsReply{} - require.NoError(env.service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.NoError(service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) 
require.Len(getTxsReply.TxIDs, 10) require.Equal(getTxsReply.TxIDs, testTxs[:10]) // get the second page getTxsArgs.Cursor = getTxsReply.Cursor getTxsReply = &GetAddressTxsReply{} - require.NoError(env.service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.NoError(service.GetAddressTxs(nil, getTxsArgs, getTxsReply)) require.Len(getTxsReply.TxIDs, 10) require.Equal(getTxsReply.TxIDs, testTxs[10:20]) } @@ -305,12 +297,10 @@ func TestServiceGetTxs(t *testing.T) { func TestServiceGetAllBalances(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() @@ -344,7 +334,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply := &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 1) require.Equal(assetID.String(), reply.Balances[0].AssetID) @@ -355,7 +345,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) require.Empty(reply.Balances) env.vm.ctx.Lock.Lock() @@ -388,7 +378,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 1) 
require.Equal(assetID.String(), reply.Balances[0].AssetID) @@ -399,7 +389,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(reply.Balances) @@ -435,7 +425,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 1) @@ -446,7 +436,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(reply.Balances) @@ -480,7 +470,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(reply.Balances, 2) gotAssetIDs := []string{reply.Balances[0].AssetID, reply.Balances[1].AssetID} @@ -495,7 +485,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(env.service.GetAllBalances(nil, balanceArgs, reply)) + 
require.NoError(service.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Empty(reply.Balances) } @@ -503,19 +493,16 @@ func TestServiceGetAllBalances(t *testing.T) { func TestServiceGetTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - txID := env.genesisTx.ID() reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.Hex, }, &reply)) @@ -531,19 +518,17 @@ func TestServiceGetTx(t *testing.T) { func TestServiceGetTxJSON_BaseTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - newTx := newAvaxBaseTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + newTx := newAvaxBaseTxWithOutputs(t, env) issueAndAccept(require, env.vm, env.issuer, newTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: newTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -565,7 +550,19 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { "addresses": [ "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" ], - "amount": 49000, + "amount": 1000, + "locktime": 0, + "threshold": 1 + } + }, + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + 
"X-testing1d6kkj0qh4wcmus3tk59npwt3rluc6en72ngurd" + ], + "amount": 48000, "locktime": 0, "threshold": 1 } @@ -614,19 +611,17 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { func TestServiceGetTxJSON_ExportTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - newTx := newAvaxExportTxWithOutputs(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.TxFee, env.vm.parser) + newTx := buildTestExportTx(t, env, env.vm.ctx.CChainID) issueAndAccept(require, env.vm, env.issuer, newTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: newTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -639,7 +634,20 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], "inputs": [ { "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -655,7 +663,7 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { } ], "memo": "0x", - "destinationChain": "11111111111111111111111111111111LpoYY", + "destinationChain": "2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w", "exportedOutputs": [ { "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", @@ -664,7 +672,7 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { "addresses": [ "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" ], - "amount": 49000, + "amount": 1000, "locktime": 0, 
"threshold": 1 } @@ -699,24 +707,65 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &nftfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: createAssetTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -730,8 +779,34 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { "unsignedTx": { "networkID": 10, 
"blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 49000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "outputIndex": 2, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 50000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "name": "Team Rocket", "symbol": "TR", @@ -752,6 +827,7 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { "addresses": [ "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" ], + "groupID": 0, "locktime": 0, "threshold": 1 } @@ -801,13 +877,27 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { } ] }, - "credentials": null, + "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + } + ], "id": "PLACEHOLDER_TX_ID" }` expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", createAssetTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", createAssetTx.Unsigned.(*txs.CreateAssetTx).BlockchainID.String(), 1) + sigStr, err := formatting.Encode(formatting.HexNC, createAssetTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) + require.NoError(err) + + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -815,29 +905,43 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: 
&config.Config{}, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildNFTxMintOp(createAssetTx, key, 2, 1)) - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + op := buildNFTxMintOp(createAssetTx, key, 1, 1) + mintNFTTx := buildOperationTxWithOps(t, env, op) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintNFTTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -851,8 +955,34 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + 
"txID": "rSiY2aqcahSU5vyJeMiNBnwtPwfJFxsxskAGbU3HxHvAkrdpy", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -860,7 +990,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 2 + "outputIndex": 1 } ], "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", @@ -886,6 +1016,14 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { @@ -902,10 +1040,10 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -914,32 +1052,46 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, additionalFxs: 
[]*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &nftfx.MintOutput{ + GroupID: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintOp1 := buildNFTxMintOp(createAssetTx, key, 2, 1) - mintOp2 := buildNFTxMintOp(createAssetTx, key, 3, 2) - mintNFTTx := buildOperationTxWithOp(env.vm.ctx.ChainID, mintOp1, mintOp2) - - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + mintOp1 := buildNFTxMintOp(createAssetTx, key, 1, 0) + mintOp2 := buildNFTxMintOp(createAssetTx, key, 2, 1) + mintNFTTx := buildOperationTxWithOps(t, env, mintOp1, mintOp2) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintNFTTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -953,8 +1105,34 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + 
"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "BBhSA95iv6ueXc7xrMSka1bByBqcwJxyvMiyjy5H8ccAgxy4P", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -962,7 +1140,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 2 + "outputIndex": 1 } ], "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", @@ -972,7 +1150,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { 0 ] }, - "groupID": 1, + "groupID": 0, "payload": "0x68656c6c6f", "outputs": [ { @@ -990,7 +1168,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 3 + "outputIndex": 2 } ], "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", @@ -1000,7 +1178,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { 0 ] }, - "groupID": 2, + "groupID": 1, "payload": "0x68656c6c6f", "outputs": [ { @@ -1016,6 +1194,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "qd2U4HDWUvMrVUeTcCHp6xH3Qpnn1XbU5MDdnBoiifFqvgXwT", "credential": { @@ -1040,10 +1226,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintNFTTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", 
mintNFTTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[0].Credential.(*nftfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintNFTTx.Creds[1].Credential.(*nftfx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1052,29 +1238,40 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &nftfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildSecpMintOp(createAssetTx, key, 0)) - require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + op := buildSecpMintOp(createAssetTx, key, 1) + mintSecpOpTx := buildOperationTxWithOps(t, env, op) issueAndAccept(require, env.vm, env.issuer, 
mintSecpOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintSecpOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1088,8 +1285,34 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2YhAg3XUdub5syHHePZG7q3yFjKAy7ahsvQDxq5SMrYbN1s5Gn", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1097,7 +1320,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 0 + "outputIndex": 1 } ], "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", @@ -1127,6 +1350,14 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { @@ -1146,7 +1377,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + 
expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1155,32 +1386,44 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: durango, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 0: { + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }, + 1: { + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - op1 := buildSecpMintOp(createAssetTx, key, 0) - op2 := buildSecpMintOp(createAssetTx, key, 1) - mintSecpOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) - - require.NoError(mintSecpOpTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + op1 := buildSecpMintOp(createAssetTx, key, 1) + op2 := buildSecpMintOp(createAssetTx, key, 2) + mintSecpOpTx := buildOperationTxWithOps(t, env, op1, op2) issueAndAccept(require, env.vm, env.issuer, mintSecpOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintSecpOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1194,8 +1437,34 @@ func 
TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2vxorPLUw5sneb7Mdhhjuws3H5AqaDp1V8ETz6fEuzvn835rVX", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1203,7 +1472,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 0 + "outputIndex": 1 } ], "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", @@ -1235,7 +1504,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 1 + "outputIndex": 2 } ], "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", @@ -1273,6 +1542,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { ] } }, + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", "credential": { @@ -1292,7 +1569,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { sigStr, err := formatting.Encode(formatting.HexNC, mintSecpOpTx.Creds[0].Credential.(*secp256k1fx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, 
"PLACEHOLDER_SIGNATURE", sigStr, 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1301,29 +1578,35 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, buildPropertyFxMintOp(createAssetTx, key, 4)) - require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + op := buildPropertyFxMintOp(createAssetTx, key, 1) + mintPropertyFxOpTx := buildOperationTxWithOps(t, env, op) issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintPropertyFxOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1337,8 +1620,34 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": 
"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "nNUGBjszswU3ZmhCb8hBNWmg335UZqGWmNrYTAGyMF4bFpMXm", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1346,7 +1655,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 4 + "outputIndex": 1 } ], "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", @@ -1373,6 +1682,14 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { @@ -1389,10 +1706,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 1) + expectedReplyTxString = 
strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) require.Equal(expectedReplyTxString, string(replyTxBytes)) } @@ -1401,32 +1718,42 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env.vm.ctx.ChainID, env.vm.parser) + initialStates := map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + } + createAssetTx := newAvaxCreateAssetTxWithOutputs(t, env, initialStates) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - op1 := buildPropertyFxMintOp(createAssetTx, key, 4) - op2 := buildPropertyFxMintOp(createAssetTx, key, 5) - mintPropertyFxOpTx := buildOperationTxWithOp(env.vm.ctx.ChainID, op1, op2) - - require.NoError(mintPropertyFxOpTx.SignPropertyFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) + op1 := buildPropertyFxMintOp(createAssetTx, key, 1) + op2 := buildPropertyFxMintOp(createAssetTx, key, 2) + mintPropertyFxOpTx := buildOperationTxWithOps(t, env, op1, op2) issueAndAccept(require, env.vm, env.issuer, mintPropertyFxOpTx) reply := api.GetTxReply{} - require.NoError(env.service.GetTx(nil, &api.GetTxArgs{ + require.NoError(service.GetTx(nil, &api.GetTxArgs{ TxID: mintPropertyFxOpTx.ID(), Encoding: formatting.JSON, }, &reply)) @@ -1440,8 +1767,34 @@ func 
TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "unsignedTx": { "networkID": 10, "blockchainID": "PLACEHOLDER_BLOCKCHAIN_ID", - "outputs": null, - "inputs": null, + "outputs": [ + { + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e" + ], + "amount": 48000, + "locktime": 0, + "threshold": 1 + } + } + ], + "inputs": [ + { + "txID": "2NV5AGoQQHVRY6VkT8sht8bhZDHR7uwta7fk7JwAZpacqMRWCa", + "outputIndex": 0, + "assetID": "2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 49000, + "signatureIndices": [ + 0 + ] + } + } + ], "memo": "0x", "operations": [ { @@ -1449,7 +1802,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 4 + "outputIndex": 1 } ], "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", @@ -1478,7 +1831,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) "inputIDs": [ { "txID": "PLACEHOLDER_CREATE_ASSET_TX_ID", - "outputIndex": 5 + "outputIndex": 2 } ], "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", @@ -1505,6 +1858,14 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) ] }, "credentials": [ + { + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "credential": { + "signatures": [ + "PLACEHOLDER_SIGNATURE" + ] + } + }, { "fxID": "rXJsCSEYXg2TehWxCEEGj6JU2PWKTkd6cBdNLjoe2SpsKD9cy", "credential": { @@ -1529,169 +1890,76 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_TX_ID", mintPropertyFxOpTx.ID().String(), 1) expectedReplyTxString = strings.Replace(expectedReplyTxString, 
"PLACEHOLDER_BLOCKCHAIN_ID", mintPropertyFxOpTx.Unsigned.(*txs.OperationTx).BlockchainID.String(), 1) - sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[0].Credential.(*propertyfx.Credential).Sigs[0][:]) + sigStr, err := formatting.Encode(formatting.HexNC, mintPropertyFxOpTx.Creds[1].Credential.(*propertyfx.Credential).Sigs[0][:]) require.NoError(err) - expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 2) + expectedReplyTxString = strings.Replace(expectedReplyTxString, "PLACEHOLDER_SIGNATURE", sigStr, 3) require.Equal(expectedReplyTxString, string(replyTxBytes)) } -func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { - avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") - key := keys[0] - tx := buildBaseTx(avaxTx, chainID, fee, key) - require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - return tx -} - -func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, chainID ids.ID, fee uint64, parser txs.Parser) *txs.Tx { - avaxTx := getCreateTxFromGenesisTest(t, genesisBytes, "AVAX") - key := keys[0] - tx := buildExportTx(avaxTx, chainID, fee, key) - require.NoError(t, tx.SignSECP256K1Fx(parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - return tx -} - -func newAvaxCreateAssetTxWithOutputs(t *testing.T, chainID ids.ID, parser txs.Parser) *txs.Tx { - key := keys[0] - tx := buildCreateAssetTx(chainID, key) - require.NoError(t, tx.Initialize(parser.Codec())) - return tx -} - -func buildBaseTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { - return &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: 
&secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - fee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }, - }} -} - -func buildExportTx(avaxTx *txs.Tx, chainID ids.ID, fee uint64, key *secp256k1.PrivateKey) *txs.Tx { - return &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }, - }, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxTx.ID()}, +func newAvaxBaseTxWithOutputs(t *testing.T, env *environment) *txs.Tx { + var ( + memo = []byte{1, 2, 3, 4, 5, 6, 7, 8} + key = keys[0] + changeKey = keys[1] + kc = secp256k1fx.NewKeychain(key) + ) + + tx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - fee, + Amt: units.MicroAvax, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, }}, - }} + memo, + kc, + changeKey.PublicKey().Address(), + ) + require.NoError(t, err) + return tx } -func buildCreateAssetTx(chainID ids.ID, key *secp256k1.PrivateKey) *txs.Tx { - return &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{ - { - FxIndex: 0, 
- Outs: []verify.State{ - &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }, - }, - { - FxIndex: 1, - Outs: []verify.State{ - &nftfx.MintOutput{ - GroupID: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - &nftfx.MintOutput{ - GroupID: 2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }, - }, - { - FxIndex: 2, - Outs: []verify.State{ - &propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - &propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - }, - }} +func newAvaxCreateAssetTxWithOutputs(t *testing.T, env *environment, initialStates map[uint32][]verify.State) *txs.Tx { + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + tx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(t, err) + return tx +} + +func buildTestExportTx(t *testing.T, env *environment, chainID ids.ID) *txs.Tx { + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + to = key.PublicKey().Address() + ) + + tx, err := env.txBuilder.ExportTx( + chainID, + to, + env.vm.feeAssetID, + units.MicroAvax, + kc, + key.Address(), + ) + require.NoError(t, err) + return tx } func buildNFTxMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputIndex, groupID uint32) *txs.Operation { @@ -1767,57 +2035,55 @@ func buildSecpMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputInd } } -func 
buildOperationTxWithOp(chainID ids.ID, op ...*txs.Operation) *txs.Tx { - return &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: chainID, - }}, - Ops: op, - }} +func buildOperationTxWithOps(t *testing.T, env *environment, op ...*txs.Operation) *txs.Tx { + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + tx, err := env.txBuilder.Operation( + op, + kc, + key.Address(), + ) + require.NoError(t, err) + return tx } func TestServiceGetNilTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := api.GetTxReply{} - err := env.service.GetTx(nil, &api.GetTxArgs{}, &reply) + err := service.GetTx(nil, &api.GetTxArgs{}, &reply) require.ErrorIs(err, errNilTxID) } func TestServiceGetUnknownTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := api.GetTxReply{} - err := env.service.GetTx(nil, &api.GetTxArgs{TxID: ids.GenerateTestID()}, &reply) + err := service.GetTx(nil, &api.GetTxArgs{TxID: ids.GenerateTestID()}, &reply) require.ErrorIs(err, database.ErrNotFound) } func TestServiceGetUTXOs(t *testing.T) { - env := setup(t, &envConfig{}) - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} + env.vm.ctx.Lock.Unlock() rawAddr := ids.GenerateTestShortID() rawEmptyAddr := 
ids.GenerateTestShortID() @@ -1889,8 +2155,6 @@ func TestServiceGetUTXOs(t *testing.T) { xEmptyAddr, err := env.vm.FormatLocalAddress(rawEmptyAddr) require.NoError(t, err) - env.vm.ctx.Lock.Unlock() - tests := []struct { label string count int @@ -2054,7 +2318,7 @@ func TestServiceGetUTXOs(t *testing.T) { t.Run(test.label, func(t *testing.T) { require := require.New(t) reply := &api.GetUTXOsReply{} - err := env.service.GetUTXOs(nil, test.args, reply) + err := service.GetUTXOs(nil, test.args, reply) require.ErrorIs(err, test.expectedErr) if test.expectedErr != nil { return @@ -2067,19 +2331,16 @@ func TestServiceGetUTXOs(t *testing.T) { func TestGetAssetDescription(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - avaxAssetID := env.genesisTx.ID() reply := GetAssetDescriptionReply{} - require.NoError(env.service.GetAssetDescription(nil, &GetAssetDescriptionArgs{ + require.NoError(service.GetAssetDescription(nil, &GetAssetDescriptionArgs{ AssetID: avaxAssetID.String(), }, &reply)) @@ -2090,21 +2351,18 @@ func TestGetAssetDescription(t *testing.T) { func TestGetBalance(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - avaxAssetID := env.genesisTx.ID() reply := GetBalanceReply{} addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) - require.NoError(env.service.GetBalance(nil, &GetBalanceArgs{ + require.NoError(service.GetBalance(nil, &GetBalanceArgs{ Address: addrStr, AssetID: avaxAssetID.String(), }, 
&reply)) @@ -2125,14 +2383,9 @@ func TestCreateFixedCapAsset(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := AssetIDChangeAddr{} addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) @@ -2141,7 +2394,7 @@ func TestCreateFixedCapAsset(t *testing.T) { require.NoError(err) _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) - require.NoError(env.service.CreateFixedCapAsset(nil, &CreateAssetArgs{ + require.NoError(service.CreateFixedCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2176,21 +2429,16 @@ func TestCreateVariableCapAsset(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - reply := AssetIDChangeAddr{} minterAddrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) _, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) changeAddrStr := fromAddrsStr[0] - require.NoError(env.service.CreateVariableCapAsset(nil, &CreateAssetArgs{ + require.NoError(service.CreateVariableCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2229,7 +2477,7 @@ func TestCreateVariableCapAsset(t *testing.T) { To: minterAddrStr, // Send newly minted tokens to this address } mintReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.Mint(nil, mintArgs, mintReply)) + require.NoError(service.Mint(nil, mintArgs, mintReply)) require.Equal(changeAddrStr, mintReply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, mintReply.TxID) @@ -2250,7 +2498,7 @@ func 
TestCreateVariableCapAsset(t *testing.T) { }, } sendReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.Send(nil, sendArgs, sendReply)) + require.NoError(service.Send(nil, sendArgs, sendReply)) require.Equal(changeAddrStr, sendReply.ChangeAddr) }) } @@ -2269,14 +2517,9 @@ func TestNFTWorkflow(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - fromAddrs, fromAddrsStr := sampleAddrs(t, env.vm.AddressManager, addrs) // Test minting of the created variable cap asset @@ -2304,7 +2547,7 @@ func TestNFTWorkflow(t *testing.T) { }, } createReply := &AssetIDChangeAddr{} - require.NoError(env.service.CreateNFTAsset(nil, createArgs, createReply)) + require.NoError(service.CreateNFTAsset(nil, createArgs, createReply)) require.Equal(fromAddrsStr[0], createReply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, createReply.AssetID) @@ -2317,7 +2560,7 @@ func TestNFTWorkflow(t *testing.T) { require.NoError(err) reply := &GetBalanceReply{} - require.NoError(env.service.GetBalance(nil, + require.NoError(service.GetBalance(nil, &GetBalanceArgs{ Address: addrStr, AssetID: env.vm.feeAssetID.String(), @@ -2355,7 +2598,7 @@ func TestNFTWorkflow(t *testing.T) { } mintReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.MintNFT(nil, mintArgs, mintReply)) + require.NoError(service.MintNFT(nil, mintArgs, mintReply)) require.Equal(fromAddrsStr[0], createReply.ChangeAddr) // Accept the transaction so that we can send the newly minted NFT @@ -2375,7 +2618,7 @@ func TestNFTWorkflow(t *testing.T) { To: addrStr, } sendReply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.SendNFT(nil, sendArgs, sendReply)) + require.NoError(service.SendNFT(nil, sendArgs, sendReply)) require.Equal(fromAddrsStr[0], sendReply.ChangeAddr) }) } @@ -2390,14 +2633,9 @@ func TestImportExportKey(t 
*testing.T) { password: password, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - sk, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -2409,7 +2647,7 @@ func TestImportExportKey(t *testing.T) { PrivateKey: sk, } importReply := &api.JSONAddress{} - require.NoError(env.service.ImportKey(nil, importArgs, importReply)) + require.NoError(service.ImportKey(nil, importArgs, importReply)) addrStr, err := env.vm.FormatLocalAddress(sk.PublicKey().Address()) require.NoError(err) @@ -2421,7 +2659,7 @@ func TestImportExportKey(t *testing.T) { Address: addrStr, } exportReply := &ExportKeyReply{} - require.NoError(env.service.ExportKey(nil, exportArgs, exportReply)) + require.NoError(service.ExportKey(nil, exportArgs, exportReply)) require.Equal(sk.Bytes(), exportReply.PrivateKey.Bytes()) } @@ -2434,14 +2672,9 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { password: password, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - sk, err := secp256k1.NewPrivateKey() require.NoError(err) args := ImportKeyArgs{ @@ -2452,7 +2685,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { PrivateKey: sk, } reply := api.JSONAddress{} - require.NoError(env.service.ImportKey(nil, &args, &reply)) + require.NoError(service.ImportKey(nil, &args, &reply)) expectedAddress, err := env.vm.FormatLocalAddress(sk.PublicKey().Address()) require.NoError(err) @@ -2460,7 +2693,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { require.Equal(expectedAddress, reply.Address) reply2 := api.JSONAddress{} - require.NoError(env.service.ImportKey(nil, &args, &reply2)) + require.NoError(service.ImportKey(nil, &args, &reply2)) require.Equal(expectedAddress, reply2.Address) @@ -2469,7 +2702,7 @@ func 
TestImportAVMKeyNoDuplicates(t *testing.T) { Password: password, } addrsReply := api.JSONAddresses{} - require.NoError(env.service.ListAddresses(nil, &addrsArgs, &addrsReply)) + require.NoError(service.ListAddresses(nil, &addrsArgs, &addrsReply)) require.Len(addrsReply.Addresses, 1) require.Equal(expectedAddress, addrsReply.Addresses[0]) @@ -2485,14 +2718,9 @@ func TestSend(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() @@ -2518,7 +2746,7 @@ func TestSend(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.Send(nil, args, reply)) + require.NoError(service.Send(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, reply.TxID) @@ -2536,15 +2764,13 @@ func TestSendMultiple(t *testing.T) { password: password, initialKeys: keys, }}, + vmStaticConfig: &config.Config{ + EUpgradeTime: mockable.MaxTime, + }, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() @@ -2577,7 +2803,7 @@ func TestSendMultiple(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - require.NoError(env.service.SendMultiple(nil, args, reply)) + require.NoError(service.SendMultiple(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, reply.TxID) @@ -2594,21 +2820,16 @@ func TestCreateAndListAddresses(t *testing.T) { password: password, }}, }) + service := &Service{vm: env.vm} env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - 
require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createArgs := &api.UserPass{ Username: username, Password: password, } createReply := &api.JSONAddress{} - require.NoError(env.service.CreateAddress(nil, createArgs, createReply)) + require.NoError(service.CreateAddress(nil, createArgs, createReply)) newAddr := createReply.Address @@ -2618,7 +2839,7 @@ func TestCreateAndListAddresses(t *testing.T) { } listReply := &api.JSONAddresses{} - require.NoError(env.service.ListAddresses(nil, listArgs, listReply)) + require.NoError(service.ListAddresses(nil, listArgs, listReply)) require.Contains(listReply.Addresses, newAddr) } @@ -2635,12 +2856,9 @@ func TestImport(t *testing.T) { initialKeys: keys, }}, }) + service := &Service{vm: env.vm} + env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() assetID := env.genesisTx.ID() addr0 := keys[0].PublicKey().Address() @@ -2672,8 +2890,6 @@ func TestImport(t *testing.T) { }, })) - env.vm.ctx.Lock.Unlock() - addrStr, err := env.vm.FormatLocalAddress(keys[0].PublicKey().Address()) require.NoError(err) args := &ImportArgs{ @@ -2685,7 +2901,7 @@ func TestImport(t *testing.T) { To: addrStr, } reply := &api.JSONTxID{} - require.NoError(env.service.Import(nil, args, reply)) + require.NoError(service.Import(nil, args, reply)) }) } } diff --git a/vms/avm/state/mock_state.go b/vms/avm/state/mock_state.go index cb5138c90369..13ede9805d0b 100644 --- a/vms/avm/state/mock_state.go +++ b/vms/avm/state/mock_state.go @@ -11,12 +11,10 @@ package state import ( reflect "reflect" - sync "sync" time "time" database "github.com/ava-labs/avalanchego/database" ids "github.com/ava-labs/avalanchego/ids" - logging "github.com/ava-labs/avalanchego/utils/logging" block "github.com/ava-labs/avalanchego/vms/avm/block" txs "github.com/ava-labs/avalanchego/vms/avm/txs" avax 
"github.com/ava-labs/avalanchego/vms/components/avax" @@ -464,20 +462,6 @@ func (mr *MockStateMockRecorder) IsInitialized() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInitialized", reflect.TypeOf((*MockState)(nil).IsInitialized)) } -// Prune mocks base method. -func (m *MockState) Prune(arg0 sync.Locker, arg1 logging.Logger) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Prune", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Prune indicates an expected call of Prune. -func (mr *MockStateMockRecorder) Prune(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prune", reflect.TypeOf((*MockState)(nil).Prune), arg0, arg1) -} - // SetInitialized mocks base method. func (m *MockState) SetInitialized() error { m.ctrl.T.Helper() diff --git a/vms/avm/state/state.go b/vms/avm/state/state.go index 297a7e76d39a..5005eb3dfc14 100644 --- a/vms/avm/state/state.go +++ b/vms/avm/state/state.go @@ -4,15 +4,10 @@ package state import ( - "bytes" - "errors" "fmt" - "math" - "sync" "time" "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" @@ -20,30 +15,20 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" ) const ( - statusCacheSize = 8192 txCacheSize = 8192 blockIDCacheSize = 8192 blockCacheSize = 2048 - - pruneCommitLimit = 1024 - pruneCommitSleepMultiplier = 5 - pruneCommitSleepCap = 10 * time.Second - pruneUpdateFrequency = 30 * 
time.Second ) var ( utxoPrefix = []byte("utxo") - statusPrefix = []byte("status") txPrefix = []byte("tx") blockIDPrefix = []byte("blockID") blockPrefix = []byte("block") @@ -53,8 +38,6 @@ var ( timestampKey = []byte{0x01} lastAcceptedKey = []byte{0x02} - errStatusWithoutTx = errors.New("unexpected status without transactions") - _ State = (*state)(nil) ) @@ -106,19 +89,6 @@ type State interface { // pending changes to the base database. CommitBatch() (database.Batch, error) - // Asynchronously removes unneeded state from disk. - // - // Specifically, this removes: - // - All transaction statuses - // - All non-accepted transactions - // - All UTXOs that were consumed by accepted transactions - // - // [lock] is the AVM's context lock and is assumed to be unlocked when this - // method is called. - // - // TODO: remove after v1.11.x is activated - Prune(lock sync.Locker, log logging.Logger) error - // Checksums returns the current TxChecksum and UTXOChecksum. Checksums() (txChecksum ids.ID, utxoChecksum ids.ID) @@ -129,8 +99,6 @@ type State interface { * VMDB * |- utxos * | '-- utxoDB - * |- statuses - * | '-- statusDB * |-. txs * | '-- txID -> tx bytes * |-. blockIDs @@ -150,10 +118,6 @@ type state struct { utxoDB database.Database utxoState avax.UTXOState - statusesPruned bool - statusCache cache.Cacher[ids.ID, *choices.Status] // cache of id -> choices.Status. If the entry is nil, it is not in the database - statusDB database.Database - addedTxs map[ids.ID]*txs.Tx // map of txID -> *txs.Tx txCache cache.Cacher[ids.ID, *txs.Tx] // cache of txID -> *txs.Tx. 
If the entry is nil, it is not in the database txDB database.Database @@ -182,21 +146,11 @@ func New( trackChecksums bool, ) (State, error) { utxoDB := prefixdb.New(utxoPrefix, db) - statusDB := prefixdb.New(statusPrefix, db) txDB := prefixdb.New(txPrefix, db) blockIDDB := prefixdb.New(blockIDPrefix, db) blockDB := prefixdb.New(blockPrefix, db) singletonDB := prefixdb.New(singletonPrefix, db) - statusCache, err := metercacher.New[ids.ID, *choices.Status]( - "status_cache", - metrics, - &cache.LRU[ids.ID, *choices.Status]{Size: statusCacheSize}, - ) - if err != nil { - return nil, err - } - txCache, err := metercacher.New[ids.ID, *txs.Tx]( "tx_cache", metrics, @@ -237,9 +191,6 @@ func New( utxoDB: utxoDB, utxoState: utxoState, - statusCache: statusCache, - statusDB: statusDB, - addedTxs: make(map[ids.ID]*txs.Tx), txCache: txCache, txDB: txDB, @@ -281,69 +232,7 @@ func (s *state) DeleteUTXO(utxoID ids.ID) { s.modifiedUTXOs[utxoID] = nil } -// TODO: After v1.11.x has activated we can rename [getTx] to [GetTx] and delete -// [getStatus]. func (s *state) GetTx(txID ids.ID) (*txs.Tx, error) { - tx, err := s.getTx(txID) - if err != nil { - return nil, err - } - - // Before the linearization, transactions were persisted before they were - // marked as Accepted. However, this function aims to only return accepted - // transactions. - status, err := s.getStatus(txID) - if err == database.ErrNotFound { - // If the status wasn't persisted, then the transaction was written - // after the linearization, and is accepted. - return tx, nil - } - if err != nil { - return nil, err - } - - // If the status was persisted, then the transaction was written before the - // linearization. If it wasn't marked as accepted, then we treat it as if it - // doesn't exist. 
- if status != choices.Accepted { - return nil, database.ErrNotFound - } - return tx, nil -} - -func (s *state) getStatus(id ids.ID) (choices.Status, error) { - if s.statusesPruned { - return choices.Unknown, database.ErrNotFound - } - - if _, ok := s.addedTxs[id]; ok { - return choices.Unknown, database.ErrNotFound - } - if status, found := s.statusCache.Get(id); found { - if status == nil { - return choices.Unknown, database.ErrNotFound - } - return *status, nil - } - - val, err := database.GetUInt32(s.statusDB, id[:]) - if err == database.ErrNotFound { - s.statusCache.Put(id, nil) - return choices.Unknown, database.ErrNotFound - } - if err != nil { - return choices.Unknown, err - } - - status := choices.Status(val) - if err := status.Valid(); err != nil { - return choices.Unknown, err - } - s.statusCache.Put(id, &status) - return status, nil -} - -func (s *state) getTx(txID ids.ID) (*txs.Tx, error) { if tx, exists := s.addedTxs[txID]; exists { return tx, nil } @@ -521,7 +410,6 @@ func (s *state) CommitBatch() (database.Batch, error) { func (s *state) Close() error { return utils.Err( s.utxoDB.Close(), - s.statusDB.Close(), s.txDB.Close(), s.blockIDDB.Close(), s.blockDB.Close(), @@ -564,13 +452,9 @@ func (s *state) writeTxs() error { delete(s.addedTxs, txID) s.txCache.Put(txID, tx) - s.statusCache.Put(txID, nil) if err := s.txDB.Put(txID[:], txBytes); err != nil { return fmt.Errorf("failed to add tx: %w", err) } - if err := s.statusDB.Delete(txID[:]); err != nil { - return fmt.Errorf("failed to delete status: %w", err) - } } return nil } @@ -618,225 +502,6 @@ func (s *state) writeMetadata() error { return nil } -func (s *state) Prune(lock sync.Locker, log logging.Logger) error { - lock.Lock() - // It is possible that more txs are added after grabbing this iterator. No - // new txs will write a status, so we don't need to check those txs. 
- statusIter := s.statusDB.NewIterator() - // Releasing is done using a closure to ensure that updating statusIter will - // result in having the most recent iterator released when executing the - // deferred function. - defer func() { - statusIter.Release() - }() - - if !statusIter.Next() { - // If there are no statuses on disk, pruning was previously run and - // finished. - lock.Unlock() - - log.Info("state already pruned") - - return statusIter.Error() - } - - startTxIDBytes := statusIter.Key() - txIter := s.txDB.NewIteratorWithStart(startTxIDBytes) - // Releasing is done using a closure to ensure that updating statusIter will - // result in having the most recent iterator released when executing the - // deferred function. - defer func() { - txIter.Release() - }() - - // While we are pruning the disk, we disable caching of the data we are - // modifying. Caching is re-enabled when pruning finishes. - // - // Note: If an unexpected error occurs the caches are never re-enabled. - // That's fine as the node is going to be in an unhealthy state regardless. - oldTxCache := s.txCache - s.statusCache = &cache.Empty[ids.ID, *choices.Status]{} - s.txCache = &cache.Empty[ids.ID, *txs.Tx]{} - lock.Unlock() - - startTime := time.Now() - lastCommit := startTime - lastUpdate := startTime - startProgress := timer.ProgressFromHash(startTxIDBytes) - - startStatusBytes := statusIter.Value() - if err := s.cleanupTx(lock, startTxIDBytes, startStatusBytes, txIter); err != nil { - return err - } - - numPruned := 1 - for statusIter.Next() { - txIDBytes := statusIter.Key() - statusBytes := statusIter.Value() - if err := s.cleanupTx(lock, txIDBytes, statusBytes, txIter); err != nil { - return err - } - - numPruned++ - - if numPruned%pruneCommitLimit == 0 { - // We must hold the lock during committing to make sure we don't - // attempt to commit to disk while a block is concurrently being - // accepted. 
- lock.Lock() - err := utils.Err( - s.Commit(), - statusIter.Error(), - txIter.Error(), - ) - lock.Unlock() - if err != nil { - return err - } - - // We release the iterators here to allow the underlying database to - // clean up deleted state. - statusIter.Release() - txIter.Release() - - now := time.Now() - if now.Sub(lastUpdate) > pruneUpdateFrequency { - lastUpdate = now - - progress := timer.ProgressFromHash(txIDBytes) - eta := timer.EstimateETA( - startTime, - progress-startProgress, - math.MaxUint64-startProgress, - ) - log.Info("committing state pruning", - zap.Int("numPruned", numPruned), - zap.Duration("eta", eta), - ) - } - - // We take the minimum here because it's possible that the node is - // currently bootstrapping. This would mean that grabbing the lock - // could take an extremely long period of time; which we should not - // delay processing for. - pruneDuration := now.Sub(lastCommit) - sleepDuration := min( - pruneCommitSleepMultiplier*pruneDuration, - pruneCommitSleepCap, - ) - time.Sleep(sleepDuration) - - // Make sure not to include the sleep duration into the next prune - // duration. - lastCommit = time.Now() - - // We shouldn't need to grab the lock here, but doing so ensures - // that we see a consistent view across both the statusDB and the - // txDB. - lock.Lock() - statusIter = s.statusDB.NewIteratorWithStart(txIDBytes) - txIter = s.txDB.NewIteratorWithStart(txIDBytes) - lock.Unlock() - } - } - - lock.Lock() - defer lock.Unlock() - - err := utils.Err( - s.Commit(), - statusIter.Error(), - txIter.Error(), - ) - - // Make sure we flush the original cache before re-enabling it to prevent - // surfacing any stale data. - oldTxCache.Flush() - s.statusesPruned = true - s.txCache = oldTxCache - - log.Info("finished state pruning", - zap.Int("numPruned", numPruned), - zap.Duration("duration", time.Since(startTime)), - ) - - return err -} - -// Assumes [lock] is unlocked. 
-func (s *state) cleanupTx(lock sync.Locker, txIDBytes []byte, statusBytes []byte, txIter database.Iterator) error { - // After the linearization, we write txs to disk without statuses to mark - // them as accepted. This means that there may be more txs than statuses and - // we need to skip over them. - // - // Note: We do not need to remove UTXOs consumed after the linearization, as - // those UTXOs are guaranteed to have already been deleted. - if err := skipTo(txIter, txIDBytes); err != nil { - return err - } - // txIter.Key() is now `txIDBytes` - - statusInt, err := database.ParseUInt32(statusBytes) - if err != nil { - return err - } - status := choices.Status(statusInt) - - if status == choices.Accepted { - txBytes := txIter.Value() - tx, err := s.parser.ParseGenesisTx(txBytes) - if err != nil { - return err - } - - utxos := tx.Unsigned.InputUTXOs() - - // Locking is done here to make sure that any concurrent verification is - // performed with a valid view of the state. - lock.Lock() - defer lock.Unlock() - - // Remove all the UTXOs consumed by the accepted tx. Technically we only - // need to remove UTXOs consumed by operations, but it's easy to just - // remove all of them. - for _, UTXO := range utxos { - if err := s.utxoState.DeleteUTXO(UTXO.InputID()); err != nil { - return err - } - } - } else { - lock.Lock() - defer lock.Unlock() - - // This tx wasn't accepted, so we can remove it entirely from disk. - if err := s.txDB.Delete(txIDBytes); err != nil { - return err - } - } - // By removing the status, we will treat the tx as accepted if it is still - // on disk. - return s.statusDB.Delete(txIDBytes) -} - -// skipTo advances [iter] until its key is equal to [targetKey]. If [iter] does -// not contain [targetKey] an error will be returned. -// -// Note: [iter.Next()] will always be called at least once. 
-func skipTo(iter database.Iterator, targetKey []byte) error { - for { - if !iter.Next() { - return fmt.Errorf("%w: 0x%x", database.ErrNotFound, targetKey) - } - key := iter.Key() - switch bytes.Compare(targetKey, key) { - case -1: - return fmt.Errorf("%w: 0x%x", database.ErrNotFound, targetKey) - case 0: - return nil - } - } -} - func (s *state) Checksums() (ids.ID, ids.ID) { return s.txChecksum, s.utxoState.Checksum() } @@ -848,27 +513,9 @@ func (s *state) initTxChecksum() error { txIt := s.txDB.NewIterator() defer txIt.Release() - statusIt := s.statusDB.NewIterator() - defer statusIt.Release() - statusHasNext := statusIt.Next() for txIt.Next() { txIDBytes := txIt.Key() - if statusHasNext { // if status was exhausted, everything is accepted - statusIDBytes := statusIt.Key() - if bytes.Equal(txIDBytes, statusIDBytes) { // if the status key doesn't match this was marked as accepted - statusInt, err := database.ParseUInt32(statusIt.Value()) - if err != nil { - return err - } - - statusHasNext = statusIt.Next() // we processed the txID, so move on to the next status - - if choices.Status(statusInt) != choices.Accepted { // the status isn't accepted, so we skip the txID - continue - } - } - } txID, err := ids.ToID(txIDBytes) if err != nil { @@ -878,14 +525,7 @@ func (s *state) initTxChecksum() error { s.updateTxChecksum(txID) } - if statusHasNext { - return errStatusWithoutTx - } - - return utils.Err( - txIt.Error(), - statusIt.Error(), - ) + return txIt.Error() } func (s *state) updateTxChecksum(modifiedID ids.ID) { diff --git a/vms/avm/state/state_test.go b/vms/avm/state/state_test.go index 3657da0e3b2f..a6170c62c405 100644 --- a/vms/avm/state/state_test.go +++ b/vms/avm/state/state_test.go @@ -38,7 +38,6 @@ var ( func init() { var err error parser, err = block.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index b17604b2ba62..35744fdc63e8 100644 --- a/vms/avm/state_test.go +++ 
b/vms/avm/state_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "math" "testing" @@ -24,6 +23,7 @@ func TestSetsAndGets(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ + fork: latest, additionalFxs: []*common.Fx{{ ID: ids.GenerateTestID(), Fx: &FxTest{ @@ -34,10 +34,7 @@ func TestSetsAndGets(t *testing.T) { }, }}, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ @@ -86,6 +83,7 @@ func TestSetsAndGets(t *testing.T) { func TestFundingNoAddresses(t *testing.T) { env := setup(t, &envConfig{ + fork: latest, additionalFxs: []*common.Fx{{ ID: ids.GenerateTestID(), Fx: &FxTest{ @@ -96,10 +94,7 @@ func TestFundingNoAddresses(t *testing.T) { }, }}, }) - defer func() { - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ @@ -118,6 +113,7 @@ func TestFundingAddresses(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ + fork: latest, additionalFxs: []*common.Fx{{ ID: ids.GenerateTestID(), Fx: &FxTest{ @@ -128,10 +124,7 @@ func TestFundingAddresses(t *testing.T) { }, }}, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ diff --git a/vms/avm/static_service.go b/vms/avm/static_service.go index 31a7a5885a62..35a3554ef1d6 100644 --- a/vms/avm/static_service.go +++ b/vms/avm/static_service.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "net/http" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" @@ -79,7 +78,6 @@ type BuildGenesisReply struct { // referenced in the UTXO. 
func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, &nftfx.Fx{}, diff --git a/vms/avm/txs/base_tx_test.go b/vms/avm/txs/base_tx_test.go index 5fb40642433a..5454554ba3d8 100644 --- a/vms/avm/txs/base_tx_test.go +++ b/vms/avm/txs/base_tx_test.go @@ -5,7 +5,6 @@ package txs import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -127,7 +126,6 @@ func TestBaseTxSerialization(t *testing.T) { }}} parser, err := NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, diff --git a/vms/avm/txs/create_asset_tx_test.go b/vms/avm/txs/create_asset_tx_test.go index 9ef548eedea2..83e6c99914fe 100644 --- a/vms/avm/txs/create_asset_tx_test.go +++ b/vms/avm/txs/create_asset_tx_test.go @@ -5,7 +5,6 @@ package txs import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -195,7 +194,6 @@ func TestCreateAssetTxSerialization(t *testing.T) { }} parser, err := NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, @@ -367,7 +365,6 @@ func TestCreateAssetTxSerializationAgain(t *testing.T) { } parser, err := NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, diff --git a/vms/avm/txs/executor/executor_test.go b/vms/avm/txs/executor/executor_test.go index d110e4a24a59..6be98183c1ca 100644 --- a/vms/avm/txs/executor/executor_test.go +++ b/vms/avm/txs/executor/executor_test.go @@ -5,7 +5,6 @@ package executor import ( "testing" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -38,7 +37,6 @@ func TestBaseTxExecutor(t *testing.T) { secpFx := &secp256k1fx.Fx{} parser, err := block.NewParser( - time.Time{}, []fxs.Fx{secpFx}, ) require.NoError(err) @@ -146,7 +144,6 @@ func TestCreateAssetTxExecutor(t *testing.T) { secpFx := &secp256k1fx.Fx{} parser, err := block.NewParser( - time.Time{}, []fxs.Fx{secpFx}, ) require.NoError(err) @@ -292,7 +289,6 @@ func TestOperationTxExecutor(t 
*testing.T) { secpFx := &secp256k1fx.Fx{} parser, err := block.NewParser( - time.Time{}, []fxs.Fx{secpFx}, ) require.NoError(err) diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index 5187dd9a38ad..db89e1e5a5e9 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -6,7 +6,6 @@ package executor import ( "reflect" "testing" - "time" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -36,7 +35,6 @@ func TestSemanticVerifierBaseTx(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( - time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -394,7 +392,6 @@ func TestSemanticVerifierExportTx(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( - time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -763,7 +760,6 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) secpFx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( - time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, @@ -880,7 +876,6 @@ func TestSemanticVerifierImportTx(t *testing.T) { typeToFxIndex := make(map[reflect.Type]int) fx := &secp256k1fx.Fx{} parser, err := txs.NewCustomParser( - time.Time{}, typeToFxIndex, new(mockable.Clock), logging.NoWarn{}, diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index 108ac9e94a60..c5762a81c74b 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -7,7 +7,6 @@ import ( "math" "strings" "testing" - "time" "github.com/stretchr/testify/require" @@ -15,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" 
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -30,6 +30,7 @@ var ( feeConfig = config.Config{ TxFee: 2, CreateAssetTxFee: 3, + EUpgradeTime: mockable.MaxTime, } ) @@ -38,7 +39,6 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ fx, }, @@ -411,7 +411,6 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ fx, }, @@ -1021,7 +1020,6 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ fx, }, @@ -1511,7 +1509,6 @@ func TestSyntacticVerifierImportTx(t *testing.T) { fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ fx, }, @@ -1912,7 +1909,6 @@ func TestSyntacticVerifierExportTx(t *testing.T) { fx := &secp256k1fx.Fx{} parser, err := txs.NewParser( - time.Time{}, []fxs.Fx{ fx, }, diff --git a/vms/avm/txs/export_tx_test.go b/vms/avm/txs/export_tx_test.go index 0b714a911fa0..d7c71f6bd632 100644 --- a/vms/avm/txs/export_tx_test.go +++ b/vms/avm/txs/export_tx_test.go @@ -5,7 +5,6 @@ package txs import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -110,7 +109,6 @@ func TestExportTxSerialization(t *testing.T) { }} parser, err := NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, }, diff --git a/vms/avm/txs/import_tx_test.go b/vms/avm/txs/import_tx_test.go index 1f2bbe0ea341..b0cdd8198a5b 100644 --- a/vms/avm/txs/import_tx_test.go +++ b/vms/avm/txs/import_tx_test.go @@ -5,7 +5,6 @@ package txs import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -110,7 +109,6 @@ func TestImportTxSerialization(t *testing.T) { }} parser, err := NewParser( - time.Time{}, []fxs.Fx{ 
&secp256k1fx.Fx{}, }, diff --git a/vms/avm/txs/initial_state_test.go b/vms/avm/txs/initial_state_test.go index 5f61deb3e7c6..48c15ae196c4 100644 --- a/vms/avm/txs/initial_state_test.go +++ b/vms/avm/txs/initial_state_test.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "testing" - "time" "github.com/stretchr/testify/require" @@ -24,7 +23,7 @@ var errTest = errors.New("non-nil error") func TestInitialStateVerifySerialization(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) @@ -81,7 +80,7 @@ func TestInitialStateVerifySerialization(t *testing.T) { func TestInitialStateVerifyNil(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 @@ -94,7 +93,7 @@ func TestInitialStateVerifyNil(t *testing.T) { func TestInitialStateVerifyUnknownFxID(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 @@ -109,7 +108,7 @@ func TestInitialStateVerifyUnknownFxID(t *testing.T) { func TestInitialStateVerifyNilOutput(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 @@ -125,7 +124,7 @@ func TestInitialStateVerifyNilOutput(t *testing.T) { func TestInitialStateVerifyInvalidOutput(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&avax.TestState{})) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) 
@@ -142,7 +141,7 @@ func TestInitialStateVerifyInvalidOutput(t *testing.T) { func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&avax.TestTransferable{})) m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(CodecVersion, c)) diff --git a/vms/avm/txs/mempool/mempool.go b/vms/avm/txs/mempool/mempool.go index 4ac275a21305..5825c883371d 100644 --- a/vms/avm/txs/mempool/mempool.go +++ b/vms/avm/txs/mempool/mempool.go @@ -4,81 +4,29 @@ package mempool import ( - "errors" - "fmt" - "sync" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/setmap" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/txs" -) - -const ( - // MaxTxSize is the maximum number of bytes a transaction can use to be - // allowed into the mempool. - MaxTxSize = 64 * units.KiB - - // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache - droppedTxIDsCacheSize = 64 - // maxMempoolSize is the maximum number of bytes allowed in the mempool - maxMempoolSize = 64 * units.MiB + txmempool "github.com/ava-labs/avalanchego/vms/txs/mempool" ) -var ( - _ Mempool = (*mempool)(nil) - - ErrDuplicateTx = errors.New("duplicate tx") - ErrTxTooLarge = errors.New("tx too large") - ErrMempoolFull = errors.New("mempool is full") - ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") -) +var _ Mempool = (*mempool)(nil) // Mempool contains transactions that have not yet been put into a block. type Mempool interface { - Add(tx *txs.Tx) error - Get(txID ids.ID) (*txs.Tx, bool) - // Remove [txs] and any conflicts of [txs] from the mempool. 
- Remove(txs ...*txs.Tx) - - // Peek returns the oldest tx in the mempool. - Peek() (tx *txs.Tx, exists bool) - - // Iterate over transactions from oldest to newest until the function - // returns false or there are no more transactions. - Iterate(f func(tx *txs.Tx) bool) + txmempool.Mempool[*txs.Tx] // RequestBuildBlock notifies the consensus engine that a block should be // built if there is at least one transaction in the mempool. RequestBuildBlock() - - // Note: Dropped txs are added to droppedTxIDs but not evicted from - // unissued. This allows previously dropped txs to be possibly reissued. - MarkDropped(txID ids.ID, reason error) - GetDropReason(txID ids.ID) error - - // Len returns the number of txs in the mempool. - Len() int } type mempool struct { - lock sync.RWMutex - unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] - consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs - bytesAvailable int - droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> Verification error + txmempool.Mempool[*txs.Tx] toEngine chan<- common.Message - - numTxs prometheus.Gauge - bytesAvailableMetric prometheus.Gauge } func New( @@ -86,128 +34,21 @@ func New( registerer prometheus.Registerer, toEngine chan<- common.Message, ) (Mempool, error) { - m := &mempool{ - unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), - consumedUTXOs: setmap.New[ids.ID, ids.ID](), - bytesAvailable: maxMempoolSize, - droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, - toEngine: toEngine, - numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "count", - Help: "Number of transactions in the mempool", - }), - bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bytes_available", - Help: "Number of bytes of space currently available in the mempool", - }), + metrics, err := txmempool.NewMetrics(namespace, registerer) + if err != nil { + return nil, err } - 
m.bytesAvailableMetric.Set(maxMempoolSize) - - err := utils.Err( - registerer.Register(m.numTxs), - registerer.Register(m.bytesAvailableMetric), + pool := txmempool.New[*txs.Tx]( + metrics, ) - return m, err -} - -func (m *mempool) Add(tx *txs.Tx) error { - txID := tx.ID() - - m.lock.Lock() - defer m.lock.Unlock() - - if _, ok := m.unissuedTxs.Get(txID); ok { - return fmt.Errorf("%w: %s", ErrDuplicateTx, txID) - } - - txSize := len(tx.Bytes()) - if txSize > MaxTxSize { - return fmt.Errorf("%w: %s size (%d) > max size (%d)", - ErrTxTooLarge, - txID, - txSize, - MaxTxSize, - ) - } - if txSize > m.bytesAvailable { - return fmt.Errorf("%w: %s size (%d) > available space (%d)", - ErrMempoolFull, - txID, - txSize, - m.bytesAvailable, - ) - } - - inputs := tx.Unsigned.InputIDs() - if m.consumedUTXOs.HasOverlap(inputs) { - return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) - } - - m.bytesAvailable -= txSize - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - - m.unissuedTxs.Put(txID, tx) - m.numTxs.Inc() - - // Mark these UTXOs as consumed in the mempool - m.consumedUTXOs.Put(txID, inputs) - - // An added tx must not be marked as dropped. - m.droppedTxIDs.Evict(txID) - return nil -} - -func (m *mempool) Get(txID ids.ID) (*txs.Tx, bool) { - tx, ok := m.unissuedTxs.Get(txID) - return tx, ok -} - -func (m *mempool) Remove(txs ...*txs.Tx) { - m.lock.Lock() - defer m.lock.Unlock() - - for _, tx := range txs { - txID := tx.ID() - // If the transaction is in the mempool, remove it. - if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { - m.unissuedTxs.Delete(txID) - m.bytesAvailable += len(tx.Bytes()) - continue - } - - // If the transaction isn't in the mempool, remove any conflicts it has. 
- inputs := tx.Unsigned.InputIDs() - for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { - tx, _ := m.unissuedTxs.Get(removed.Key) - m.unissuedTxs.Delete(removed.Key) - m.bytesAvailable += len(tx.Bytes()) - } - } - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - m.numTxs.Set(float64(m.unissuedTxs.Len())) -} - -func (m *mempool) Peek() (*txs.Tx, bool) { - _, tx, exists := m.unissuedTxs.Oldest() - return tx, exists -} - -func (m *mempool) Iterate(f func(*txs.Tx) bool) { - m.lock.RLock() - defer m.lock.RUnlock() - - it := m.unissuedTxs.NewIterator() - for it.Next() { - if !f(it.Value()) { - return - } - } + return &mempool{ + Mempool: pool, + toEngine: toEngine, + }, nil } func (m *mempool) RequestBuildBlock() { - if m.unissuedTxs.Len() == 0 { + if m.Len() == 0 { return } @@ -216,30 +57,3 @@ func (m *mempool) RequestBuildBlock() { default: } } - -func (m *mempool) MarkDropped(txID ids.ID, reason error) { - if errors.Is(reason, ErrMempoolFull) { - return - } - - m.lock.RLock() - defer m.lock.RUnlock() - - if _, ok := m.unissuedTxs.Get(txID); ok { - return - } - - m.droppedTxIDs.Put(txID, reason) -} - -func (m *mempool) GetDropReason(txID ids.ID) error { - err, _ := m.droppedTxIDs.Get(txID) - return err -} - -func (m *mempool) Len() int { - m.lock.RLock() - defer m.lock.RUnlock() - - return m.unissuedTxs.Len() -} diff --git a/vms/avm/txs/mempool/mempool_test.go b/vms/avm/txs/mempool/mempool_test.go index 410d2b769fad..1979378267b5 100644 --- a/vms/avm/txs/mempool/mempool_test.go +++ b/vms/avm/txs/mempool/mempool_test.go @@ -4,7 +4,6 @@ package mempool import ( - "errors" "testing" "github.com/prometheus/client_golang/prometheus" @@ -17,233 +16,15 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" ) -func TestAdd(t *testing.T) { - tx0 := newTx(0, 32) - - tests := []struct { - name string - initialTxs []*txs.Tx - tx *txs.Tx - err error - dropReason error - }{ - { - name: "successfully add tx", - initialTxs: nil, - tx: tx0, - err: 
nil, - dropReason: nil, - }, - { - name: "attempt adding duplicate tx", - initialTxs: []*txs.Tx{tx0}, - tx: tx0, - err: ErrDuplicateTx, - dropReason: nil, - }, - { - name: "attempt adding too large tx", - initialTxs: nil, - tx: newTx(0, MaxTxSize+1), - err: ErrTxTooLarge, - dropReason: ErrTxTooLarge, - }, - { - name: "attempt adding tx when full", - initialTxs: newTxs(maxMempoolSize/MaxTxSize, MaxTxSize), - tx: newTx(maxMempoolSize/MaxTxSize, MaxTxSize), - err: ErrMempoolFull, - dropReason: nil, - }, - { - name: "attempt adding conflicting tx", - initialTxs: []*txs.Tx{tx0}, - tx: newTx(0, 32), - err: ErrConflictsWithOtherTx, - dropReason: ErrConflictsWithOtherTx, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require := require.New(t) - - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - nil, - ) - require.NoError(err) - - for _, tx := range test.initialTxs { - require.NoError(mempool.Add(tx)) - } - - err = mempool.Add(test.tx) - require.ErrorIs(err, test.err) - - txID := test.tx.ID() - - if err != nil { - mempool.MarkDropped(txID, err) - } - - err = mempool.GetDropReason(txID) - require.ErrorIs(err, test.dropReason) - }) - } -} - -func TestGet(t *testing.T) { - require := require.New(t) - - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - nil, - ) - require.NoError(err) - - tx := newTx(0, 32) - txID := tx.ID() - - _, exists := mempool.Get(txID) - require.False(exists) - - require.NoError(mempool.Add(tx)) - - returned, exists := mempool.Get(txID) - require.True(exists) - require.Equal(tx, returned) - - mempool.Remove(tx) - - _, exists = mempool.Get(txID) - require.False(exists) -} - -func TestPeek(t *testing.T) { - require := require.New(t) - - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - nil, - ) - require.NoError(err) - - _, exists := mempool.Peek() - require.False(exists) - - tx0 := newTx(0, 32) - tx1 := newTx(1, 32) - - require.NoError(mempool.Add(tx0)) - 
require.NoError(mempool.Add(tx1)) - - tx, exists := mempool.Peek() - require.True(exists) - require.Equal(tx, tx0) - - mempool.Remove(tx0) - - tx, exists = mempool.Peek() - require.True(exists) - require.Equal(tx, tx1) - - mempool.Remove(tx0) - - tx, exists = mempool.Peek() - require.True(exists) - require.Equal(tx, tx1) - - mempool.Remove(tx1) - - _, exists = mempool.Peek() - require.False(exists) -} - -func TestRemoveConflict(t *testing.T) { - require := require.New(t) - - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - nil, - ) - require.NoError(err) - - tx := newTx(0, 32) - txConflict := newTx(0, 32) - - require.NoError(mempool.Add(tx)) - - returnedTx, exists := mempool.Peek() - require.True(exists) - require.Equal(returnedTx, tx) - - mempool.Remove(txConflict) - - _, exists = mempool.Peek() - require.False(exists) -} - -func TestIterate(t *testing.T) { - require := require.New(t) - - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - nil, - ) - require.NoError(err) - - var ( - iteratedTxs []*txs.Tx - maxLen = 2 - ) - addTxs := func(tx *txs.Tx) bool { - iteratedTxs = append(iteratedTxs, tx) - return len(iteratedTxs) < maxLen - } - mempool.Iterate(addTxs) - require.Empty(iteratedTxs) - - tx0 := newTx(0, 32) - require.NoError(mempool.Add(tx0)) - - mempool.Iterate(addTxs) - require.Equal([]*txs.Tx{tx0}, iteratedTxs) - - tx1 := newTx(1, 32) - require.NoError(mempool.Add(tx1)) - - iteratedTxs = nil - mempool.Iterate(addTxs) - require.Equal([]*txs.Tx{tx0, tx1}, iteratedTxs) - - tx2 := newTx(2, 32) - require.NoError(mempool.Add(tx2)) - - iteratedTxs = nil - mempool.Iterate(addTxs) - require.Equal([]*txs.Tx{tx0, tx1}, iteratedTxs) - - mempool.Remove(tx0, tx2) - - iteratedTxs = nil - mempool.Iterate(addTxs) - require.Equal([]*txs.Tx{tx1}, iteratedTxs) +func newMempool(toEngine chan<- common.Message) (Mempool, error) { + return New("mempool", prometheus.NewRegistry(), toEngine) } func TestRequestBuildBlock(t *testing.T) { require := 
require.New(t) toEngine := make(chan common.Message, 1) - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - toEngine, - ) + mempool, err := newMempool(toEngine) require.NoError(err) mempool.RequestBuildBlock() @@ -270,40 +51,6 @@ func TestRequestBuildBlock(t *testing.T) { } } -func TestDropped(t *testing.T) { - require := require.New(t) - - mempool, err := New( - "mempool", - prometheus.NewRegistry(), - nil, - ) - require.NoError(err) - - tx := newTx(0, 32) - txID := tx.ID() - testErr := errors.New("test") - - mempool.MarkDropped(txID, testErr) - - err = mempool.GetDropReason(txID) - require.ErrorIs(err, testErr) - - require.NoError(mempool.Add(tx)) - require.NoError(mempool.GetDropReason(txID)) - - mempool.MarkDropped(txID, testErr) - require.NoError(mempool.GetDropReason(txID)) -} - -func newTxs(num int, size int) []*txs.Tx { - txs := make([]*txs.Tx, num) - for i := range txs { - txs[i] = newTx(uint32(i), size) - } - return txs -} - func newTx(index uint32, size int) *txs.Tx { tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ Ins: []*avax.TransferableInput{{ diff --git a/vms/avm/txs/operation_test.go b/vms/avm/txs/operation_test.go index 3ca4676eb370..ac9cf62530c6 100644 --- a/vms/avm/txs/operation_test.go +++ b/vms/avm/txs/operation_test.go @@ -5,7 +5,6 @@ package txs import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -80,7 +79,7 @@ func TestOperationVerify(t *testing.T) { func TestOperationSorting(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&testOperable{})) m := codec.NewDefaultManager() diff --git a/vms/avm/txs/parser.go b/vms/avm/txs/parser.go index 979c71d8a7c8..c5b7fe19edc0 100644 --- a/vms/avm/txs/parser.go +++ b/vms/avm/txs/parser.go @@ -7,7 +7,6 @@ import ( "fmt" "math" "reflect" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -40,9 +39,8 @@ type parser 
struct { gc linearcodec.Codec } -func NewParser(durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { +func NewParser(fxs []fxs.Fx) (Parser, error) { return NewCustomParser( - durangoTime, make(map[reflect.Type]int), &mockable.Clock{}, logging.NoLog{}, @@ -51,14 +49,13 @@ func NewParser(durangoTime time.Time, fxs []fxs.Fx) (Parser, error) { } func NewCustomParser( - durangoTime time.Time, typeToFxIndex map[reflect.Type]int, clock *mockable.Clock, log logging.Logger, fxs []fxs.Fx, ) (Parser, error) { - gc := linearcodec.NewDefault(time.Time{}) - c := linearcodec.NewDefault(durangoTime) + gc := linearcodec.NewDefault() + c := linearcodec.NewDefault() gcm := codec.NewManager(math.MaxInt32) cm := codec.NewDefaultManager() diff --git a/vms/avm/txs/tx.go b/vms/avm/txs/tx.go index 42e845b07b15..c040d30edb4b 100644 --- a/vms/avm/txs/tx.go +++ b/vms/avm/txs/tx.go @@ -88,6 +88,10 @@ func (t *Tx) Bytes() []byte { return t.bytes } +func (t *Tx) Size() int { + return len(t.bytes) +} + // UTXOs returns the UTXOs transaction is producing. func (t *Tx) UTXOs() []*avax.UTXO { u := utxoGetter{tx: t} @@ -97,6 +101,10 @@ func (t *Tx) UTXOs() []*avax.UTXO { return u.utxos } +func (t *Tx) InputIDs() set.Set[ids.ID] { + return t.Unsigned.InputIDs() +} + func (t *Tx) SignSECP256K1Fx(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(CodecVersion, &t.Unsigned) if err != nil { diff --git a/vms/avm/txs/txstest/builder.go b/vms/avm/txs/txstest/builder.go new file mode 100644 index 000000000000..c52e56cdb5e7 --- /dev/null +++ b/vms/avm/txs/txstest/builder.go @@ -0,0 +1,231 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txstest + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/state" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" +) + +type Builder struct { + utxos *utxos + ctx *builder.Context +} + +func New( + codec codec.Manager, + ctx *snow.Context, + cfg *config.Config, + feeAssetID ids.ID, + state state.State, +) *Builder { + utxos := newUTXOs(ctx, state, ctx.SharedMemory, codec) + return &Builder{ + utxos: utxos, + ctx: newContext(ctx, cfg, feeAssetID), + } +} + +func (b *Builder) CreateAssetTx( + name, symbol string, + denomination byte, + initialStates map[uint32][]verify.State, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewCreateAssetTx( + name, + symbol, + denomination, + initialStates, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed building base tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) BaseTx( + outs []*avax.TransferableOutput, + memo []byte, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewBaseTx( + outs, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + common.WithMemo(memo), + ) + if err 
!= nil { + return nil, fmt.Errorf("failed building base tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) MintNFT( + assetID ids.ID, + payload []byte, + owners []*secp256k1fx.OutputOwners, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewOperationTxMintNFT( + assetID, + payload, + owners, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed minting NFTs: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) MintFTs( + outputs map[ids.ID]*secp256k1fx.TransferOutput, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewOperationTxMintFT( + outputs, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed minting FTs: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) Operation( + ops []*txs.Operation, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + utx, err := xBuilder.NewOperationTx( + ops, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed building operation tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) ImportTx( + sourceChain ids.ID, + to ids.ShortID, + kc *secp256k1fx.Keychain, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + outOwner := &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{to}, + } + + utx, err := 
xBuilder.NewImportTx( + sourceChain, + outOwner, + ) + if err != nil { + return nil, fmt.Errorf("failed building import tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) ExportTx( + destinationChain ids.ID, + to ids.ShortID, + exportedAssetID ids.ID, + exportedAmt uint64, + kc *secp256k1fx.Keychain, + changeAddr ids.ShortID, +) (*txs.Tx, error) { + xBuilder, xSigner := b.builders(kc) + + outputs := []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: exportedAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: exportedAmt, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }} + + utx, err := xBuilder.NewExportTx( + destinationChain, + outputs, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), + ) + if err != nil { + return nil, fmt.Errorf("failed building export tx: %w", err) + } + + return signer.SignUnsigned(context.Background(), xSigner, utx) +} + +func (b *Builder) builders(kc *secp256k1fx.Keychain) (builder.Builder, signer.Signer) { + var ( + addrs = kc.Addresses() + wa = &walletUTXOsAdapter{ + utxos: b.utxos, + addrs: addrs, + } + builder = builder.New(addrs, b.ctx, wa) + signer = signer.New(kc, wa) + ) + return builder, signer +} diff --git a/vms/avm/txs/txstest/context.go b/vms/avm/txs/txstest/context.go new file mode 100644 index 000000000000..ea3b9f2410f4 --- /dev/null +++ b/vms/avm/txs/txstest/context.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txstest + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" +) + +func newContext( + ctx *snow.Context, + cfg *config.Config, + feeAssetID ids.ID, +) *builder.Context { + return &builder.Context{ + NetworkID: ctx.NetworkID, + BlockchainID: ctx.XChainID, + AVAXAssetID: feeAssetID, + BaseTxFee: cfg.TxFee, + CreateAssetTxFee: cfg.CreateAssetTxFee, + } +} diff --git a/vms/avm/txs/txstest/utxos.go b/vms/avm/txs/txstest/utxos.go new file mode 100644 index 000000000000..39b3b712905b --- /dev/null +++ b/vms/avm/txs/txstest/utxos.go @@ -0,0 +1,103 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txstest + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/state" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" +) + +const maxPageSize uint64 = 1024 + +var ( + _ builder.Backend = (*walletUTXOsAdapter)(nil) + _ signer.Backend = (*walletUTXOsAdapter)(nil) +) + +func newUTXOs( + ctx *snow.Context, + state state.State, + sharedMemory atomic.SharedMemory, + codec codec.Manager, +) *utxos { + return &utxos{ + xchainID: ctx.ChainID, + state: state, + sharedMemory: sharedMemory, + codec: codec, + } +} + +type utxos struct { + xchainID ids.ID + state state.State + sharedMemory atomic.SharedMemory + codec codec.Manager +} + +func (u *utxos) UTXOs(addrs set.Set[ids.ShortID], sourceChainID ids.ID) ([]*avax.UTXO, error) { + if sourceChainID == 
u.xchainID { + return avax.GetAllUTXOs(u.state, addrs) + } + + atomicUTXOs, _, _, err := avax.GetAtomicUTXOs( + u.sharedMemory, + u.codec, + sourceChainID, + addrs, + ids.ShortEmpty, + ids.Empty, + int(maxPageSize), + ) + return atomicUTXOs, err +} + +func (u *utxos) GetUTXO(addrs set.Set[ids.ShortID], chainID, utxoID ids.ID) (*avax.UTXO, error) { + if chainID == u.xchainID { + return u.state.GetUTXO(utxoID) + } + + atomicUTXOs, _, _, err := avax.GetAtomicUTXOs( + u.sharedMemory, + u.codec, + chainID, + addrs, + ids.ShortEmpty, + ids.Empty, + int(maxPageSize), + ) + if err != nil { + return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) + } + for _, utxo := range atomicUTXOs { + if utxo.InputID() == utxoID { + return utxo, nil + } + } + return nil, database.ErrNotFound +} + +type walletUTXOsAdapter struct { + utxos *utxos + addrs set.Set[ids.ShortID] +} + +func (w *walletUTXOsAdapter) UTXOs(_ context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) { + return w.utxos.UTXOs(w.addrs, sourceChainID) +} + +func (w *walletUTXOsAdapter) GetUTXO(_ context.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) { + return w.utxos.GetUTXO(w.addrs, chainID, utxoID) +} diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 833bd6f79a4d..6a455132c1a1 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -15,6 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" @@ -27,27 +28,28 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" 
"github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/metrics" "github.com/ava-labs/avalanchego/vms/avm/network" "github.com/ava-labs/avalanchego/vms/avm/state" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/avm/utxo" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" "github.com/ava-labs/avalanchego/vms/components/keystore" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/txs/mempool" blockbuilder "github.com/ava-labs/avalanchego/vms/avm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/avm/block/executor" extensions "github.com/ava-labs/avalanchego/vms/avm/fxs" + avmmetrics "github.com/ava-labs/avalanchego/vms/avm/metrics" txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" + xmempool "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" ) const assetToFxCacheSize = 1024 @@ -65,10 +67,9 @@ type VM struct { config.Config - metrics metrics.Metrics + metrics avmmetrics.Metrics avax.AddressManager - avax.AtomicUTXOManager ids.Aliaser utxo.Spender @@ -173,16 +174,15 @@ func (vm *VM) Initialize( zap.Reflect("config", avmConfig), ) - registerer := prometheus.NewRegistry() - if err := ctx.Metrics.Register(registerer); err != nil { + vm.registerer, err = metrics.MakeAndRegister(ctx.Metrics, "") + if err != nil { return err } - vm.registerer = registerer vm.connectedPeers = make(map[ids.NodeID]*version.Application) // Initialize metrics as soon as possible - vm.metrics, err = metrics.New("", registerer) + vm.metrics, err = avmmetrics.New(vm.registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } @@ -217,7 +217,6 @@ func (vm *VM) Initialize( vm.typeToFxIndex = map[reflect.Type]int{} vm.parser, err = 
block.NewCustomParser( - vm.DurangoTime, vm.typeToFxIndex, &vm.clock, ctx.Log, @@ -228,7 +227,6 @@ func (vm *VM) Initialize( } codec := vm.parser.Codec() - vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, codec) vm.Spender = utxo.NewSpender(&vm.clock, codec) state, err := state.New( @@ -248,7 +246,7 @@ func (vm *VM) Initialize( } vm.walletService.vm = vm - vm.walletService.pendingTxs = linkedhashmap.New[ids.ID, *txs.Tx]() + vm.walletService.pendingTxs = linked.NewHashmap[ids.ID, *txs.Tx]() // use no op impl when disabled in config if avmConfig.IndexTransactions { @@ -391,10 +389,6 @@ func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, erro return vm.state.GetBlockIDAtHeight(height) } -func (*VM) VerifyHeightIndex(context.Context) error { - return nil -} - /* ****************************************************************************** *********************************** DAG VM *********************************** @@ -408,7 +402,7 @@ func (vm *VM) Linearize(ctx context.Context, stopVertexID ids.ID, toEngine chan< return err } - mempool, err := mempool.New("mempool", vm.registerer, toEngine) + mempool, err := xmempool.New("mempool", vm.registerer, toEngine) if err != nil { return fmt.Errorf("failed to create mempool: %w", err) } @@ -431,7 +425,10 @@ func (vm *VM) Linearize(ctx context.Context, stopVertexID ids.ID, toEngine chan< // Invariant: The context lock is not held when calling network.IssueTx. vm.network, err = network.New( - vm.ctx, + vm.ctx.Log, + vm.ctx.NodeID, + vm.ctx.SubnetID, + vm.ctx.ValidatorState, vm.parser, network.NewLockedTxVerifier( &vm.ctx.Lock, @@ -459,23 +456,18 @@ func (vm *VM) Linearize(ctx context.Context, stopVertexID ids.ID, toEngine chan< // handled asynchronously. vm.Atomic.Set(vm.network) - vm.awaitShutdown.Add(1) + vm.awaitShutdown.Add(2) go func() { defer vm.awaitShutdown.Done() - // Invariant: Gossip must never grab the context lock. 
- vm.network.Gossip(vm.onShutdownCtx) + // Invariant: PushGossip must never grab the context lock. + vm.network.PushGossip(vm.onShutdownCtx) }() - go func() { - err := vm.state.Prune(&vm.ctx.Lock, vm.ctx.Log) - if err != nil { - vm.ctx.Log.Warn("state pruning failed", - zap.Error(err), - ) - return - } - vm.ctx.Log.Info("state pruning finished") + defer vm.awaitShutdown.Done() + + // Invariant: PullGossip must never grab the context lock. + vm.network.PullGossip(vm.onShutdownCtx) }() return nil @@ -507,13 +499,13 @@ func (vm *VM) ParseTx(_ context.Context, bytes []byte) (snowstorm.Tx, error) { ****************************************************************************** */ -// issueTx attempts to send a transaction to consensus. +// issueTxFromRPC attempts to send a transaction to consensus. // // Invariant: The context lock is not held // Invariant: This function is only called after Linearize has been called. -func (vm *VM) issueTx(tx *txs.Tx) (ids.ID, error) { +func (vm *VM) issueTxFromRPC(tx *txs.Tx) (ids.ID, error) { txID := tx.ID() - err := vm.network.IssueTx(context.TODO(), tx) + err := vm.network.IssueTxFromRPC(tx) if err != nil && !errors.Is(err, mempool.ErrDuplicateTx) { vm.ctx.Log.Debug("failed to add tx to mempool", zap.Stringer("txID", txID), diff --git a/vms/avm/vm_benchmark_test.go b/vms/avm/vm_benchmark_test.go index 713f809f7f5c..096ed51e13bc 100644 --- a/vms/avm/vm_benchmark_test.go +++ b/vms/avm/vm_benchmark_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "fmt" "math/rand" "testing" @@ -23,15 +22,13 @@ func BenchmarkLoadUser(b *testing.B) { require := require.New(b) env := setup(b, &envConfig{ + fork: latest, keystoreUsers: []*user{{ username: username, password: password, }}, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() user, err := keystore.NewUserFromKeystore(env.vm.ctx.Keystore, username, password) require.NoError(err) @@ -64,23 +61,19 
@@ func BenchmarkLoadUser(b *testing.B) { } // GetAllUTXOsBenchmark is a helper func to benchmark the GetAllUTXOs depending on the size -func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { +func getAllUTXOsBenchmark(b *testing.B, utxoCount int, randSrc rand.Source) { require := require.New(b) - env := setup(b, &envConfig{}) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(b, &envConfig{fork: latest}) + defer env.vm.ctx.Lock.Unlock() addr := ids.GenerateTestShortID() - // #nosec G404 for i := 0; i < utxoCount; i++ { utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), + OutputIndex: uint32(randSrc.Int63()), }, Asset: avax.Asset{ID: env.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ @@ -128,9 +121,10 @@ func BenchmarkGetUTXOs(b *testing.B) { }, } - for _, count := range tests { + for testIdx, count := range tests { + randSrc := rand.NewSource(int64(testIdx)) b.Run(count.name, func(b *testing.B) { - GetAllUTXOsBenchmark(b, count.utxoCount) + getAllUTXOsBenchmark(b, count.utxoCount, randSrc) }) } } diff --git a/vms/avm/vm_regression_test.go b/vms/avm/vm_regression_test.go index c6ac40df845d..9e684e756d5c 100644 --- a/vms/avm/vm_regression_test.go +++ b/vms/avm/vm_regression_test.go @@ -4,16 +4,11 @@ package avm import ( - "context" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -23,95 +18,78 @@ import ( func TestVerifyFxUsage(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, - }) + 
env := setup(t, &envConfig{fork: latest}) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + initialStates := map[uint32][]verify.State{ + 0: { + &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - { - FxIndex: 1, - Outs: []verify.State{ - &nftfx.MintOutput{ - GroupID: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, + }, + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, }, - }} - require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + } + + // Create the asset + createAssetTx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // Mint the NFT + mintNFTTx, err := env.txBuilder.MintNFT( + createAssetTx.ID(), + []byte{'h', 'e', 'l', 'l', 'o'}, // payload + []*secp256k1fx.OutputOwners{{ + 
Threshold: 1, + Addrs: []ids.ShortID{key.Address()}, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: createAssetTx.ID(), - OutputIndex: 1, - }}, - Op: &nftfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - GroupID: 1, - Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - Outputs: []*secp256k1fx.OutputOwners{{}}, - }, - }}, - }} - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - spendTx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: createAssetTx.ID(), - OutputIndex: 0, - }, + // move the NFT + to := keys[2].PublicKey().Address() + spendTx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{{ Asset: avax.Asset{ID: createAssetTx.ID()}, - In: &secp256k1fx.TransferInput{ + Out: &secp256k1fx.TransferOutput{ Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, }, }, }}, - }}} - require.NoError(spendTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + nil, // memo + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, spendTx) } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index d8aeaf3b8743..33af48c483f8 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -19,8 +19,6 @@ import ( "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/vms/avm/config" - "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" 
"github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -115,13 +113,10 @@ func TestFxInitializationFailure(t *testing.T) { func TestIssueTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) + env := setup(t, &envConfig{ + fork: latest, + }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, "AVAX") issueAndAccept(require, env.vm, env.issuer, tx) @@ -132,99 +127,71 @@ func TestIssueNFT(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{{ - FxIndex: 1, - Outs: []verify.State{ - &nftfx.MintOutput{ - GroupID: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - &nftfx.MintOutput{ - GroupID: 2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + // Create the asset + initialStates := map[uint32][]verify.State{ + 1: { + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - }}, - }} - require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + }, + } + + createAssetTx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name 
+ "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // Mint the NFT + mintNFTTx, err := env.txBuilder.MintNFT( + createAssetTx.ID(), + []byte{'h', 'e', 'l', 'l', 'o'}, // payload + []*secp256k1fx.OutputOwners{{ + Threshold: 1, + Addrs: []ids.ShortID{key.Address()}, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: createAssetTx.ID(), - OutputIndex: 0, - }}, - Op: &nftfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - GroupID: 1, - Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - Outputs: []*secp256k1fx.OutputOwners{{}}, - }, - }}, - }} - require.NoError(mintNFTTx.SignNFTFx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, mintNFTTx) - transferNFTTx := &txs.Tx{ - Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: mintNFTTx.ID(), - OutputIndex: 0, - }}, - Op: &nftfx.TransferOperation{ - Input: secp256k1fx.Input{}, - Output: nftfx.TransferOutput{ - GroupID: 1, - Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - OutputOwners: secp256k1fx.OutputOwners{}, - }, - }, - }}, - }, - Creds: []*fxs.FxCredential{ - { - Credential: &nftfx.Credential{}, - }, - }, - } - require.NoError(transferNFTTx.Initialize(env.vm.parser.Codec())) + // Move the NFT + utxos, err := avax.GetAllUTXOs(env.vm.state, kc.Addresses()) + require.NoError(err) + transferOp, _, err := env.vm.SpendNFT( + utxos, + kc, + createAssetTx.ID(), + 1, + 
keys[2].Address(), + ) + require.NoError(err) + + transferNFTTx, err := env.txBuilder.Operation( + transferOp, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, transferNFTTx) } @@ -233,92 +200,87 @@ func TestIssueProperty(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: latest, additionalFxs: []*common.Fx{{ ID: propertyfx.ID, Fx: &propertyfx.Fx{}, }}, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() - createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - }}, - Name: "Team Rocket", - Symbol: "TR", - Denomination: 0, - States: []*txs.InitialState{{ - FxIndex: 2, - Outs: []verify.State{ - &propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + // create the asset + initialStates := map[uint32][]verify.State{ + 2: { + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }}, - }} - require.NoError(createAssetTx.Initialize(env.vm.parser.Codec())) + }, + } + + createAssetTx, err := env.txBuilder.CreateAssetTx( + "Team Rocket", // name + "TR", // symbol + 0, // denomination + initialStates, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, createAssetTx) - mintPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // mint the property + mintPropertyOp := &txs.Operation{ + Asset: avax.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*avax.UTXOID{{ + 
TxID: createAssetTx.ID(), + OutputIndex: 1, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: createAssetTx.ID(), - OutputIndex: 0, - }}, - Op: &propertyfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - MintOutput: propertyfx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Op: &propertyfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, - OwnedOutput: propertyfx.OwnedOutput{}, }, - }}, - }} + OwnedOutput: propertyfx.OwnedOutput{}, + }, + } - codec := env.vm.parser.Codec() - require.NoError(mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ - {keys[0]}, - })) + mintPropertyTx, err := env.txBuilder.Operation( + []*txs.Operation{mintPropertyOp}, + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, mintPropertyTx) - burnPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, + // burn the property + burnPropertyOp := &txs.Operation{ + Asset: avax.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*avax.UTXOID{{ + TxID: mintPropertyTx.ID(), + OutputIndex: 2, }}, - Ops: []*txs.Operation{{ - Asset: avax.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*avax.UTXOID{{ - TxID: mintPropertyTx.ID(), - OutputIndex: 1, - }}, - Op: &propertyfx.BurnOperation{Input: secp256k1fx.Input{}}, - }}, - }} + Op: &propertyfx.BurnOperation{Input: secp256k1fx.Input{}}, + } - require.NoError(burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ - {}, - })) + burnPropertyTx, err := env.txBuilder.Operation( + []*txs.Operation{burnPropertyOp}, + kc, + key.Address(), + ) + 
require.NoError(err) issueAndAccept(require, env.vm, env.issuer, burnPropertyTx) } @@ -326,14 +288,10 @@ func TestIssueTxWithFeeAsset(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ + fork: latest, isCustomFeeAsset: true, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() // send first asset tx := newTx(t, env.genesisBytes, env.vm.ctx.ChainID, env.vm.parser, feeAssetName) @@ -344,70 +302,56 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ + fork: latest, isCustomFeeAsset: true, }) env.vm.ctx.Lock.Unlock() - defer func() { - env.vm.ctx.Lock.Lock() - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() // send second asset - feeAssetCreateTx := getCreateTxFromGenesisTest(t, env.genesisBytes, feeAssetName) - createTx := getCreateTxFromGenesisTest(t, env.genesisBytes, otherAssetName) + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{ - // fee asset - { - UTXOID: avax.UTXOID{ - TxID: feeAssetCreateTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: feeAssetCreateTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, + feeAssetCreateTx = getCreateTxFromGenesisTest(t, env.genesisBytes, feeAssetName) + createTx = getCreateTxFromGenesisTest(t, env.genesisBytes, otherAssetName) + ) + + tx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{ + { // fee asset + Asset: avax.Asset{ID: feeAssetCreateTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - // 
issued asset - { - UTXOID: avax.UTXOID{ - TxID: createTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: createTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, + }, + { // issued asset + Asset: avax.Asset{ID: createTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, }, }, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}})) - + nil, // memo + kc, + key.Address(), + ) + require.NoError(err) issueAndAccept(require, env.vm, env.issuer, tx) } func TestVMFormat(t *testing.T) { - env := setup(t, &envConfig{}) - defer func() { - require.NoError(t, env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{ + fork: latest, + }) + defer env.vm.ctx.Lock.Unlock() tests := []struct { in ids.ShortID @@ -432,47 +376,34 @@ func TestTxAcceptAfterParseTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ + fork: latest, notLinearized: true, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() - key := keys[0] - firstTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: env.genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: env.genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: env.genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - env.vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{key.PublicKey().Address()}, - }, + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + ) + + firstTx, err := env.txBuilder.BaseTx( + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: startBalance - env.vm.TxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, - }}, - }, - }} - require.NoError(firstTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + }, + }}, + nil, // memo + kc, + key.Address(), + ) + require.NoError(err) + // let secondTx spend firstTx outputs secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ NetworkID: constants.UnitTestID, @@ -520,75 +451,46 @@ func TestIssueImportTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, + fork: durango, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") avaxID := genesisTx.ID() - key := keys[0] - utxoID := avax.UTXOID{ - TxID: ids.ID{ - 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, - 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, - 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, - 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, - }, - } + var ( + key = keys[0] + kc = secp256k1fx.NewKeychain(key) - txAssetID := avax.Asset{ID: avaxID} - tx := &txs.Tx{Unsigned: &txs.ImportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Outs: []*avax.TransferableOutput{{ - Asset: txAssetID, - Out: &secp256k1fx.TransferOutput{ - Amt: 1000, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }}, - 
SourceChain: constants.PlatformChainID, - ImportedIns: []*avax.TransferableInput{{ + utxoID = avax.UTXOID{ + TxID: ids.ID{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + }, + } + txAssetID = avax.Asset{ID: avaxID} + importedUtxo = &avax.UTXO{ UTXOID: utxoID, Asset: txAssetID, - In: &secp256k1fx.TransferInput{ + Out: &secp256k1fx.TransferOutput{ Amt: 1010, - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - }}, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + } + ) // Provide the platform UTXO: - utxo := &avax.UTXO{ - UTXOID: utxoID, - Asset: txAssetID, - Out: &secp256k1fx.TransferOutput{ - Amt: 1010, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - } - - utxoBytes, err := env.vm.parser.Codec().Marshal(txs.CodecVersion, utxo) + utxoBytes, err := env.vm.parser.Codec().Marshal(txs.CodecVersion, importedUtxo) require.NoError(err) - inputID := utxo.InputID() + inputID := importedUtxo.InputID() require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ env.vm.ctx.ChainID: { PutRequests: []*atomic.Element{{ @@ -601,6 +503,13 @@ func TestIssueImportTx(t *testing.T) { }, })) + tx, err := env.txBuilder.ImportTx( + constants.PlatformChainID, // source chain + key.Address(), + kc, + ) + require.NoError(err) + env.vm.ctx.Lock.Unlock() issueAndAccept(require, env.vm, env.issuer, tx) @@ -620,13 +529,10 @@ func TestForceAcceptImportTx(t *testing.T) { require := require.New(t) env := setup(t, &envConfig{ - vmStaticConfig: &config.Config{}, - notLinearized: true, + fork: durango, + notLinearized: true, }) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - 
env.vm.ctx.Lock.Unlock() - }() + defer env.vm.ctx.Lock.Unlock() genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") avaxID := genesisTx.ID() @@ -649,7 +555,7 @@ func TestForceAcceptImportTx(t *testing.T) { Outs: []*avax.TransferableOutput{{ Asset: txAssetID, Out: &secp256k1fx.TransferOutput{ - Amt: 1000, + Amt: 10, OutputOwners: secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, @@ -697,45 +603,28 @@ func TestImportTxNotState(t *testing.T) { func TestIssueExportTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{fork: durango}) + defer env.vm.ctx.Lock.Unlock() genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") - avaxID := genesisTx.ID() - key := keys[0] - tx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - env.vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + var ( + avaxID = genesisTx.ID() + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + to = key.PublicKey().Address() + changeAddr = to + ) + + tx, err := env.txBuilder.ExportTx( + constants.PlatformChainID, + to, // to + avaxID, + startBalance-env.vm.TxFee, + kc, + 
changeAddr, + ) + require.NoError(err) peerSharedMemory := env.sharedMemory.NewSharedMemory(constants.PlatformChainID) utxoBytes, _, _, err := peerSharedMemory.Indexed( @@ -772,46 +661,31 @@ func TestIssueExportTx(t *testing.T) { func TestClearForceAcceptedExportTx(t *testing.T) { require := require.New(t) - env := setup(t, &envConfig{}) - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + env := setup(t, &envConfig{ + fork: latest, + }) + defer env.vm.ctx.Lock.Unlock() genesisTx := getCreateTxFromGenesisTest(t, env.genesisBytes, "AVAX") - avaxID := genesisTx.ID() - key := keys[0] - assetID := avax.Asset{ID: avaxID} - tx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: constants.UnitTestID, - BlockchainID: env.vm.ctx.XChainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: assetID, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: assetID, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - env.vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{key.PublicKey().Address()}, - }, - }, - }}, - }} - require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) + var ( + avaxID = genesisTx.ID() + assetID = avax.Asset{ID: avaxID} + key = keys[0] + kc = secp256k1fx.NewKeychain(key) + to = key.PublicKey().Address() + changeAddr = to + ) + + tx, err := env.txBuilder.ExportTx( + constants.PlatformChainID, + to, // to + avaxID, + startBalance-env.vm.TxFee, + kc, + changeAddr, + ) + require.NoError(err) utxo := avax.UTXOID{ TxID: tx.ID(), @@ -826,7 +700,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { }, })) - _, err := peerSharedMemory.Get(env.vm.ctx.ChainID, 
[][]byte{utxoID[:]}) + _, err = peerSharedMemory.Get(env.vm.ctx.ChainID, [][]byte{utxoID[:]}) require.ErrorIs(err, database.ErrNotFound) env.vm.ctx.Lock.Unlock() diff --git a/vms/avm/wallet_service.go b/vms/avm/wallet_service.go index 321bf9e57eb4..871d81580a59 100644 --- a/vms/avm/wallet_service.go +++ b/vms/avm/wallet_service.go @@ -4,7 +4,6 @@ package avm import ( - "context" "errors" "fmt" "net/http" @@ -15,20 +14,20 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/txs/mempool" ) var errMissingUTXO = errors.New("missing utxo") type WalletService struct { vm *VM - pendingTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] + pendingTxs *linked.Hashmap[ids.ID, *txs.Tx] } func (w *WalletService) decided(txID ids.ID) { @@ -45,7 +44,7 @@ func (w *WalletService) decided(txID ids.ID) { return } - err := w.vm.network.IssueVerifiedTx(context.TODO(), tx) + err := w.vm.network.IssueTxFromRPCWithoutVerification(tx) if err == nil { w.vm.ctx.Log.Info("issued tx to mempool over wallet API", zap.Stringer("txID", txID), @@ -78,7 +77,7 @@ func (w *WalletService) issue(tx *txs.Tx) (ids.ID, error) { } if w.pendingTxs.Len() == 0 { - if err := w.vm.network.IssueVerifiedTx(context.TODO(), tx); err == nil { + if err := w.vm.network.IssueTxFromRPCWithoutVerification(tx); err == nil { w.vm.ctx.Log.Info("issued tx to mempool over wallet API", zap.Stringer("txID", txID), ) diff --git a/vms/avm/wallet_service_test.go b/vms/avm/wallet_service_test.go index 
7ffdccdaaa20..d4423bd31c7d 100644 --- a/vms/avm/wallet_service_test.go +++ b/vms/avm/wallet_service_test.go @@ -4,12 +4,14 @@ package avm import ( - "context" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/linked" + "github.com/ava-labs/avalanchego/vms/avm/txs" ) func TestWalletService_SendMultiple(t *testing.T) { @@ -18,6 +20,7 @@ func TestWalletService_SendMultiple(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { env := setup(t, &envConfig{ + fork: latest, isCustomFeeAsset: !tc.avaxAsset, keystoreUsers: []*user{{ username: username, @@ -27,10 +30,10 @@ func TestWalletService_SendMultiple(t *testing.T) { }) env.vm.ctx.Lock.Unlock() - defer func() { - require.NoError(env.vm.Shutdown(context.Background())) - env.vm.ctx.Lock.Unlock() - }() + walletService := &WalletService{ + vm: env.vm, + pendingTxs: linked.NewHashmap[ids.ID, *txs.Tx](), + } assetID := env.genesisTx.ID() addr := keys[0].PublicKey().Address() @@ -64,14 +67,14 @@ func TestWalletService_SendMultiple(t *testing.T) { }, } reply := &api.JSONTxIDChangeAddr{} - require.NoError(env.walletService.SendMultiple(nil, args, reply)) + require.NoError(walletService.SendMultiple(nil, args, reply)) require.Equal(changeAddrStr, reply.ChangeAddr) buildAndAccept(require, env.vm, env.issuer, reply.TxID) env.vm.ctx.Lock.Lock() - _, err = env.vm.state.GetTx(reply.TxID) + env.vm.ctx.Lock.Unlock() require.NoError(err) }) } diff --git a/vms/components/avax/asset_test.go b/vms/components/avax/asset_test.go index ad7628ce98b9..ccb6f5549630 100644 --- a/vms/components/avax/asset_test.go +++ b/vms/components/avax/asset_test.go @@ -5,7 +5,6 @@ package avax import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -29,7 +28,7 @@ func TestAssetVerifyEmpty(t *testing.T) { func TestAssetID(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) 
+ c := linearcodec.NewDefault() manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) diff --git a/vms/components/avax/atomic_utxos.go b/vms/components/avax/atomic_utxos.go index 3ac9c166ea3c..f0a854284f22 100644 --- a/vms/components/avax/atomic_utxos.go +++ b/vms/components/avax/atomic_utxos.go @@ -12,41 +12,19 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -var _ AtomicUTXOManager = (*atomicUTXOManager)(nil) - -type AtomicUTXOManager interface { - // GetAtomicUTXOs returns exported UTXOs such that at least one of the - // addresses in [addrs] is referenced. - // - // Returns at most [limit] UTXOs. - // - // Returns: - // * The fetched UTXOs - // * The address associated with the last UTXO fetched - // * The ID of the last UTXO fetched - // * Any error that may have occurred upstream. - GetAtomicUTXOs( - chainID ids.ID, - addrs set.Set[ids.ShortID], - startAddr ids.ShortID, - startUTXOID ids.ID, - limit int, - ) ([]*UTXO, ids.ShortID, ids.ID, error) -} - -type atomicUTXOManager struct { - sm atomic.SharedMemory - codec codec.Manager -} - -func NewAtomicUTXOManager(sm atomic.SharedMemory, codec codec.Manager) AtomicUTXOManager { - return &atomicUTXOManager{ - sm: sm, - codec: codec, - } -} - -func (a *atomicUTXOManager) GetAtomicUTXOs( +// GetAtomicUTXOs returns exported UTXOs such that at least one of the +// addresses in [addrs] is referenced. +// +// Returns at most [limit] UTXOs. +// +// Returns: +// * The fetched UTXOs +// * The address associated with the last UTXO fetched +// * The ID of the last UTXO fetched +// * Any error that may have occurred upstream. 
+func GetAtomicUTXOs( + sharedMemory atomic.SharedMemory, + codec codec.Manager, chainID ids.ID, addrs set.Set[ids.ShortID], startAddr ids.ShortID, @@ -61,7 +39,7 @@ func (a *atomicUTXOManager) GetAtomicUTXOs( i++ } - allUTXOBytes, lastAddr, lastUTXO, err := a.sm.Indexed( + allUTXOBytes, lastAddr, lastUTXO, err := sharedMemory.Indexed( chainID, addrsList, startAddr.Bytes(), @@ -84,7 +62,7 @@ func (a *atomicUTXOManager) GetAtomicUTXOs( utxos := make([]*UTXO, len(allUTXOBytes)) for i, utxoBytes := range allUTXOBytes { utxo := &UTXO{} - if _, err := a.codec.Unmarshal(utxoBytes, utxo); err != nil { + if _, err := codec.Unmarshal(utxoBytes, utxo); err != nil { return nil, ids.ShortID{}, ids.ID{}, fmt.Errorf("error parsing UTXO: %w", err) } utxos[i] = utxo diff --git a/vms/components/avax/metadata.go b/vms/components/avax/metadata.go deleted file mode 100644 index 1630484131a8..000000000000 --- a/vms/components/avax/metadata.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avax - -import ( - "errors" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/vms/components/verify" -) - -var ( - errNilMetadata = errors.New("nil metadata is not valid") - errMetadataNotInitialize = errors.New("metadata was never initialized and is not valid") - - _ verify.Verifiable = (*Metadata)(nil) -) - -// TODO: Delete this once the downstream dependencies have been updated. 
-type Metadata struct { - id ids.ID // The ID of this data - unsignedBytes []byte // Unsigned byte representation of this data - bytes []byte // Byte representation of this data -} - -// Initialize set the bytes and ID -func (md *Metadata) Initialize(unsignedBytes, bytes []byte) { - md.id = hashing.ComputeHash256Array(bytes) - md.unsignedBytes = unsignedBytes - md.bytes = bytes -} - -// ID returns the unique ID of this data -func (md *Metadata) ID() ids.ID { - return md.id -} - -// UnsignedBytes returns the unsigned binary representation of this data -func (md *Metadata) Bytes() []byte { - return md.unsignedBytes -} - -// Bytes returns the binary representation of this data -func (md *Metadata) SignedBytes() []byte { - return md.bytes -} - -func (md *Metadata) Verify() error { - switch { - case md == nil: - return errNilMetadata - case md.id == ids.Empty: - return errMetadataNotInitialize - default: - return nil - } -} diff --git a/vms/components/avax/metadata_test.go b/vms/components/avax/metadata_test.go deleted file mode 100644 index 9569e3e3a465..000000000000 --- a/vms/components/avax/metadata_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avax - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestMetaDataVerifyNil(t *testing.T) { - md := (*Metadata)(nil) - err := md.Verify() - require.ErrorIs(t, err, errNilMetadata) -} - -func TestMetaDataVerifyUninitialized(t *testing.T) { - md := &Metadata{} - err := md.Verify() - require.ErrorIs(t, err, errMetadataNotInitialize) -} diff --git a/vms/components/avax/transferables_test.go b/vms/components/avax/transferables_test.go index 755a0124eb7b..f46d580e839a 100644 --- a/vms/components/avax/transferables_test.go +++ b/vms/components/avax/transferables_test.go @@ -5,7 +5,6 @@ package avax import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -43,7 +42,7 @@ func TestTransferableOutputVerify(t *testing.T) { func TestTransferableOutputSorting(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&TestTransferable{})) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) @@ -85,7 +84,7 @@ func TestTransferableOutputSorting(t *testing.T) { func TestTransferableOutputSerialization(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) @@ -176,7 +175,7 @@ func TestTransferableInputVerify(t *testing.T) { func TestTransferableInputSorting(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() require.NoError(c.RegisterType(&TestTransferable{})) ins := []*TransferableInput{ @@ -233,7 +232,7 @@ func TestTransferableInputSorting(t *testing.T) { func TestTransferableInputSerialization(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() 
require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) diff --git a/vms/components/avax/utxo_fetching_test.go b/vms/components/avax/utxo_fetching_test.go index e36545c19cb7..25fe3fd91155 100644 --- a/vms/components/avax/utxo_fetching_test.go +++ b/vms/components/avax/utxo_fetching_test.go @@ -5,7 +5,6 @@ package avax import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -40,7 +39,7 @@ func TestFetchUTXOs(t *testing.T) { }, } - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) @@ -73,7 +72,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { addr2 := ids.GenerateTestShortID() addrs := set.Of(addr0, addr1) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) diff --git a/vms/components/avax/utxo_id_test.go b/vms/components/avax/utxo_id_test.go index fed21d5ce986..09887c0312e9 100644 --- a/vms/components/avax/utxo_id_test.go +++ b/vms/components/avax/utxo_id_test.go @@ -6,7 +6,6 @@ package avax import ( "math" "testing" - "time" "github.com/stretchr/testify/require" @@ -24,7 +23,7 @@ func TestUTXOIDVerifyNil(t *testing.T) { func TestUTXOID(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() require.NoError(manager.RegisterCodec(codecVersion, c)) diff --git a/vms/components/avax/utxo_state_test.go b/vms/components/avax/utxo_state_test.go index fa4c530e011a..09213bfa1bc6 100644 --- a/vms/components/avax/utxo_state_test.go +++ b/vms/components/avax/utxo_state_test.go @@ -5,7 +5,6 @@ package avax import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -42,7 +41,7 @@ func TestUTXOState(t *testing.T) { } utxoID 
:= utxo.InputID() - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) diff --git a/vms/components/avax/utxo_test.go b/vms/components/avax/utxo_test.go index a79c8fcb6cd6..54f8360173a7 100644 --- a/vms/components/avax/utxo_test.go +++ b/vms/components/avax/utxo_test.go @@ -5,7 +5,6 @@ package avax import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -33,7 +32,7 @@ func TestUTXOVerifyEmpty(t *testing.T) { func TestUTXOSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) diff --git a/vms/components/chain/state_test.go b/vms/components/chain/state_test.go index 8bdda5960f1a..e4376be502aa 100644 --- a/vms/components/chain/state_test.go +++ b/vms/components/chain/state_test.go @@ -16,13 +16,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/metric" ) var ( - _ Block = (*TestBlock)(nil) - errCantBuildBlock = errors.New("can't build new block") errVerify = errors.New("verify failed") errAccept = errors.New("accept failed") @@ -30,36 +28,25 @@ var ( errUnexpectedBlockBytes = errors.New("unexpected block bytes") ) -type TestBlock struct { - *snowman.TestBlock -} - -// SetStatus sets the status of the Block. 
-func (b *TestBlock) SetStatus(status choices.Status) { - b.TestBlock.TestDecidable.StatusV = status -} - // NewTestBlock returns a new test block with height, bytes, and ID derived from [i] // and using [parentID] as the parent block ID -func NewTestBlock(i uint64, parentID ids.ID) *TestBlock { +func NewTestBlock(i uint64, parentID ids.ID) *snowmantest.Block { b := []byte{byte(i)} id := hashing.ComputeHash256Array(b) - return &TestBlock{ - TestBlock: &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: id, - StatusV: choices.Unknown, - }, - HeightV: i, - ParentV: parentID, - BytesV: b, + return &snowmantest.Block{ + TestDecidable: choices.TestDecidable{ + IDV: id, + StatusV: choices.Unknown, }, + HeightV: i, + ParentV: parentID, + BytesV: b, } } // NewTestBlocks generates [numBlocks] consecutive blocks -func NewTestBlocks(numBlocks uint64) []*TestBlock { - blks := make([]*TestBlock, 0, numBlocks) +func NewTestBlocks(numBlocks uint64) []*snowmantest.Block { + blks := make([]*snowmantest.Block, 0, numBlocks) parentID := ids.Empty for i := uint64(0); i < numBlocks; i++ { blks = append(blks, NewTestBlock(i, parentID)) @@ -70,18 +57,17 @@ func NewTestBlocks(numBlocks uint64) []*TestBlock { return blks } -func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( +func createInternalBlockFuncs(blks []*snowmantest.Block) ( func(ctx context.Context, blkID ids.ID) (snowman.Block, error), func(ctx context.Context, b []byte) (snowman.Block, error), func(ctx context.Context, height uint64) (ids.ID, error), ) { - blkMap := make(map[ids.ID]*TestBlock) - blkByteMap := make(map[byte]*TestBlock) + blkMap := make(map[ids.ID]*snowmantest.Block) + blkBytesMap := make(map[string]*snowmantest.Block) for _, blk := range blks { blkMap[blk.ID()] = blk blkBytes := blk.Bytes() - require.Len(t, blkBytes, 1) - blkByteMap[blkBytes[0]] = blk + blkBytesMap[string(blkBytes)] = blk } getBlock := func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -94,11 +80,7 @@ func 
createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( } parseBlk := func(_ context.Context, b []byte) (snowman.Block, error) { - if len(b) != 1 { - return nil, fmt.Errorf("expected block bytes to be length 1, but found %d", len(b)) - } - - blk, ok := blkByteMap[b[0]] + blk, ok := blkBytesMap[string(b)] if !ok { return nil, fmt.Errorf("%w: %x", errUnexpectedBlockBytes, b) } @@ -196,22 +178,10 @@ func TestState(t *testing.T) { blk2 := testBlks[2] // Need to create a block with a different bytes and hash here // to generate a conflict with blk2 - blk3Bytes := []byte{byte(3)} - blk3ID := hashing.ComputeHash256Array(blk3Bytes) - blk3 := &TestBlock{ - TestBlock: &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: blk3ID, - StatusV: choices.Processing, - }, - HeightV: uint64(2), - BytesV: blk3Bytes, - ParentV: blk1.IDV, - }, - } + blk3 := snowmantest.BuildChild(blk1) testBlks = append(testBlks, blk3) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ -290,7 +260,7 @@ func TestBuildBlock(t *testing.T) { genesisBlock.SetStatus(choices.Accepted) blk1 := testBlks[1] - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) buildBlock := func(context.Context) (snowman.Block, error) { // Once the block is built, mark it as processing blk1.SetStatus(choices.Processing) @@ -335,7 +305,7 @@ func TestStateDecideBlock(t *testing.T) { badVerifyBlk.VerifyV = errVerify badRejectBlk := testBlks[3] badRejectBlk.RejectV = errReject - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ 
-388,7 +358,7 @@ func TestStateParent(t *testing.T) { blk1 := testBlks[1] blk2 := testBlks[2] - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ -429,7 +399,7 @@ func TestGetBlockInternal(t *testing.T) { genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ -443,13 +413,13 @@ func TestGetBlockInternal(t *testing.T) { }) genesisBlockInternal := chainState.LastAcceptedBlockInternal() - require.IsType(&TestBlock{}, genesisBlockInternal) + require.IsType(&snowmantest.Block{}, genesisBlockInternal) require.Equal(genesisBlock.ID(), genesisBlockInternal.ID()) blk, err := chainState.GetBlockInternal(context.Background(), genesisBlock.ID()) require.NoError(err) - require.IsType(&TestBlock{}, blk) + require.IsType(&snowmantest.Block{}, blk) require.Equal(genesisBlock.ID(), blk.ID()) } @@ -461,7 +431,7 @@ func TestGetBlockError(t *testing.T) { genesisBlock.SetStatus(choices.Accepted) blk1 := testBlks[1] - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) wrappedGetBlock := func(ctx context.Context, id ids.ID) (snowman.Block, error) { blk, err := getBlock(ctx, id) if err != nil { @@ -498,7 +468,7 @@ func TestParseBlockError(t *testing.T) { genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 
2, MissingCacheSize: 2, @@ -520,7 +490,7 @@ func TestBuildBlockError(t *testing.T) { genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ -546,7 +516,7 @@ func TestMeteredCache(t *testing.T) { genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) config := &Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ -561,7 +531,7 @@ func TestMeteredCache(t *testing.T) { _, err := NewMeteredState(registry, config) require.NoError(err) _, err = NewMeteredState(registry, config) - require.ErrorIs(err, metric.ErrFailedRegistering) + require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } // Test the bytesToIDCache @@ -574,7 +544,7 @@ func TestStateBytesToIDCache(t *testing.T) { blk1 := testBlks[1] blk2 := testBlks[2] - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) buildBlock := func(context.Context) (snowman.Block, error) { require.FailNow("shouldn't have been called") return nil, nil @@ -627,34 +597,13 @@ func TestSetLastAcceptedBlock(t *testing.T) { genesisBlock.SetStatus(choices.Accepted) postSetBlk1ParentID := hashing.ComputeHash256Array([]byte{byte(199)}) - postSetBlk1Bytes := []byte{byte(200)} - postSetBlk2Bytes := []byte{byte(201)} - postSetBlk1 := &TestBlock{ - TestBlock: &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: hashing.ComputeHash256Array(postSetBlk1Bytes), - StatusV: choices.Accepted, - }, - HeightV: 
uint64(200), - BytesV: postSetBlk1Bytes, - ParentV: postSetBlk1ParentID, - }, - } - postSetBlk2 := &TestBlock{ - TestBlock: &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: hashing.ComputeHash256Array(postSetBlk2Bytes), - StatusV: choices.Processing, - }, - HeightV: uint64(201), - BytesV: postSetBlk2Bytes, - ParentV: postSetBlk1.IDV, - }, - } + postSetBlk1 := NewTestBlock(200, postSetBlk1ParentID) + postSetBlk2 := NewTestBlock(201, postSetBlk1.ID()) + // note we do not need to parse postSetBlk1 so it is omitted here testBlks = append(testBlks, postSetBlk2) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ LastAcceptedBlock: genesisBlock, GetBlock: getBlock, @@ -695,7 +644,7 @@ func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { blk1 := testBlks[1] resetBlk := testBlks[4] - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) buildBlock := func(context.Context) (snowman.Block, error) { // Once the block is built, mark it as processing blk1.SetStatus(choices.Processing) @@ -737,7 +686,7 @@ func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { blk2 := testBlks[2] blk2.SetStatus(choices.Accepted) - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, MissingCacheSize: 2, @@ -763,7 +712,7 @@ func TestIsProcessing(t *testing.T) { genesisBlock.SetStatus(choices.Accepted) blk1 := testBlks[1] - getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, 
MissingCacheSize: 2, diff --git a/vms/components/keystore/codec.go b/vms/components/keystore/codec.go index 15576b73e4ea..4e5a01db6dd0 100644 --- a/vms/components/keystore/codec.go +++ b/vms/components/keystore/codec.go @@ -5,7 +5,6 @@ package keystore import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -20,9 +19,9 @@ var ( ) func init() { - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() Codec = codec.NewDefaultManager() - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() LegacyCodec = codec.NewManager(math.MaxInt32) err := utils.Err( diff --git a/vms/components/message/codec.go b/vms/components/message/codec.go deleted file mode 100644 index 5614125b1cee..000000000000 --- a/vms/components/message/codec.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "time" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" -) - -const ( - CodecVersion = 0 - - maxMessageSize = 512 * units.KiB -) - -var Codec codec.Manager - -func init() { - Codec = codec.NewManager(maxMessageSize) - lc := linearcodec.NewDefault(time.Time{}) - - err := utils.Err( - lc.RegisterType(&Tx{}), - Codec.RegisterCodec(CodecVersion, lc), - ) - if err != nil { - panic(err) - } -} diff --git a/vms/components/message/handler.go b/vms/components/message/handler.go deleted file mode 100644 index 2af2f55a3f0c..000000000000 --- a/vms/components/message/handler.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" -) - -var _ Handler = NoopHandler{} - -type Handler interface { - HandleTx(nodeID ids.NodeID, requestID uint32, msg *Tx) error -} - -type NoopHandler struct { - Log logging.Logger -} - -func (h NoopHandler) HandleTx(nodeID ids.NodeID, requestID uint32, _ *Tx) error { - h.Log.Debug("dropping unexpected Tx message", - zap.Stringer("nodeID", nodeID), - zap.Uint32("requestID", requestID), - ) - return nil -} diff --git a/vms/components/message/handler_test.go b/vms/components/message/handler_test.go deleted file mode 100644 index bc2342838efa..000000000000 --- a/vms/components/message/handler_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" -) - -type CounterHandler struct { - Tx int -} - -func (h *CounterHandler) HandleTx(ids.NodeID, uint32, *Tx) error { - h.Tx++ - return nil -} - -func TestHandleTx(t *testing.T) { - require := require.New(t) - - handler := CounterHandler{} - msg := Tx{} - - require.NoError(msg.Handle(&handler, ids.EmptyNodeID, 0)) - require.Equal(1, handler.Tx) -} - -func TestNoopHandler(t *testing.T) { - handler := NoopHandler{ - Log: logging.NoLog{}, - } - - require.NoError(t, handler.HandleTx(ids.EmptyNodeID, 0, nil)) -} diff --git a/vms/components/message/message.go b/vms/components/message/message.go deleted file mode 100644 index a33d4104430a..000000000000 --- a/vms/components/message/message.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/proto" - - "github.com/ava-labs/avalanchego/ids" - - pb "github.com/ava-labs/avalanchego/proto/pb/message" -) - -var ( - _ Message = (*Tx)(nil) - - ErrUnexpectedCodecVersion = errors.New("unexpected codec version") - errUnknownMessageType = errors.New("unknown message type") -) - -type Message interface { - // Handle this message with the correct message handler - Handle(handler Handler, nodeID ids.NodeID, requestID uint32) error - - // initialize should be called whenever a message is built or parsed - initialize([]byte) - - // Bytes returns the binary representation of this message - // - // Bytes should only be called after being initialized - Bytes() []byte -} - -type message []byte - -func (m *message) initialize(bytes []byte) { - *m = bytes -} - -func (m *message) Bytes() []byte { - return *m -} - -func Parse(bytes []byte) (Message, error) { - var ( - msg Message - protoMsg pb.Message - ) - - if err := proto.Unmarshal(bytes, &protoMsg); err == nil { - // This message was encoded with proto. - switch m := protoMsg.GetMessage().(type) { - case *pb.Message_Tx: - msg = &Tx{ - Tx: m.Tx.Tx, - } - default: - return nil, fmt.Errorf("%w: %T", errUnknownMessageType, protoMsg.GetMessage()) - } - } else { - // This message wasn't encoded with proto. - // It must have been encoded with avalanchego's codec. - // TODO remove else statement remove once all nodes support proto encoding. - // i.e. when all nodes are on v1.11.0 or later. 
- version, err := Codec.Unmarshal(bytes, &msg) - if err != nil { - return nil, err - } - if version != CodecVersion { - return nil, ErrUnexpectedCodecVersion - } - } - msg.initialize(bytes) - return msg, nil -} - -func Build(msg Message) ([]byte, error) { - bytes, err := Codec.Marshal(CodecVersion, &msg) - msg.initialize(bytes) - return bytes, err -} diff --git a/vms/components/message/message_test.go b/vms/components/message/message_test.go deleted file mode 100644 index 946241deca9e..000000000000 --- a/vms/components/message/message_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - - "github.com/ava-labs/avalanchego/codec" - - pb "github.com/ava-labs/avalanchego/proto/pb/message" -) - -func TestParseGibberish(t *testing.T) { - randomBytes := []byte{0, 1, 2, 3, 4, 5} - _, err := Parse(randomBytes) - require.ErrorIs(t, err, codec.ErrUnknownVersion) -} - -func TestParseProto(t *testing.T) { - require := require.New(t) - - txBytes := []byte{'y', 'e', 'e', 't'} - protoMsg := pb.Message{ - Message: &pb.Message_Tx{ - Tx: &pb.Tx{ - Tx: txBytes, - }, - }, - } - msgBytes, err := proto.Marshal(&protoMsg) - require.NoError(err) - - parsedMsgIntf, err := Parse(msgBytes) - require.NoError(err) - - require.IsType(&Tx{}, parsedMsgIntf) - parsedMsg := parsedMsgIntf.(*Tx) - - require.Equal(txBytes, parsedMsg.Tx) - - // Parse invalid message - _, err = Parse([]byte{1, 3, 3, 7}) - // Can't parse as proto so it falls back to using avalanchego's codec - require.ErrorIs(err, codec.ErrUnknownVersion) -} diff --git a/vms/components/message/tx.go b/vms/components/message/tx.go deleted file mode 100644 index 4eced1818233..000000000000 --- a/vms/components/message/tx.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package message - -import "github.com/ava-labs/avalanchego/ids" - -var _ Message = (*Tx)(nil) - -type Tx struct { - message - - Tx []byte `serialize:"true"` -} - -func (msg *Tx) Handle(handler Handler, nodeID ids.NodeID, requestID uint32) error { - return handler.HandleTx(nodeID, requestID, msg) -} diff --git a/vms/components/message/tx_test.go b/vms/components/message/tx_test.go deleted file mode 100644 index 8c52828b7977..000000000000 --- a/vms/components/message/tx_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" -) - -func TestTx(t *testing.T) { - require := require.New(t) - - tx := utils.RandomBytes(256 * units.KiB) - builtMsg := Tx{ - Tx: tx, - } - builtMsgBytes, err := Build(&builtMsg) - require.NoError(err) - require.Equal(builtMsgBytes, builtMsg.Bytes()) - - parsedMsgIntf, err := Parse(builtMsgBytes) - require.NoError(err) - require.Equal(builtMsgBytes, parsedMsgIntf.Bytes()) - - require.IsType(&Tx{}, parsedMsgIntf) - parsedMsg := parsedMsgIntf.(*Tx) - - require.Equal(tx, parsedMsg.Tx) -} diff --git a/vms/example/xsvm/Dockerfile b/vms/example/xsvm/Dockerfile new file mode 100644 index 000000000000..8e7c4c5bba9f --- /dev/null +++ b/vms/example/xsvm/Dockerfile @@ -0,0 +1,31 @@ +# The version is supplied as a build argument rather than hard-coded +# to minimize the cost of version changes. +ARG GO_VERSION + +# AVALANCHEGO_NODE_IMAGE needs to identify an existing node image and should include the tag +ARG AVALANCHEGO_NODE_IMAGE + +# ============= Compilation Stage ================ +FROM golang:$GO_VERSION-bullseye AS builder + +WORKDIR /build + +# Copy and download avalanche dependencies using go mod +COPY go.mod . +COPY go.sum . 
+RUN go mod download + +# Copy the code into the container +COPY . . + +# Build xsvm +RUN ./scripts/build_xsvm.sh + +# ============= Cleanup Stage ================ +FROM $AVALANCHEGO_NODE_IMAGE AS execution + +# Copy the xsvm binary to the default plugin path +RUN mkdir -p /root/.avalanchego/plugins +COPY --from=builder /build/build/xsvm /root/.avalanchego/plugins/v3m4wPxaHpvGr8qfMeyK6PRW3idZrPHmYcMTt7oXdK47yurVH + +# The node image's entrypoint will be reused. diff --git a/vms/example/xsvm/api/client.go b/vms/example/xsvm/api/client.go index d9a6a711950a..6395e0aa4cf2 100644 --- a/vms/example/xsvm/api/client.go +++ b/vms/example/xsvm/api/client.go @@ -6,6 +6,7 @@ package api import ( "context" "fmt" + "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" @@ -16,6 +17,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) +const DefaultPollingInterval = 50 * time.Millisecond + // Client defines the xsvm API client. type Client interface { Network( @@ -241,3 +244,34 @@ func (c *client) Message( } return resp.Message, resp.Signature, resp.Message.Initialize() } + +func AwaitTxAccepted( + ctx context.Context, + c Client, + address ids.ShortID, + nonce uint64, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) + defer ticker.Stop() + + for { + currentNonce, err := c.Nonce(ctx, address, options...) + if err != nil { + return err + } + + if currentNonce > nonce { + // The nonce increasing indicates the acceptance of a transaction + // issued with the specified nonce. 
+ return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} diff --git a/vms/example/xsvm/builder/builder.go b/vms/example/xsvm/builder/builder.go index 231679f5df56..dd9648f8cae2 100644 --- a/vms/example/xsvm/builder/builder.go +++ b/vms/example/xsvm/builder/builder.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/vms/example/xsvm/chain" "github.com/ava-labs/avalanchego/vms/example/xsvm/execute" "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" @@ -35,7 +35,7 @@ type builder struct { engineChan chan<- common.Message chain chain.Chain - pendingTxs linkedhashmap.LinkedHashmap[ids.ID, *tx.Tx] + pendingTxs *linked.Hashmap[ids.ID, *tx.Tx] preference ids.ID } @@ -45,7 +45,7 @@ func New(chainContext *snow.Context, engineChan chan<- common.Message, chain cha engineChan: engineChan, chain: chain, - pendingTxs: linkedhashmap.New[ids.ID, *tx.Tx](), + pendingTxs: linked.NewHashmap[ids.ID, *tx.Tx](), preference: chain.LastAccepted(), } } diff --git a/vms/example/xsvm/cmd/chain/create/cmd.go b/vms/example/xsvm/cmd/chain/create/cmd.go index 984ff45df8b0..a08edf507117 100644 --- a/vms/example/xsvm/cmd/chain/create/cmd.go +++ b/vms/example/xsvm/cmd/chain/create/cmd.go @@ -9,8 +9,8 @@ import ( "github.com/spf13/cobra" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/example/xsvm" "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -72,7 +72,7 @@ func createFunc(c *cobra.Command, args []string) error { createChainTxID, err := pWallet.IssueCreateChainTx( config.SubnetID, genesisBytes, - xsvm.ID, + 
constants.XSVMID, nil, config.Name, common.WithContext(ctx), diff --git a/vms/example/xsvm/cmd/issue/export/cmd.go b/vms/example/xsvm/cmd/issue/export/cmd.go index efde479971cc..b8fb7145e4ea 100644 --- a/vms/example/xsvm/cmd/issue/export/cmd.go +++ b/vms/example/xsvm/cmd/issue/export/cmd.go @@ -4,13 +4,14 @@ package export import ( - "encoding/json" + "context" "log" "time" "github.com/spf13/cobra" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/status" "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" ) @@ -32,13 +33,22 @@ func exportFunc(c *cobra.Command, args []string) error { return err } - ctx := c.Context() + txStatus, err := Export(c.Context(), config) + if err != nil { + return err + } + log.Print(txStatus) + return nil +} + +func Export(ctx context.Context, config *Config) (*status.TxIssuance, error) { client := api.NewClient(config.URI, config.SourceChainID.String()) - nonce, err := client.Nonce(ctx, config.PrivateKey.Address()) + address := config.PrivateKey.Address() + nonce, err := client.Nonce(ctx, address) if err != nil { - return err + return nil, err } utx := &tx.Export{ @@ -52,19 +62,23 @@ func exportFunc(c *cobra.Command, args []string) error { } stx, err := tx.Sign(utx, config.PrivateKey) if err != nil { - return err - } - - txJSON, err := json.MarshalIndent(stx, "", " ") - if err != nil { - return err + return nil, err } issueTxStartTime := time.Now() txID, err := client.IssueTx(ctx, stx) if err != nil { - return err + return nil, err } - log.Printf("issued tx %s in %s\n%s\n", txID, time.Since(issueTxStartTime), string(txJSON)) - return nil + + if err := api.AwaitTxAccepted(ctx, client, address, nonce, api.DefaultPollingInterval); err != nil { + return nil, err + } + + return &status.TxIssuance{ + Tx: stx, + TxID: txID, + Nonce: nonce, + StartTime: issueTxStartTime, + }, nil } diff --git a/vms/example/xsvm/cmd/issue/importtx/cmd.go 
b/vms/example/xsvm/cmd/issue/importtx/cmd.go index 5bf104212ef6..e0892e2fdaa3 100644 --- a/vms/example/xsvm/cmd/issue/importtx/cmd.go +++ b/vms/example/xsvm/cmd/issue/importtx/cmd.go @@ -4,7 +4,7 @@ package importtx import ( - "encoding/json" + "context" "fmt" "log" "time" @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/status" "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) @@ -37,8 +38,16 @@ func importFunc(c *cobra.Command, args []string) error { return err } - ctx := c.Context() + txStatus, err := Import(c.Context(), config) + if err != nil { + return err + } + log.Print(txStatus) + + return nil +} +func Import(ctx context.Context, config *Config) (*status.TxIssuance, error) { var ( // Note: here we assume the unsigned message is correct from the last // URI in sourceURIs. In practice this shouldn't be done. @@ -51,15 +60,18 @@ func importFunc(c *cobra.Command, args []string) error { xsClient := api.NewClient(uri, config.SourceChainID) fetchStartTime := time.Now() - var rawSignature []byte + var ( + rawSignature []byte + err error + ) unsignedMessage, rawSignature, err = xsClient.Message(ctx, config.TxID) if err != nil { - return fmt.Errorf("failed to fetch BLS signature from %s with: %w", uri, err) + return nil, fmt.Errorf("failed to fetch BLS signature from %s with: %w", uri, err) } sig, err := bls.SignatureFromBytes(rawSignature) if err != nil { - return fmt.Errorf("failed to parse BLS signature from %s with: %w", uri, err) + return nil, fmt.Errorf("failed to parse BLS signature from %s with: %w", uri, err) } // Note: the public key should not be fetched from the node in practice. 
@@ -67,12 +79,12 @@ func importFunc(c *cobra.Command, args []string) error { infoClient := info.NewClient(uri) _, nodePOP, err := infoClient.GetNodeID(ctx) if err != nil { - return fmt.Errorf("failed to fetch BLS public key from %s with: %w", uri, err) + return nil, fmt.Errorf("failed to fetch BLS public key from %s with: %w", uri, err) } pk := nodePOP.Key() if !bls.Verify(pk, sig, unsignedMessage.Bytes()) { - return fmt.Errorf("failed to verify BLS signature against public key from %s", uri) + return nil, fmt.Errorf("failed to verify BLS signature against public key from %s", uri) } log.Printf("fetched BLS signature from %s in %s\n", uri, time.Since(fetchStartTime)) @@ -89,7 +101,7 @@ func importFunc(c *cobra.Command, args []string) error { aggSignature, err := bls.AggregateSignatures(signatures) if err != nil { - return err + return nil, err } aggSignatureBytes := bls.SignatureToBytes(aggSignature) @@ -100,14 +112,15 @@ func importFunc(c *cobra.Command, args []string) error { signature, ) if err != nil { - return err + return nil, err } client := api.NewClient(config.URI, config.DestinationChainID) - nonce, err := client.Nonce(ctx, config.PrivateKey.Address()) + address := config.PrivateKey.Address() + nonce, err := client.Nonce(ctx, address) if err != nil { - return err + return nil, err } utx := &tx.Import{ @@ -117,19 +130,23 @@ func importFunc(c *cobra.Command, args []string) error { } stx, err := tx.Sign(utx, config.PrivateKey) if err != nil { - return err - } - - txJSON, err := json.MarshalIndent(stx, "", " ") - if err != nil { - return err + return nil, err } issueTxStartTime := time.Now() txID, err := client.IssueTx(ctx, stx) if err != nil { - return err + return nil, err } - log.Printf("issued tx %s in %s\n%s\n", txID, time.Since(issueTxStartTime), string(txJSON)) - return nil + + if err := api.AwaitTxAccepted(ctx, client, address, nonce, api.DefaultPollingInterval); err != nil { + return nil, err + } + + return &status.TxIssuance{ + Tx: stx, + TxID: 
txID, + Nonce: nonce, + StartTime: issueTxStartTime, + }, nil } diff --git a/vms/example/xsvm/cmd/issue/status/status.go b/vms/example/xsvm/cmd/issue/status/status.go new file mode 100644 index 000000000000..2e8187906496 --- /dev/null +++ b/vms/example/xsvm/cmd/issue/status/status.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package status + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" +) + +type TxIssuance struct { + Tx *tx.Tx + TxID ids.ID + Nonce uint64 + StartTime time.Time +} + +func (s *TxIssuance) String() string { + txJSON, err := json.MarshalIndent(s.Tx, "", " ") + if err != nil { + return "failed to marshal transaction: " + err.Error() + } + return fmt.Sprintf("issued tx %s in %s\n%s\n", s.TxID, time.Since(s.StartTime), string(txJSON)) +} diff --git a/vms/example/xsvm/cmd/issue/transfer/cmd.go b/vms/example/xsvm/cmd/issue/transfer/cmd.go index 86c47032a6c0..cd0e9abe48a7 100644 --- a/vms/example/xsvm/cmd/issue/transfer/cmd.go +++ b/vms/example/xsvm/cmd/issue/transfer/cmd.go @@ -4,13 +4,14 @@ package transfer import ( - "encoding/json" + "context" "log" "time" "github.com/spf13/cobra" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" + "github.com/ava-labs/avalanchego/vms/example/xsvm/cmd/issue/status" "github.com/ava-labs/avalanchego/vms/example/xsvm/tx" ) @@ -32,13 +33,22 @@ func transferFunc(c *cobra.Command, args []string) error { return err } - ctx := c.Context() + txStatus, err := Transfer(c.Context(), config) + if err != nil { + return err + } + log.Print(txStatus) + return nil +} + +func Transfer(ctx context.Context, config *Config) (*status.TxIssuance, error) { client := api.NewClient(config.URI, config.ChainID.String()) - nonce, err := client.Nonce(ctx, config.PrivateKey.Address()) + address := config.PrivateKey.Address() + nonce, err := client.Nonce(ctx, 
address) if err != nil { - return err + return nil, err } utx := &tx.Transfer{ @@ -51,19 +61,23 @@ func transferFunc(c *cobra.Command, args []string) error { } stx, err := tx.Sign(utx, config.PrivateKey) if err != nil { - return err - } - - txJSON, err := json.MarshalIndent(stx, "", " ") - if err != nil { - return err + return nil, err } issueTxStartTime := time.Now() txID, err := client.IssueTx(ctx, stx) if err != nil { - return err + return nil, err } - log.Printf("issued tx %s in %s\n%s\n", txID, time.Since(issueTxStartTime), string(txJSON)) - return nil + + if err := api.AwaitTxAccepted(ctx, client, address, nonce, api.DefaultPollingInterval); err != nil { + return nil, err + } + + return &status.TxIssuance{ + Tx: stx, + TxID: txID, + Nonce: nonce, + StartTime: issueTxStartTime, + }, nil } diff --git a/vms/example/xsvm/cmd/version/cmd.go b/vms/example/xsvm/cmd/version/cmd.go index 1c956c6a9b00..471ccfd10aa6 100644 --- a/vms/example/xsvm/cmd/version/cmd.go +++ b/vms/example/xsvm/cmd/version/cmd.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/example/xsvm" ) @@ -29,8 +30,8 @@ func Command() *cobra.Command { func versionFunc(*cobra.Command, []string) error { fmt.Printf( format, - xsvm.Name, - xsvm.ID, + constants.XSVMName, + constants.XSVMID, xsvm.Version, version.RPCChainVMProtocol, ) diff --git a/vms/example/xsvm/constants.go b/vms/example/xsvm/constants.go index eb2199211ef7..7628cc56b176 100644 --- a/vms/example/xsvm/constants.go +++ b/vms/example/xsvm/constants.go @@ -3,19 +3,10 @@ package xsvm -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/version" -) +import "github.com/ava-labs/avalanchego/version" -const Name = "xsvm" - -var ( - ID = ids.ID{'x', 's', 'v', 'm'} - - Version = &version.Semantic{ - Major: 1, - Minor: 0, - Patch: 4, - } -) +var Version = &version.Semantic{ + Major: 1, + 
Minor: 0, + Patch: 4, +} diff --git a/vms/example/xsvm/tx/codec.go b/vms/example/xsvm/tx/codec.go index f61c7bf18098..4ba775abb3f4 100644 --- a/vms/example/xsvm/tx/codec.go +++ b/vms/example/xsvm/tx/codec.go @@ -5,7 +5,6 @@ package tx import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -17,7 +16,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt32) err := utils.Err( diff --git a/vms/example/xsvm/vm.go b/vms/example/xsvm/vm.go index 38f25393c39b..526fc47c499d 100644 --- a/vms/example/xsvm/vm.go +++ b/vms/example/xsvm/vm.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/example/xsvm/api" @@ -124,7 +125,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { ) return map[string]http.Handler{ "": server, - }, server.RegisterService(api, Name) + }, server.RegisterService(api, constants.XSVMName) } func (*VM) HealthCheck(context.Context) (interface{}, error) { @@ -168,10 +169,6 @@ func (vm *VM) BuildBlockWithContext(ctx context.Context, blockContext *smblock.C return vm.builder.BuildBlock(ctx, blockContext) } -func (*VM) VerifyHeightIndex(context.Context) error { - return nil -} - func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { return state.GetBlockIDByHeight(vm.db, height) } diff --git a/vms/metervm/block_metrics.go b/vms/metervm/block_metrics.go index 160d0eee50ad..2bdc247c4062 100644 --- a/vms/metervm/block_metrics.go +++ b/vms/metervm/block_metrics.go @@ -24,7 +24,6 @@ type blockMetrics struct { accept, reject, 
// Height metrics - verifyHeightIndex, getBlockIDAtHeight, // Block verification with context metrics shouldVerifyWithContext, @@ -50,44 +49,42 @@ func (m *blockMetrics) Initialize( supportsBlockBuildingWithContext bool, supportsBatchedFetching bool, supportsStateSync bool, - namespace string, reg prometheus.Registerer, ) error { errs := wrappers.Errs{} - m.buildBlock = newAverager(namespace, "build_block", reg, &errs) - m.buildBlockErr = newAverager(namespace, "build_block_err", reg, &errs) - m.parseBlock = newAverager(namespace, "parse_block", reg, &errs) - m.parseBlockErr = newAverager(namespace, "parse_block_err", reg, &errs) - m.getBlock = newAverager(namespace, "get_block", reg, &errs) - m.getBlockErr = newAverager(namespace, "get_block_err", reg, &errs) - m.setPreference = newAverager(namespace, "set_preference", reg, &errs) - m.lastAccepted = newAverager(namespace, "last_accepted", reg, &errs) - m.verify = newAverager(namespace, "verify", reg, &errs) - m.verifyErr = newAverager(namespace, "verify_err", reg, &errs) - m.accept = newAverager(namespace, "accept", reg, &errs) - m.reject = newAverager(namespace, "reject", reg, &errs) - m.shouldVerifyWithContext = newAverager(namespace, "should_verify_with_context", reg, &errs) - m.verifyWithContext = newAverager(namespace, "verify_with_context", reg, &errs) - m.verifyWithContextErr = newAverager(namespace, "verify_with_context_err", reg, &errs) - m.verifyHeightIndex = newAverager(namespace, "verify_height_index", reg, &errs) - m.getBlockIDAtHeight = newAverager(namespace, "get_block_id_at_height", reg, &errs) + m.buildBlock = newAverager("build_block", reg, &errs) + m.buildBlockErr = newAverager("build_block_err", reg, &errs) + m.parseBlock = newAverager("parse_block", reg, &errs) + m.parseBlockErr = newAverager("parse_block_err", reg, &errs) + m.getBlock = newAverager("get_block", reg, &errs) + m.getBlockErr = newAverager("get_block_err", reg, &errs) + m.setPreference = newAverager("set_preference", reg, &errs) 
+ m.lastAccepted = newAverager("last_accepted", reg, &errs) + m.verify = newAverager("verify", reg, &errs) + m.verifyErr = newAverager("verify_err", reg, &errs) + m.accept = newAverager("accept", reg, &errs) + m.reject = newAverager("reject", reg, &errs) + m.shouldVerifyWithContext = newAverager("should_verify_with_context", reg, &errs) + m.verifyWithContext = newAverager("verify_with_context", reg, &errs) + m.verifyWithContextErr = newAverager("verify_with_context_err", reg, &errs) + m.getBlockIDAtHeight = newAverager("get_block_id_at_height", reg, &errs) if supportsBlockBuildingWithContext { - m.buildBlockWithContext = newAverager(namespace, "build_block_with_context", reg, &errs) - m.buildBlockWithContextErr = newAverager(namespace, "build_block_with_context_err", reg, &errs) + m.buildBlockWithContext = newAverager("build_block_with_context", reg, &errs) + m.buildBlockWithContextErr = newAverager("build_block_with_context_err", reg, &errs) } if supportsBatchedFetching { - m.getAncestors = newAverager(namespace, "get_ancestors", reg, &errs) - m.batchedParseBlock = newAverager(namespace, "batched_parse_block", reg, &errs) + m.getAncestors = newAverager("get_ancestors", reg, &errs) + m.batchedParseBlock = newAverager("batched_parse_block", reg, &errs) } if supportsStateSync { - m.stateSyncEnabled = newAverager(namespace, "state_sync_enabled", reg, &errs) - m.getOngoingSyncStateSummary = newAverager(namespace, "get_ongoing_state_sync_summary", reg, &errs) - m.getLastStateSummary = newAverager(namespace, "get_last_state_summary", reg, &errs) - m.parseStateSummary = newAverager(namespace, "parse_state_summary", reg, &errs) - m.parseStateSummaryErr = newAverager(namespace, "parse_state_summary_err", reg, &errs) - m.getStateSummary = newAverager(namespace, "get_state_summary", reg, &errs) - m.getStateSummaryErr = newAverager(namespace, "get_state_summary_err", reg, &errs) + m.stateSyncEnabled = newAverager("state_sync_enabled", reg, &errs) + m.getOngoingSyncStateSummary 
= newAverager("get_ongoing_state_sync_summary", reg, &errs) + m.getLastStateSummary = newAverager("get_last_state_summary", reg, &errs) + m.parseStateSummary = newAverager("parse_state_summary", reg, &errs) + m.parseStateSummaryErr = newAverager("parse_state_summary_err", reg, &errs) + m.getStateSummary = newAverager("get_state_summary", reg, &errs) + m.getStateSummaryErr = newAverager("get_state_summary_err", reg, &errs) } return errs.Err } diff --git a/vms/metervm/block_vm.go b/vms/metervm/block_vm.go index 73e949180a96..da64f9af01d2 100644 --- a/vms/metervm/block_vm.go +++ b/vms/metervm/block_vm.go @@ -8,7 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -32,10 +31,14 @@ type blockVM struct { ssVM block.StateSyncableVM blockMetrics - clock mockable.Clock + registry prometheus.Registerer + clock mockable.Clock } -func NewBlockVM(vm block.ChainVM) block.ChainVM { +func NewBlockVM( + vm block.ChainVM, + reg prometheus.Registerer, +) block.ChainVM { buildBlockVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) ssVM, _ := vm.(block.StateSyncableVM) @@ -44,6 +47,7 @@ func NewBlockVM(vm block.ChainVM) block.ChainVM { buildBlockVM: buildBlockVM, batchedVM: batchedVM, ssVM: ssVM, + registry: reg, } } @@ -58,31 +62,16 @@ func (vm *blockVM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - registerer := prometheus.NewRegistry() err := vm.blockMetrics.Initialize( vm.buildBlockVM != nil, vm.batchedVM != nil, vm.ssVM != nil, - "", - registerer, + vm.registry, ) if err != nil { return err } - optionalGatherer := metrics.NewOptionalGatherer() - multiGatherer := metrics.NewMultiGatherer() - if err := multiGatherer.Register("metervm", registerer); err != nil { - return err - } - if err := multiGatherer.Register("", optionalGatherer); err 
!= nil { - return err - } - if err := chainCtx.Metrics.Register(multiGatherer); err != nil { - return err - } - chainCtx.Metrics = optionalGatherer - return vm.ChainVM.Initialize(ctx, chainCtx, db, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) } @@ -150,14 +139,6 @@ func (vm *blockVM) LastAccepted(ctx context.Context) (ids.ID, error) { return lastAcceptedID, err } -func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { - start := vm.clock.Time() - err := vm.ChainVM.VerifyHeightIndex(ctx) - end := vm.clock.Time() - vm.blockMetrics.verifyHeightIndex.Observe(float64(end.Sub(start))) - return err -} - func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { start := vm.clock.Time() blockID, err := vm.ChainVM.GetBlockIDAtHeight(ctx, height) diff --git a/vms/metervm/metrics.go b/vms/metervm/metrics.go index d4c9304e7696..4cad7d153f83 100644 --- a/vms/metervm/metrics.go +++ b/vms/metervm/metrics.go @@ -10,9 +10,8 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -func newAverager(namespace, name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { +func newAverager(name string, reg prometheus.Registerer, errs *wrappers.Errs) metric.Averager { return metric.NewAveragerWithErrs( - namespace, name, "time (in ns) of a "+name, reg, diff --git a/vms/metervm/vertex_metrics.go b/vms/metervm/vertex_metrics.go index 67caa50b610e..04096f2ae035 100644 --- a/vms/metervm/vertex_metrics.go +++ b/vms/metervm/vertex_metrics.go @@ -19,16 +19,13 @@ type vertexMetrics struct { reject metric.Averager } -func (m *vertexMetrics) Initialize( - namespace string, - reg prometheus.Registerer, -) error { +func (m *vertexMetrics) Initialize(reg prometheus.Registerer) error { errs := wrappers.Errs{} - m.parse = newAverager(namespace, "parse_tx", reg, &errs) - m.parseErr = newAverager(namespace, "parse_tx_err", reg, &errs) - m.verify = newAverager(namespace, "verify_tx", reg, &errs) - m.verifyErr = 
newAverager(namespace, "verify_tx_err", reg, &errs) - m.accept = newAverager(namespace, "accept", reg, &errs) - m.reject = newAverager(namespace, "reject", reg, &errs) + m.parse = newAverager("parse_tx", reg, &errs) + m.parseErr = newAverager("parse_tx_err", reg, &errs) + m.verify = newAverager("verify_tx", reg, &errs) + m.verifyErr = newAverager("verify_tx_err", reg, &errs) + m.accept = newAverager("accept", reg, &errs) + m.reject = newAverager("reject", reg, &errs) return errs.Err } diff --git a/vms/metervm/vertex_vm.go b/vms/metervm/vertex_vm.go index 8992b4863283..936a688de99d 100644 --- a/vms/metervm/vertex_vm.go +++ b/vms/metervm/vertex_vm.go @@ -8,7 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" @@ -22,16 +21,21 @@ var ( _ snowstorm.Tx = (*meterTx)(nil) ) -func NewVertexVM(vm vertex.LinearizableVMWithEngine) vertex.LinearizableVMWithEngine { +func NewVertexVM( + vm vertex.LinearizableVMWithEngine, + reg prometheus.Registerer, +) vertex.LinearizableVMWithEngine { return &vertexVM{ LinearizableVMWithEngine: vm, + registry: reg, } } type vertexVM struct { vertex.LinearizableVMWithEngine vertexMetrics - clock mockable.Clock + registry prometheus.Registerer + clock mockable.Clock } func (vm *vertexVM) Initialize( @@ -45,24 +49,10 @@ func (vm *vertexVM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - registerer := prometheus.NewRegistry() - if err := vm.vertexMetrics.Initialize("", registerer); err != nil { + if err := vm.vertexMetrics.Initialize(vm.registry); err != nil { return err } - optionalGatherer := metrics.NewOptionalGatherer() - multiGatherer := metrics.NewMultiGatherer() - if err := multiGatherer.Register("metervm", registerer); err != nil { - return err - } - if err := multiGatherer.Register("", optionalGatherer); err != nil { 
- return err - } - if err := chainCtx.Metrics.Register(multiGatherer); err != nil { - return err - } - chainCtx.Metrics = optionalGatherer - return vm.LinearizableVMWithEngine.Initialize( ctx, chainCtx, diff --git a/vms/nftfx/fx_test.go b/vms/nftfx/fx_test.go index 1ed3426f5b11..99a047b11328 100644 --- a/vms/nftfx/fx_test.go +++ b/vms/nftfx/fx_test.go @@ -39,7 +39,7 @@ var ( func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} @@ -56,7 +56,7 @@ func TestFxVerifyMintOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -92,7 +92,7 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -126,7 +126,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -157,7 +157,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -189,7 +189,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := 
time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -220,7 +220,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -258,7 +258,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -296,7 +296,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -346,7 +346,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -387,7 +387,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -435,7 +435,7 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -486,7 +486,7 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { require := require.New(t) vm := 
secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -537,7 +537,7 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -589,7 +589,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -625,7 +625,7 @@ func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) diff --git a/vms/platformvm/api/static_service.go b/vms/platformvm/api/static_service.go index 7b3b5232281d..d2374d74133a 100644 --- a/vms/platformvm/api/static_service.go +++ b/vms/platformvm/api/static_service.go @@ -196,7 +196,7 @@ type BuildGenesisReply struct { Encoding formatting.Encoding `json:"encoding"` } -// beck32ToID takes bech32 address and produces a shortID +// bech32ToID takes bech32 address and produces a shortID func bech32ToID(addrStr string) (ids.ShortID, error) { _, addrBytes, err := address.ParseBech32(addrStr) if err != nil { diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index 77f39fbd0296..a445bb52cb1b 100644 --- a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -171,7 +171,7 @@ func (b *builder) durationToSleep() (time.Duration, error) { return 0, fmt.Errorf("%w: %s", 
errMissingPreferredState, preferredID) } - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(preferredState) if err != nil { return 0, fmt.Errorf("%w of %s: %w", errCalculatingNextStakerTime, preferredID, err) } @@ -216,7 +216,7 @@ func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { return nil, fmt.Errorf("%w: %s", state.ErrMissingParentState, preferredID) } - timestamp, timeWasCapped, err := txexecutor.NextBlockTime(preferredState, b.txExecutorBackend.Clk) + timestamp, timeWasCapped, err := state.NextBlockTime(preferredState, b.txExecutorBackend.Clk) if err != nil { return nil, fmt.Errorf("could not calculate next staker change time: %w", err) } @@ -263,6 +263,19 @@ func buildBlock( forceAdvanceTime bool, parentState state.Chain, ) (block.Block, error) { + blockTxs, err := packBlockTxs( + parentID, + parentState, + builder.Mempool, + builder.txExecutorBackend, + builder.blkManager, + timestamp, + targetBlockSize, + ) + if err != nil { + return nil, fmt.Errorf("failed to pack block txs: %w", err) + } + // Try rewarding stakers whose staking period ends at the new chain time. // This is done first to prioritize advancing the timestamp as quickly as // possible. 
@@ -276,23 +289,6 @@ func buildBlock( return nil, fmt.Errorf("could not build tx to reward staker: %w", err) } - var blockTxs []*txs.Tx - // TODO: Cleanup post-Durango - if builder.txExecutorBackend.Config.IsDurangoActivated(timestamp) { - blockTxs, err = packBlockTxs( - parentID, - parentState, - builder.Mempool, - builder.txExecutorBackend, - builder.blkManager, - timestamp, - targetBlockSize, - ) - if err != nil { - return nil, fmt.Errorf("failed to pack block txs: %w", err) - } - } - return block.NewBanffProposalBlock( timestamp, parentID, @@ -302,19 +298,6 @@ func buildBlock( ) } - blockTxs, err := packBlockTxs( - parentID, - parentState, - builder.Mempool, - builder.txExecutorBackend, - builder.blkManager, - timestamp, - targetBlockSize, - ) - if err != nil { - return nil, fmt.Errorf("failed to pack block txs: %w", err) - } - // If there is no reason to build a block, don't. if len(blockTxs) == 0 && !forceAdvanceTime { builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index e3486f96dca2..eeddc5bc7f9b 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/block" @@ -24,9 +23,12 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" blockexecutor 
"github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestBuildBlockBasic(t *testing.T) { @@ -37,22 +39,22 @@ func TestBuildBlockBasic(t *testing.T) { defer env.ctx.Lock.Unlock() // Create a valid transaction - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) txID := tx.ID() // Issue the transaction env.ctx.Lock.Unlock() - require.NoError(env.network.IssueTx(context.Background(), tx)) + require.NoError(env.network.IssueTxFromRPC(tx)) env.ctx.Lock.Lock() _, ok := env.mempool.Get(txID) require.True(ok) @@ -109,24 +111,41 @@ func TestBuildBlockShouldReward(t *testing.T) { require.NoError(err) // Create a valid [AddPermissionlessValidatorTx] - tx, err := env.txBuilder.NewAddPermissionlessValidatorTx( - defaultValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, + builder, txSigner := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - preFundedKeys[0].PublicKey().Address(), + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + 
Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - preFundedKeys[0].PublicKey().Address(), - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) txID := tx.ID() // Issue the transaction env.ctx.Lock.Unlock() - require.NoError(env.network.IssueTx(context.Background(), tx)) + require.NoError(env.network.IssueTxFromRPC(tx)) env.ctx.Lock.Lock() _, ok := env.mempool.Get(txID) require.True(ok) @@ -232,22 +251,22 @@ func TestBuildBlockForceAdvanceTime(t *testing.T) { defer env.ctx.Lock.Unlock() // Create a valid transaction - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) txID := tx.ID() // Issue the transaction env.ctx.Lock.Unlock() - require.NoError(env.network.IssueTx(context.Background(), tx)) + require.NoError(env.network.IssueTxFromRPC(tx)) env.ctx.Lock.Lock() _, ok := env.mempool.Get(txID) require.True(ok) @@ -279,112 +298,6 @@ func TestBuildBlockForceAdvanceTime(t *testing.T) { require.Equal(nextTime.Unix(), standardBlk.Timestamp().Unix()) } -func TestBuildBlockDropExpiredStakerTxs(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t, latestFork) - env.ctx.Lock.Lock() - defer env.ctx.Lock.Unlock() - - // The [StartTime] in a staker tx is only validated pre-Durango. - // TODO: Delete this test post-Durango activation. 
- env.config.DurangoTime = mockable.MaxTime - - var ( - now = env.backend.Clk.Time() - defaultValidatorStake = 100 * units.MilliAvax - - // Add a validator with StartTime in the future within [MaxFutureStartTime] - validatorStartTime = now.Add(txexecutor.MaxFutureStartTime - 1*time.Second) - validatorEndTime = validatorStartTime.Add(360 * 24 * time.Hour) - ) - - tx1, err := env.txBuilder.NewAddValidatorTx( - defaultValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - ids.GenerateTestNodeID(), - preFundedKeys[0].PublicKey().Address(), - reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - preFundedKeys[0].PublicKey().Address(), - nil, - ) - require.NoError(err) - require.NoError(env.mempool.Add(tx1)) - tx1ID := tx1.ID() - _, ok := env.mempool.Get(tx1ID) - require.True(ok) - - // Add a validator with StartTime before current chain time - validator2StartTime := now.Add(-5 * time.Second) - validator2EndTime := validator2StartTime.Add(360 * 24 * time.Hour) - - tx2, err := env.txBuilder.NewAddValidatorTx( - defaultValidatorStake, - uint64(validator2StartTime.Unix()), - uint64(validator2EndTime.Unix()), - ids.GenerateTestNodeID(), - preFundedKeys[1].PublicKey().Address(), - reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[1]}, - preFundedKeys[1].PublicKey().Address(), - nil, - ) - require.NoError(err) - require.NoError(env.mempool.Add(tx2)) - tx2ID := tx2.ID() - _, ok = env.mempool.Get(tx2ID) - require.True(ok) - - // Add a validator with StartTime in the future past [MaxFutureStartTime] - validator3StartTime := now.Add(txexecutor.MaxFutureStartTime + 5*time.Second) - validator3EndTime := validator2StartTime.Add(360 * 24 * time.Hour) - - tx3, err := env.txBuilder.NewAddValidatorTx( - defaultValidatorStake, - uint64(validator3StartTime.Unix()), - uint64(validator3EndTime.Unix()), - ids.GenerateTestNodeID(), - preFundedKeys[2].PublicKey().Address(), - reward.PercentDenominator, - 
[]*secp256k1.PrivateKey{preFundedKeys[2]}, - preFundedKeys[2].PublicKey().Address(), - nil, - ) - require.NoError(err) - require.NoError(env.mempool.Add(tx3)) - tx3ID := tx3.ID() - _, ok = env.mempool.Get(tx3ID) - require.True(ok) - - // Only tx1 should be in a built block - blkIntf, err := env.Builder.BuildBlock(context.Background()) - require.NoError(err) - - require.IsType(&blockexecutor.Block{}, blkIntf) - blk := blkIntf.(*blockexecutor.Block) - require.Len(blk.Txs(), 1) - require.Equal(tx1ID, blk.Txs()[0].ID()) - - // Mempool should have none of the txs - _, ok = env.mempool.Get(tx1ID) - require.False(ok) - _, ok = env.mempool.Get(tx2ID) - require.False(ok) - _, ok = env.mempool.Get(tx3ID) - require.False(ok) - - // Only tx2 and tx3 should be dropped - require.NoError(env.mempool.GetDropReason(tx1ID)) - - tx2DropReason := env.mempool.GetDropReason(tx2ID) - require.ErrorIs(tx2DropReason, txexecutor.ErrTimestampNotBeforeStartTime) - - tx3DropReason := env.mempool.GetDropReason(tx3ID) - require.ErrorIs(tx3DropReason, txexecutor.ErrFutureStakeTime) -} - func TestBuildBlockInvalidStakingDurations(t *testing.T) { require := require.New(t) @@ -394,7 +307,7 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { // Post-Durango, [StartTime] is no longer validated. Staking durations are // based on the current chain timestamp and must be validated. 
- env.config.DurangoTime = time.Time{} + env.config.UpgradeConfig.DurangoTime = time.Time{} var ( now = env.backend.Clk.Time() @@ -407,19 +320,36 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - tx1, err := env.txBuilder.NewAddPermissionlessValidatorTx( - defaultValidatorStake, - uint64(now.Unix()), - uint64(validatorEndTime.Unix()), - ids.GenerateTestNodeID(), + builder1, signer1 := env.factory.NewWallet(preFundedKeys[0]) + utx1, err := builder1.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(now.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - preFundedKeys[0].PublicKey().Address(), + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - preFundedKeys[0].PublicKey().Address(), - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }), ) require.NoError(err) + tx1, err := walletsigner.SignUnsigned(context.Background(), signer1, utx1) + require.NoError(err) require.NoError(env.mempool.Add(tx1)) tx1ID := tx1.ID() _, ok := env.mempool.Get(tx1ID) @@ -431,19 +361,36 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { sk, err = bls.NewSecretKey() require.NoError(err) - tx2, err := env.txBuilder.NewAddPermissionlessValidatorTx( - defaultValidatorStake, - uint64(now.Unix()), - uint64(validator2EndTime.Unix()), - ids.GenerateTestNodeID(), + builder2, signer2 := env.factory.NewWallet(preFundedKeys[2]) + utx2, err := 
builder2.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(now.Unix()), + End: uint64(validator2EndTime.Unix()), + Wght: defaultValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - preFundedKeys[2].PublicKey().Address(), + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[2].PublicKey().Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[2].PublicKey().Address()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[2]}, - preFundedKeys[2].PublicKey().Address(), - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[2].PublicKey().Address()}, + }), ) require.NoError(err) + tx2, err := walletsigner.SignUnsigned(context.Background(), signer2, utx2) + require.NoError(err) require.NoError(env.mempool.Add(tx2)) tx2ID := tx2.ID() _, ok = env.mempool.Get(tx2ID) @@ -479,17 +426,17 @@ func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { defer env.ctx.Lock.Unlock() // Create a valid transaction - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) txID := tx.ID() // Transaction should not be marked as dropped before being added to the @@ -504,7 +451,7 @@ func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { // Issue the transaction env.ctx.Lock.Unlock() - err = env.network.IssueTx(context.Background(), tx) + err = 
env.network.IssueTxFromRPC(tx) require.ErrorIs(err, errTestingDropped) env.ctx.Lock.Lock() _, ok := env.mempool.Get(txID) diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index 9190f01e2c6b..eb80a9ffc4d3 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -5,7 +5,6 @@ package builder import ( "context" - "fmt" "testing" "time" @@ -35,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -45,14 +43,18 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) const ( @@ -64,6 +66,7 @@ const ( banff cortina durango + eUpgrade latestFork = durango ) @@ -103,7 +106,7 @@ type environment struct { Builder blkManager 
blockexecutor.Manager mempool mempool.Mempool - network network.Network + network *network.Network sender *common.SenderTest isBootstrapped *utils.Atomic[bool] @@ -114,10 +117,9 @@ type environment struct { msm *mutableSharedMemory fx fx.Fx state state.State - atomicUTXOs avax.AtomicUTXOManager uptimes uptime.Manager - utxosHandler utxo.Handler - txBuilder txbuilder.Builder + utxosVerifier utxo.Verifier + factory *txstest.WalletFactory backend txexecutor.Backend } @@ -149,19 +151,9 @@ func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam rewardsCalc := reward.NewCalculator(res.config.RewardConfig) res.state = defaultState(t, res.config, res.ctx, res.baseDB, rewardsCalc) - res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) res.uptimes = uptime.NewManager(res.state, res.clk) - res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) - - res.txBuilder = txbuilder.New( - res.ctx, - res.config, - res.clk, - res.fx, - res.state, - res.atomicUTXOs, - res.utxosHandler, - ) + res.utxosVerifier = utxo.NewVerifier(res.ctx, res.clk, res.fx) + res.factory = txstest.NewWalletFactory(res.ctx, res.config, res.state) genesisID := res.state.GetLastAccepted() res.backend = txexecutor.Backend{ @@ -170,18 +162,18 @@ func newEnvironment(t *testing.T, f fork) *environment { //nolint:unparam Clk: res.clk, Bootstrapped: res.isBootstrapped, Fx: res.fx, - FlowChecker: res.utxosHandler, + FlowChecker: res.utxosVerifier, Uptimes: res.uptimes, Rewards: rewardsCalc, } registerer := prometheus.NewRegistry() res.sender = &common.SenderTest{T: t} - res.sender.SendAppGossipF = func(context.Context, []byte) error { + res.sender.SendAppGossipF = func(context.Context, common.SendConfig, []byte) error { return nil } - metrics, err := metrics.New("", registerer) + metrics, err := metrics.New(registerer) require.NoError(err) res.mempool, err = mempool.New("mempool", registerer, nil) @@ -244,22 +236,25 @@ func newEnvironment(t *testing.T, f fork) 
*environment { //nolint:unparam func addSubnet(t *testing.T, env *environment) { require := require.New(t) - // Create a subnet - var err error - testSubnet1, err = env.txBuilder.NewCreateSubnetTx( - 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet - []ids.ShortID{ // control keys - preFundedKeys[0].PublicKey().Address(), - preFundedKeys[1].PublicKey().Address(), - preFundedKeys[2].PublicKey().Address(), + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + preFundedKeys[0].PublicKey().Address(), + preFundedKeys[1].PublicKey().Address(), + preFundedKeys[2].PublicKey().Address(), + }, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - preFundedKeys[0].PublicKey().Address(), - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }), ) require.NoError(err) + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) - // store it genesisID := env.state.GetLastAccepted() stateDiff, err := state.NewDiff(genesisID, env.blkManager) require.NoError(err) @@ -305,57 +300,59 @@ func defaultState( } func defaultConfig(t *testing.T, f fork) *config.Config { - var ( - apricotPhase3Time = mockable.MaxTime - apricotPhase5Time = mockable.MaxTime - banffTime = mockable.MaxTime - cortinaTime = mockable.MaxTime - durangoTime = mockable.MaxTime - ) + c := &config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + Validators: validators.NewManager(), + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: 
defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: reward.Config{ + MaxConsumptionRate: .12 * reward.PercentDenominator, + MinConsumptionRate: .10 * reward.PercentDenominator, + MintingPeriod: 365 * 24 * time.Hour, + SupplyCap: 720 * units.MegaAvax, + }, + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: mockable.MaxTime, + ApricotPhase5Time: mockable.MaxTime, + BanffTime: mockable.MaxTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, + EUpgradeTime: mockable.MaxTime, + }, + } switch f { + case eUpgrade: + c.UpgradeConfig.EUpgradeTime = time.Time{} // neglecting fork ordering this for package tests + fallthrough case durango: - durangoTime = time.Time{} // neglecting fork ordering for this package's tests + c.UpgradeConfig.DurangoTime = time.Time{} // neglecting fork ordering for this package's tests fallthrough case cortina: - cortinaTime = time.Time{} // neglecting fork ordering for this package's tests + c.UpgradeConfig.CortinaTime = time.Time{} // neglecting fork ordering for this package's tests fallthrough case banff: - banffTime = time.Time{} // neglecting fork ordering for this package's tests + c.UpgradeConfig.BanffTime = time.Time{} // neglecting fork ordering for this package's tests fallthrough case apricotPhase5: - apricotPhase5Time = defaultValidateEndTime + c.UpgradeConfig.ApricotPhase5Time = defaultValidateEndTime fallthrough case apricotPhase3: - apricotPhase3Time = defaultValidateEndTime + c.UpgradeConfig.ApricotPhase3Time = defaultValidateEndTime default: - require.NoError(t, fmt.Errorf("unhandled fork %d", f)) + require.FailNow(t, "unhandled fork", f) } - return &config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * 
units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: reward.Config{ - MaxConsumptionRate: .12 * reward.PercentDenominator, - MinConsumptionRate: .10 * reward.PercentDenominator, - MintingPeriod: 365 * 24 * time.Hour, - SupplyCap: 720 * units.MegaAvax, - }, - ApricotPhase3Time: apricotPhase3Time, - ApricotPhase5Time: apricotPhase5Time, - BanffTime: banffTime, - CortinaTime: cortinaTime, - DurangoTime: durangoTime, - } + return c } func defaultClock() *mockable.Clock { @@ -387,7 +384,7 @@ func defaultFx(t *testing.T, clk *mockable.Clock, log logging.Logger, isBootstra require := require.New(t) fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(time.Time{}), + registry: linearcodec.NewDefault(), clk: clk, log: log, } diff --git a/vms/platformvm/block/builder/standard_block_test.go b/vms/platformvm/block/builder/standard_block_test.go index fa1a07fb3f0e..18ca5bdb3582 100644 --- a/vms/platformvm/block/builder/standard_block_test.go +++ b/vms/platformvm/block/builder/standard_block_test.go @@ -12,11 +12,12 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestAtomicTxImports(t *testing.T) { @@ -62,14 +63,17 @@ func TestAtomicTxImports(t *testing.T) { }}}, })) - tx, err := env.txBuilder.NewImportTx( + builder, signer := env.factory.NewWallet(recipientKey) + utx, err := builder.NewImportTx( env.ctx.XChainID, - recipientKey.PublicKey().Address(), - []*secp256k1.PrivateKey{recipientKey}, - ids.ShortEmpty, // 
change addr - nil, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) require.NoError(env.Builder.Add(tx)) b, err := env.Builder.BuildBlock(context.Background()) diff --git a/vms/platformvm/block/codec.go b/vms/platformvm/block/codec.go index 33babbaf3a79..f0f66a414811 100644 --- a/vms/platformvm/block/codec.go +++ b/vms/platformvm/block/codec.go @@ -5,7 +5,6 @@ package block import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -26,13 +25,9 @@ var ( Codec codec.Manager ) -// TODO: Remove after v1.11.x has activated -// -// Invariant: InitCodec, Codec, and GenesisCodec must not be accessed -// concurrently -func InitCodec(durangoTime time.Time) error { - c := linearcodec.NewDefault(durangoTime) - gc := linearcodec.NewDefault(time.Time{}) +func init() { + c := linearcodec.NewDefault() + gc := linearcodec.NewDefault() errs := wrappers.Errs{} for _, c := range []linearcodec.Codec{c, gc} { @@ -44,24 +39,14 @@ func InitCodec(durangoTime time.Time) error { ) } - newCodec := codec.NewDefaultManager() - newGenesisCodec := codec.NewManager(math.MaxInt32) + Codec = codec.NewDefaultManager() + GenesisCodec = codec.NewManager(math.MaxInt32) errs.Add( - newCodec.RegisterCodec(CodecVersion, c), - newGenesisCodec.RegisterCodec(CodecVersion, gc), + Codec.RegisterCodec(CodecVersion, c), + GenesisCodec.RegisterCodec(CodecVersion, gc), ) if errs.Errored() { - return errs.Err - } - - Codec = newCodec - GenesisCodec = newGenesisCodec - return nil -} - -func init() { - if err := InitCodec(time.Time{}); err != nil { - panic(err) + panic(errs.Err) } } diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 825625b7c77e..a8c276579c08 100644 --- a/vms/platformvm/block/executor/helpers_test.go 
+++ b/vms/platformvm/block/executor/helpers_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "fmt" "testing" "time" @@ -35,7 +36,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -45,12 +45,16 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - p_tx_builder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) const ( @@ -65,6 +69,7 @@ const ( banff cortina durango + eUpgrade ) var ( @@ -125,10 +130,9 @@ type environment struct { fx fx.Fx state state.State mockedState *state.MockState - atomicUTXOs avax.AtomicUTXOManager uptimes uptime.Manager - utxosHandler utxo.Handler - txBuilder p_tx_builder.Builder + utxosVerifier utxo.Verifier + factory *txstest.WalletFactory backend *executor.Backend } @@ -151,34 +155,25 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) - 
res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) if ctrl == nil { res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) res.uptimes = uptime.NewManager(res.state, res.clk) - res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) - res.txBuilder = p_tx_builder.New( + res.utxosVerifier = utxo.NewVerifier(res.ctx, res.clk, res.fx) + res.factory = txstest.NewWalletFactory( res.ctx, res.config, - res.clk, - res.fx, res.state, - res.atomicUTXOs, - res.utxosHandler, ) } else { genesisBlkID = ids.GenerateTestID() res.mockedState = state.NewMockState(ctrl) res.uptimes = uptime.NewManager(res.mockedState, res.clk) - res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) - res.txBuilder = p_tx_builder.New( + res.utxosVerifier = utxo.NewVerifier(res.ctx, res.clk, res.fx) + res.factory = txstest.NewWalletFactory( res.ctx, res.config, - res.clk, - res.fx, res.mockedState, - res.atomicUTXOs, - res.utxosHandler, ) // setup expectations strictly needed for environment creation @@ -191,7 +186,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment Clk: res.clk, Bootstrapped: res.isBootstrapped, Fx: res.fx, - FlowChecker: res.utxosHandler, + FlowChecker: res.utxosVerifier, Uptimes: res.uptimes, Rewards: rewardsCalc, } @@ -257,24 +252,29 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller, f fork) *environment } func addSubnet(env *environment) { - // Create a subnet - var err error - testSubnet1, err = env.txBuilder.NewCreateSubnetTx( - 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet - []ids.ShortID{ // control keys - preFundedKeys[0].PublicKey().Address(), - preFundedKeys[1].PublicKey().Address(), - preFundedKeys[2].PublicKey().Address(), + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + 
preFundedKeys[0].PublicKey().Address(), + preFundedKeys[1].PublicKey().Address(), + preFundedKeys[2].PublicKey().Address(), + }, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - preFundedKeys[0].PublicKey().Address(), - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }), ) if err != nil { panic(err) } + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + panic(err) + } - // store it genesisID := env.state.GetLastAccepted() stateDiff, err := state.NewDiff(genesisID, env.blkManager) if err != nil { @@ -329,57 +329,59 @@ func defaultState( } func defaultConfig(t *testing.T, f fork) *config.Config { - var ( - apricotPhase3Time = mockable.MaxTime - apricotPhase5Time = mockable.MaxTime - banffTime = mockable.MaxTime - cortinaTime = mockable.MaxTime - durangoTime = mockable.MaxTime - ) + c := &config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + Validators: validators.NewManager(), + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: reward.Config{ + MaxConsumptionRate: .12 * reward.PercentDenominator, + MinConsumptionRate: .10 * reward.PercentDenominator, + MintingPeriod: 365 * 24 * time.Hour, + SupplyCap: 720 * units.MegaAvax, + }, + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: mockable.MaxTime, + ApricotPhase5Time: mockable.MaxTime, + BanffTime: mockable.MaxTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, + EUpgradeTime: mockable.MaxTime, + }, + } switch f { + case eUpgrade: + c.UpgradeConfig.EUpgradeTime = 
time.Time{} // neglecting fork ordering this for package tests + fallthrough case durango: - durangoTime = time.Time{} // neglecting fork ordering for this package's tests + c.UpgradeConfig.DurangoTime = time.Time{} // neglecting fork ordering for this package's tests fallthrough case cortina: - cortinaTime = time.Time{} // neglecting fork ordering for this package's tests + c.UpgradeConfig.CortinaTime = time.Time{} // neglecting fork ordering for this package's tests fallthrough case banff: - banffTime = time.Time{} // neglecting fork ordering for this package's tests + c.UpgradeConfig.BanffTime = time.Time{} // neglecting fork ordering for this package's tests fallthrough case apricotPhase5: - apricotPhase5Time = defaultValidateEndTime + c.UpgradeConfig.ApricotPhase5Time = defaultValidateEndTime fallthrough case apricotPhase3: - apricotPhase3Time = defaultValidateEndTime + c.UpgradeConfig.ApricotPhase3Time = defaultValidateEndTime default: - require.NoError(t, fmt.Errorf("unhandled fork %d", f)) + require.FailNow(t, "unhandled fork", f) } - return &config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: reward.Config{ - MaxConsumptionRate: .12 * reward.PercentDenominator, - MinConsumptionRate: .10 * reward.PercentDenominator, - MintingPeriod: 365 * 24 * time.Hour, - SupplyCap: 720 * units.MegaAvax, - }, - ApricotPhase3Time: apricotPhase3Time, - ApricotPhase5Time: apricotPhase5Time, - BanffTime: banffTime, - CortinaTime: cortinaTime, - DurangoTime: durangoTime, - } + return c } func defaultClock() *mockable.Clock { @@ -408,7 +410,7 @@ func 
(fvi *fxVMInt) Logger() logging.Logger { func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(time.Time{}), + registry: linearcodec.NewDefault(), clk: clk, log: log, } @@ -495,20 +497,27 @@ func addPendingValidator( rewardAddress ids.ShortID, keys []*secp256k1.PrivateKey, ) (*txs.Tx, error) { - addPendingValidatorTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - rewardAddress, + builder, signer := env.factory.NewWallet(keys...) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, reward.PercentDenominator, - keys, - ids.ShortEmpty, - nil, ) if err != nil { return nil, err } + addPendingValidatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, err + } staker, err := state.NewPendingStaker( addPendingValidatorTx.ID(), diff --git a/vms/platformvm/block/executor/manager.go b/vms/platformvm/block/executor/manager.go index 27d35a7641ad..80e3e4503139 100644 --- a/vms/platformvm/block/executor/manager.go +++ b/vms/platformvm/block/executor/manager.go @@ -132,7 +132,7 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { return err } - nextBlkTime, _, err := executor.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) + nextBlkTime, _, err := state.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) if err != nil { return err } @@ -142,19 +142,11 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { return err } - err = tx.Unsigned.Visit(&executor.StandardTxExecutor{ + return tx.Unsigned.Visit(&executor.StandardTxExecutor{ Backend: m.txExecutorBackend, State: stateDiff, Tx: tx, }) - // We ignore [errFutureStakeTime] here because the time 
will be advanced - // when this transaction is issued. - // - // TODO: Remove this check post-Durango. - if errors.Is(err, executor.ErrFutureStakeTime) { - return nil - } - return err } func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 984c8081ae4f..f0037754d06a 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -28,6 +28,9 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) func TestApricotProposalBlockTimeVerification(t *testing.T) { @@ -334,23 +337,6 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.ErrorIs(err, executor.ErrAdvanceTimeTxIssuedAfterBanff) } - { - // include too many transactions - statelessProposalBlock, err := block.NewBanffProposalBlock( - nextStakerTime, - parentID, - banffParentBlk.Height()+1, - blkTx, - []*txs.Tx{}, - ) - require.NoError(err) - - statelessProposalBlock.Transactions = []*txs.Tx{blkTx} - block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) - require.ErrorIs(err, errBanffProposalBlockWithMultipleTransactions) - } - { // valid statelessProposalBlock, err := block.NewBanffProposalBlock( @@ -383,7 +369,8 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // The order in which they do it is asserted; the order may depend on the staker.TxID, // which in turns depend on every feature of the transaction creating the staker. 
// So in this test we avoid ids.GenerateTestNodeID, in favour of ids.BuildTestNodeID - // so that TxID does not depend on the order we run tests. + // so that TxID does not depend on the order we run tests. We also explicitly declare + // the change address, to avoid picking a random one in case multiple funding keys are set. staker0 := staker{ nodeID: ids.BuildTestNodeID([]byte{0xf0}), rewardAddress: ids.ShortID{0xf0}, @@ -551,18 +538,27 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { env.config.TrackedSubnets.Add(subnetID) for _, staker := range test.stakers { - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(staker.startTime.Unix()), - uint64(staker.endTime.Unix()), - staker.nodeID, - staker.rewardAddress, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: staker.nodeID, + Start: uint64(staker.startTime.Unix()), + End: uint64(staker.endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{staker.rewardAddress}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -576,17 +572,25 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { } for _, subStaker := range test.subnetStakers { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 10, // Weight - uint64(subStaker.startTime.Unix()), - uint64(subStaker.endTime.Unix()), - subStaker.nodeID, // validator ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], 
preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subStaker.nodeID, + Start: uint64(subStaker.startTime.Unix()), + End: uint64(subStaker.endTime.Unix()), + Wght: 10, + }, + Subnet: subnetID, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) subnetStaker, err := state.NewPendingStaker( tx.ID(), @@ -605,18 +609,27 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // add Staker0 (with the right end time) to state // so to allow proposalBlk issuance staker0.endTime = newTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0.startTime.Unix()), - uint64(staker0.endTime.Unix()), - staker0.nodeID, - staker0.rewardAddress, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: staker0.nodeID, + Start: uint64(staker0.startTime.Unix()), + End: uint64(staker0.endTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{staker0.rewardAddress}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }), ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -706,17 +719,21 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) - tx, err 
:= env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1StartTime.Unix()), // Start time - uint64(subnetVdr1EndTime.Unix()), // end time - subnetValidatorNodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetValidatorNodeID, + Start: uint64(subnetVdr1StartTime.Unix()), + End: uint64(subnetVdr1EndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( @@ -735,17 +752,20 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { // Queue a staker that joins the staker set after the above validator leaves subnetVdr2NodeID := genesisNodeIDs[1] - tx, err = env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time - uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), // end time - subnetVdr2NodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + utx, err = builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetVdr2NodeID, + Start: uint64(subnetVdr1EndTime.Add(time.Second).Unix()), + End: uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -766,18 +786,26 @@ func TestBanffProposalBlockRemoveSubnetValidator(t 
*testing.T) { // so to allow proposalBlk issuance staker0StartTime := defaultValidateStartTime staker0EndTime := subnetVdr1EndTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0StartTime.Unix()), - uint64(staker0EndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), + uVdrTx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(staker0StartTime.Unix()), + End: uint64(staker0EndTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }), ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -850,17 +878,22 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1StartTime.Unix()), // Start time - uint64(subnetVdr1EndTime.Unix()), // end time - subnetValidatorNodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetValidatorNodeID, + Start: uint64(subnetVdr1StartTime.Unix()), + End: uint64(subnetVdr1EndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) 
require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -879,18 +912,23 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { // so to allow proposalBlk issuance staker0StartTime := defaultGenesisTime staker0EndTime := subnetVdr1StartTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0StartTime.Unix()), - uint64(staker0EndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), + + uVdrTx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(staker0StartTime.Unix()), + End: uint64(staker0EndTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -965,18 +1003,23 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { // just to allow proposalBlk issuance (with a reward Tx) staker0StartTime := defaultGenesisTime staker0EndTime := pendingValidatorStartTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0StartTime.Unix()), - uint64(staker0EndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(staker0StartTime.Unix()), + End: uint64(staker0EndTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - 
[]*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1031,21 +1074,22 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(1 * time.Second) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - uint64(pendingDelegatorStartTime.Unix()), - uint64(pendingDelegatorEndTime.Unix()), - nodeID, - preFundedKeys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], + builder, signer = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(pendingDelegatorStartTime.Unix()), + End: uint64(pendingDelegatorEndTime.Unix()), + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - ids.ShortEmpty, - nil, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) staker, err = state.NewPendingStaker( addDelegatorTx.ID(), @@ -1061,18 +1105,23 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { // add Staker0 (with the right end time) to state // so to allow proposalBlk issuance staker0EndTime = pendingDelegatorStartTime - addStaker0, err = env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0StartTime.Unix()), - uint64(staker0EndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), + builder, signer = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err = 
builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(staker0StartTime.Unix()), + End: uint64(staker0EndTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addStaker0, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // store Staker0 to state addValTx = addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1151,18 +1200,23 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // so to allow proposalBlk issuance staker0StartTime := defaultGenesisTime staker0EndTime := pendingValidatorStartTime - addStaker0, err := env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0StartTime.Unix()), - uint64(staker0EndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), + builder, txSigner := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(staker0StartTime.Unix()), + End: uint64(staker0EndTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addStaker0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // store Staker0 to state addValTx := addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1216,21 +1270,22 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // Add delegator pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(defaultMinStakingDuration) - addDelegatorTx, err := 
env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - uint64(pendingDelegatorStartTime.Unix()), - uint64(pendingDelegatorEndTime.Unix()), - nodeID, - preFundedKeys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], + builder, txSigner = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(pendingDelegatorStartTime.Unix()), + End: uint64(pendingDelegatorEndTime.Unix()), + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - ids.ShortEmpty, - nil, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx) + require.NoError(err) staker, err = state.NewPendingStaker( addDelegatorTx.ID(), @@ -1246,18 +1301,23 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // add Staker0 (with the right end time) to state // so to allow proposalBlk issuance staker0EndTime = pendingDelegatorStartTime - addStaker0, err = env.txBuilder.NewAddValidatorTx( - 10, - uint64(staker0StartTime.Unix()), - uint64(staker0EndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), + builder, txSigner = env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err = builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(staker0StartTime.Unix()), + End: uint64(staker0EndTime.Unix()), + Wght: 10, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addStaker0, err = walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // store Staker0 to state addValTx = 
addStaker0.Unsigned.(*txs.AddValidatorTx) @@ -1325,23 +1385,32 @@ func TestAddValidatorProposalBlock(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - addValidatorTx, err := env.txBuilder.NewAddPermissionlessValidatorTx( - env.config.MinValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, + builder, txSigner := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - preFundedKeys[0].PublicKey().Address(), - 10000, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - ids.ShortEmpty, - nil, + 10000, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Add validator through a [StandardBlock] preferredID := env.blkManager.Preferred() @@ -1367,7 +1436,7 @@ func TestAddValidatorProposalBlock(t *testing.T) { // Advance time until next staker change time is [validatorEndTime] for { - nextStakerChangeTime, err := executor.GetNextStakerChangeTime(env.state) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(env.state) require.NoError(err) if nextStakerChangeTime.Equal(validatorEndTime) { break @@ -1401,23 +1470,31 @@ func TestAddValidatorProposalBlock(t *testing.T) { sk, err = bls.NewSecretKey() require.NoError(err) - addValidatorTx2, err := env.txBuilder.NewAddPermissionlessValidatorTx( - 
env.config.MinValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, + utx2, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - preFundedKeys[0].PublicKey().Address(), - 10000, - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - ids.ShortEmpty, - nil, + 10000, ) require.NoError(err) + addValidatorTx2, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx2) + require.NoError(err) // Add validator through a [ProposalBlock] and reward the last one preferredID = env.blkManager.Preferred() diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index b77846351bcf..af8d469c48c3 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -23,6 +23,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestApricotStandardBlockTimeVerification(t *testing.T) { @@ -143,7 +145,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ID: avaxAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: env.config.CreateSubnetTxFee, + Amt: env.config.StaticFeeConfig.CreateSubnetTxFee, }, } utxoID := utxo.InputID() @@ -158,7 +160,7 @@ func 
TestBanffStandardBlockTimeVerification(t *testing.T) { UTXOID: utxo.UTXOID, Asset: utxo.Asset, In: &secp256k1fx.TransferInput{ - Amt: env.config.CreateSubnetTxFee, + Amt: env.config.StaticFeeConfig.CreateSubnetTxFee, }, }}, }}, @@ -508,17 +510,21 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { } for _, staker := range test.subnetStakers { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 10, // Weight - uint64(staker.startTime.Unix()), - uint64(staker.endTime.Unix()), - staker.nodeID, // validator ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: staker.nodeID, + Start: uint64(staker.startTime.Unix()), + End: uint64(staker.endTime.Unix()), + Wght: 10, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -598,17 +604,21 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1StartTime.Unix()), // Start time - uint64(subnetVdr1EndTime.Unix()), // end time - subnetValidatorNodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetValidatorNodeID, + Start: uint64(subnetVdr1StartTime.Unix()), + End: uint64(subnetVdr1EndTime.Unix()), + 
Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( @@ -627,17 +637,20 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { // Queue a staker that joins the staker set after the above validator leaves subnetVdr2NodeID := genesisNodeIDs[1] - tx, err = env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time - uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), // end time - subnetVdr2NodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + utx, err = builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetVdr2NodeID, + Start: uint64(subnetVdr1EndTime.Add(time.Second).Unix()), + End: uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -697,17 +710,21 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultGenesisTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultGenesisTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1StartTime.Unix()), // Start time - uint64(subnetVdr1EndTime.Unix()), // end time - subnetValidatorNodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, 
err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetValidatorNodeID, + Start: uint64(subnetVdr1StartTime.Unix()), + End: uint64(subnetVdr1EndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -788,21 +805,22 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(1 * time.Second) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - uint64(pendingDelegatorStartTime.Unix()), - uint64(pendingDelegatorEndTime.Unix()), - nodeID, - preFundedKeys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(pendingDelegatorStartTime.Unix()), + End: uint64(pendingDelegatorEndTime.Unix()), + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - ids.ShortEmpty, - nil, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( addDelegatorTx.ID(), diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index b35d2ecdd55c..3feccf4e4c96 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -22,12 +22,11 @@ var ( ErrConflictingBlockTxs = errors.New("block contains conflicting transactions") - errApricotBlockIssuedAfterFork = 
errors.New("apricot block issued after fork") - errBanffProposalBlockWithMultipleTransactions = errors.New("BanffProposalBlock contains multiple transactions") - errBanffStandardBlockWithoutChanges = errors.New("BanffStandardBlock performs no state changes") - errIncorrectBlockHeight = errors.New("incorrect block height") - errChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") - errOptionBlockTimestampNotMatchingParent = errors.New("option block proposed timestamp not matching parent block one") + errApricotBlockIssuedAfterFork = errors.New("apricot block issued after fork") + errBanffStandardBlockWithoutChanges = errors.New("BanffStandardBlock performs no state changes") + errIncorrectBlockHeight = errors.New("incorrect block height") + errChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") + errOptionBlockTimestampNotMatchingParent = errors.New("option block proposed timestamp not matching parent block one") ) // verifier handles the logic for verifying a block. @@ -51,11 +50,6 @@ func (v *verifier) BanffCommitBlock(b *block.BanffCommitBlock) error { } func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { - nextChainTime := b.Timestamp() - if !v.txExecutorBackend.Config.IsDurangoActivated(nextChainTime) && len(b.Transactions) != 0 { - return errBanffProposalBlockWithMultipleTransactions - } - if err := v.banffNonOptionBlock(b); err != nil { return err } @@ -67,6 +61,7 @@ func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { } // Advance the time to [nextChainTime]. 
+ nextChainTime := b.Timestamp() if _, err := executor.AdvanceTimeTo(v.txExecutorBackend, onDecisionState, nextChainTime); err != nil { return err } @@ -184,11 +179,11 @@ func (v *verifier) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { parentID := b.Parent() currentTimestamp := v.getTimestamp(parentID) cfg := v.txExecutorBackend.Config - if cfg.IsApricotPhase5Activated(currentTimestamp) { + if cfg.UpgradeConfig.IsApricotPhase5Activated(currentTimestamp) { return fmt.Errorf( "the chain timestamp (%d) is after the apricot phase 5 time (%d), hence atomic transactions should go through the standard block", currentTimestamp.Unix(), - cfg.ApricotPhase5Time.Unix(), + cfg.UpgradeConfig.ApricotPhase5Time.Unix(), ) } @@ -271,7 +266,7 @@ func (v *verifier) banffNonOptionBlock(b block.BanffBlock) error { ) } - nextStakerChangeTime, err := executor.GetNextStakerChangeTime(parentState) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(parentState) if err != nil { return fmt.Errorf("could not verify block timestamp: %w", err) } @@ -295,7 +290,7 @@ func (v *verifier) apricotCommonBlock(b block.Block) error { // during the verification of the ProposalBlock. 
parentID := b.Parent() timestamp := v.getTimestamp(parentID) - if v.txExecutorBackend.Config.IsBanffActivated(timestamp) { + if v.txExecutorBackend.Config.UpgradeConfig.IsBanffActivated(timestamp) { return fmt.Errorf("%w: timestamp = %s", errApricotBlockIssuedAfterFork, timestamp) } return v.commonBlock(b) diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index ba24fb2f1298..34d8a0d7432e 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -26,6 +26,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" ) func TestVerifierVisitProposalBlock(t *testing.T) { @@ -58,7 +59,9 @@ func TestVerifierVisitProposalBlock(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -142,8 +145,10 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - ApricotPhase5Time: time.Now().Add(time.Hour), - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + ApricotPhase5Time: time.Now().Add(time.Hour), + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -229,8 +234,10 @@ func TestVerifierVisitStandardBlock(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - ApricotPhase5Time: time.Now().Add(time.Hour), - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + ApricotPhase5Time: time.Now().Add(time.Hour), + 
BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -334,7 +341,9 @@ func TestVerifierVisitCommitBlock(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -405,7 +414,9 @@ func TestVerifierVisitAbortBlock(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -464,7 +475,9 @@ func TestVerifyUnverifiedParent(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -536,7 +549,9 @@ func TestBanffAbortBlockTimestampChecks(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: time.Time{}, // banff is activated + UpgradeConfig: upgrade.Config{ + BanffTime: time.Time{}, // banff is activated + }, }, Clk: &mockable.Clock{}, }, @@ -632,7 +647,9 @@ func TestBanffCommitBlockTimestampChecks(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: time.Time{}, // banff is activated + UpgradeConfig: upgrade.Config{ + BanffTime: time.Time{}, // banff is activated + }, }, Clk: &mockable.Clock{}, }, @@ -711,8 +728,10 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - ApricotPhase5Time: time.Now().Add(time.Hour), - BanffTime: 
mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + ApricotPhase5Time: time.Now().Add(time.Hour), + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -800,7 +819,9 @@ func TestVerifierVisitApricotStandardBlockWithProposalBlockParent(t *testing.T) verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -857,7 +878,9 @@ func TestVerifierVisitBanffStandardBlockWithProposalBlockParent(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: time.Time{}, // banff is activated + UpgradeConfig: upgrade.Config{ + BanffTime: time.Time{}, // banff is activated + }, }, Clk: &mockable.Clock{}, }, @@ -894,7 +917,9 @@ func TestVerifierVisitApricotCommitBlockUnexpectedParentState(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: &mockable.Clock{}, }, @@ -937,7 +962,9 @@ func TestVerifierVisitBanffCommitBlockUnexpectedParentState(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: time.Time{}, // banff is activated + UpgradeConfig: upgrade.Config{ + BanffTime: time.Time{}, // banff is activated + }, }, Clk: &mockable.Clock{}, }, @@ -981,7 +1008,9 @@ func TestVerifierVisitApricotAbortBlockUnexpectedParentState(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: mockable.MaxTime, // banff is not activated + UpgradeConfig: upgrade.Config{ + BanffTime: mockable.MaxTime, // banff is not activated + }, }, Clk: 
&mockable.Clock{}, }, @@ -1024,7 +1053,9 @@ func TestVerifierVisitBanffAbortBlockUnexpectedParentState(t *testing.T) { verifier := &verifier{ txExecutorBackend: &executor.Backend{ Config: &config.Config{ - BanffTime: time.Time{}, // banff is activated + UpgradeConfig: upgrade.Config{ + BanffTime: time.Time{}, // banff is activated + }, }, Clk: &mockable.Clock{}, }, diff --git a/vms/platformvm/client.go b/vms/platformvm/client.go index 962492eda975..11453efb5f6b 100644 --- a/vms/platformvm/client.go +++ b/vms/platformvm/client.go @@ -17,8 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/avalanchego/vms/platformvm/status" - - platformapi "github.com/ava-labs/avalanchego/vms/platformvm/api" ) var _ Client = (*client)(nil) @@ -31,18 +29,10 @@ type Client interface { // // Deprecated: Keys should no longer be stored on the node. ExportKey(ctx context.Context, user api.UserPass, address ids.ShortID, options ...rpc.Option) (*secp256k1.PrivateKey, error) - // ImportKey imports the specified [privateKey] to [user]'s keystore - // - // Deprecated: Keys should no longer be stored on the node. - ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (ids.ShortID, error) // GetBalance returns the balance of [addrs] on the P Chain // // Deprecated: GetUTXOs should be used instead. GetBalance(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (*GetBalanceResponse, error) - // CreateAddress creates a new address for [user] - // - // Deprecated: Keys should no longer be stored on the node. - CreateAddress(ctx context.Context, user api.UserPass, options ...rpc.Option) (ids.ShortID, error) // ListAddresses returns an array of platform addresses controlled by [user] // // Deprecated: Keys should no longer be stored on the node. 
@@ -78,120 +68,10 @@ type Client interface { GetStakingAssetID(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (ids.ID, error) // GetCurrentValidators returns the list of current validators for subnet with ID [subnetID] GetCurrentValidators(ctx context.Context, subnetID ids.ID, nodeIDs []ids.NodeID, options ...rpc.Option) ([]ClientPermissionlessValidator, error) - // GetPendingValidators returns the list of pending validators for subnet with ID [subnetID] - GetPendingValidators(ctx context.Context, subnetID ids.ID, nodeIDs []ids.NodeID, options ...rpc.Option) ([]interface{}, []interface{}, error) // GetCurrentSupply returns an upper bound on the supply of AVAX in the system along with the P-chain height GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) // SampleValidators returns the nodeIDs of a sample of [sampleSize] validators from the current validator set for subnet with ID [subnetID] SampleValidators(ctx context.Context, subnetID ids.ID, sampleSize uint16, options ...rpc.Option) ([]ids.NodeID, error) - // AddValidator issues a transaction to add a validator to the primary network - // and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. - AddValidator( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - rewardAddress ids.ShortID, - nodeID ids.NodeID, - stakeAmount, - startTime, - endTime uint64, - delegationFeeRate float32, - options ...rpc.Option, - ) (ids.ID, error) - // AddDelegator issues a transaction to add a delegator to the primary network - // and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. 
- AddDelegator( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - rewardAddress ids.ShortID, - nodeID ids.NodeID, - stakeAmount, - startTime, - endTime uint64, - options ...rpc.Option, - ) (ids.ID, error) - // AddSubnetValidator issues a transaction to add validator [nodeID] to subnet - // with ID [subnetID] and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. - AddSubnetValidator( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - subnetID ids.ID, - nodeID ids.NodeID, - stakeAmount, - startTime, - endTime uint64, - options ...rpc.Option, - ) (ids.ID, error) - // CreateSubnet issues a transaction to create [subnet] and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. - CreateSubnet( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - controlKeys []ids.ShortID, - threshold uint32, - options ...rpc.Option, - ) (ids.ID, error) - // ExportAVAX issues an ExportTx transaction and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. - ExportAVAX( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - to ids.ShortID, - toChainIDAlias string, - amount uint64, - options ...rpc.Option, - ) (ids.ID, error) - // ImportAVAX issues an ImportTx transaction and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. 
- ImportAVAX( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - to ids.ShortID, - sourceChain string, - options ...rpc.Option, - ) (ids.ID, error) - // CreateBlockchain issues a CreateBlockchain transaction and returns the txID - // - // Deprecated: Transactions should be issued using the - // `avalanchego/wallet/chain/p.Wallet` utility. - CreateBlockchain( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - subnetID ids.ID, - vmID string, - fxIDs []string, - name string, - genesisData []byte, - options ...rpc.Option, - ) (ids.ID, error) // GetBlockchainStatus returns the current status of blockchain with ID: [blockchainID] GetBlockchainStatus(ctx context.Context, blockchainID string, options ...rpc.Option) (status.BlockchainStatus, error) // ValidatedBy returns the ID of the Subnet that validates [blockchainID] @@ -208,21 +88,10 @@ type Client interface { GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) // GetTxStatus returns the status of the transaction corresponding to [txID] GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (*GetTxStatusResponse, error) - // AwaitTxDecided polls [GetTxStatus] until a status is returned that - // implies the tx may be decided. - // TODO: Move this function off of the Client interface into a utility - // function. - AwaitTxDecided( - ctx context.Context, - txID ids.ID, - freq time.Duration, - options ...rpc.Option, - ) (*GetTxStatusResponse, error) // GetStake returns the amount of nAVAX that [addrs] have cumulatively // staked on the Primary Network. // - // Deprecated: Stake should be calculated using GetTx, GetCurrentValidators, - // and GetPendingValidators. + // Deprecated: Stake should be calculated using GetTx and GetCurrentValidators. 
GetStake( ctx context.Context, addrs []ids.ShortID, @@ -234,19 +103,6 @@ type Client interface { GetMinStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) // GetTotalStake returns the total amount (in nAVAX) staked on the network GetTotalStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) - // GetMaxStakeAmount returns the maximum amount of nAVAX staking to the named - // node during the time period. - // - // Deprecated: The MaxStakeAmount should be calculated using - // GetCurrentValidators, and GetPendingValidators. - GetMaxStakeAmount( - ctx context.Context, - subnetID ids.ID, - nodeID ids.NodeID, - startTime uint64, - endTime uint64, - options ...rpc.Option, - ) (uint64, error) // GetRewardUTXOs returns the reward UTXOs for a transaction // // Deprecated: GetRewardUTXOs should be fetched from a dedicated indexer. @@ -294,18 +150,6 @@ func (c *client) ExportKey(ctx context.Context, user api.UserPass, address ids.S return res.PrivateKey, err } -func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (ids.ShortID, error) { - res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "platform.importKey", &ImportKeyArgs{ - UserPass: user, - PrivateKey: privateKey, - }, res, options...) - if err != nil { - return ids.ShortID{}, err - } - return address.ParseToID(res.Address) -} - func (c *client) GetBalance(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (*GetBalanceResponse, error) { res := &GetBalanceResponse{} err := c.requester.SendRequest(ctx, "platform.getBalance", &GetBalanceRequest{ @@ -314,15 +158,6 @@ func (c *client) GetBalance(ctx context.Context, addrs []ids.ShortID, options .. 
return res, err } -func (c *client) CreateAddress(ctx context.Context, user api.UserPass, options ...rpc.Option) (ids.ShortID, error) { - res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "platform.createAddress", &user, res, options...) - if err != nil { - return ids.ShortID{}, err - } - return address.ParseToID(res.Address) -} - func (c *client) ListAddresses(ctx context.Context, user api.UserPass, options ...rpc.Option) ([]ids.ShortID, error) { res := &api.JSONAddresses{} err := c.requester.SendRequest(ctx, "platform.listAddresses", &user, res, options...) @@ -477,20 +312,6 @@ func (c *client) GetCurrentValidators( return getClientPermissionlessValidators(res.Validators) } -func (c *client) GetPendingValidators( - ctx context.Context, - subnetID ids.ID, - nodeIDs []ids.NodeID, - options ...rpc.Option, -) ([]interface{}, []interface{}, error) { - res := &GetPendingValidatorsReply{} - err := c.requester.SendRequest(ctx, "platform.getPendingValidators", &GetPendingValidatorsArgs{ - SubnetID: subnetID, - NodeIDs: nodeIDs, - }, res, options...) 
- return res.Validators, res.Delegators, err -} - func (c *client) GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) { res := &GetCurrentSupplyReply{} err := c.requester.SendRequest(ctx, "platform.getCurrentSupply", &GetCurrentSupplyArgs{ @@ -508,208 +329,6 @@ func (c *client) SampleValidators(ctx context.Context, subnetID ids.ID, sampleSi return res.Validators, err } -func (c *client) AddValidator( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - rewardAddress ids.ShortID, - nodeID ids.NodeID, - stakeAmount, - startTime, - endTime uint64, - delegationFeeRate float32, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - jsonStakeAmount := json.Uint64(stakeAmount) - err := c.requester.SendRequest(ctx, "platform.addValidator", &AddValidatorArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - Staker: platformapi.Staker{ - NodeID: nodeID, - Weight: jsonStakeAmount, - StakeAmount: &jsonStakeAmount, - StartTime: json.Uint64(startTime), - EndTime: json.Uint64(endTime), - }, - RewardAddress: rewardAddress.String(), - DelegationFeeRate: json.Float32(delegationFeeRate), - }, res, options...) 
- return res.TxID, err -} - -func (c *client) AddDelegator( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - rewardAddress ids.ShortID, - nodeID ids.NodeID, - stakeAmount, - startTime, - endTime uint64, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - jsonStakeAmount := json.Uint64(stakeAmount) - err := c.requester.SendRequest(ctx, "platform.addDelegator", &AddDelegatorArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - Staker: platformapi.Staker{ - NodeID: nodeID, - Weight: jsonStakeAmount, - StakeAmount: &jsonStakeAmount, - StartTime: json.Uint64(startTime), - EndTime: json.Uint64(endTime), - }, - RewardAddress: rewardAddress.String(), - }, res, options...) - return res.TxID, err -} - -func (c *client) AddSubnetValidator( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - subnetID ids.ID, - nodeID ids.NodeID, - stakeAmount, - startTime, - endTime uint64, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - jsonStakeAmount := json.Uint64(stakeAmount) - err := c.requester.SendRequest(ctx, "platform.addSubnetValidator", &AddSubnetValidatorArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - Staker: platformapi.Staker{ - NodeID: nodeID, - Weight: jsonStakeAmount, - StakeAmount: &jsonStakeAmount, - StartTime: json.Uint64(startTime), - EndTime: json.Uint64(endTime), - }, - SubnetID: subnetID.String(), - }, res, options...) 
- return res.TxID, err -} - -func (c *client) CreateSubnet( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - controlKeys []ids.ShortID, - threshold uint32, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "platform.createSubnet", &CreateSubnetArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - APISubnet: APISubnet{ - ControlKeys: ids.ShortIDsToStrings(controlKeys), - Threshold: json.Uint32(threshold), - }, - }, res, options...) - return res.TxID, err -} - -func (c *client) ExportAVAX( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - to ids.ShortID, - targetChain string, - amount uint64, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "platform.exportAVAX", &ExportAVAXArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - TargetChain: targetChain, - To: to.String(), - Amount: json.Uint64(amount), - }, res, options...) - return res.TxID, err -} - -func (c *client) ImportAVAX( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - to ids.ShortID, - sourceChain string, - options ...rpc.Option, -) (ids.ID, error) { - res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "platform.importAVAX", &ImportAVAXArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - To: to.String(), - SourceChain: sourceChain, - }, res, options...) 
- return res.TxID, err -} - -func (c *client) CreateBlockchain( - ctx context.Context, - user api.UserPass, - from []ids.ShortID, - changeAddr ids.ShortID, - subnetID ids.ID, - vmID string, - fxIDs []string, - name string, - genesisData []byte, - options ...rpc.Option, -) (ids.ID, error) { - genesisDataStr, err := formatting.Encode(formatting.Hex, genesisData) - if err != nil { - return ids.ID{}, err - } - - res := &api.JSONTxID{} - err = c.requester.SendRequest(ctx, "platform.createBlockchain", &CreateBlockchainArgs{ - JSONSpendHeader: api.JSONSpendHeader{ - UserPass: user, - JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, - JSONChangeAddr: api.JSONChangeAddr{ChangeAddr: changeAddr.String()}, - }, - SubnetID: subnetID, - VMID: vmID, - FxIDs: fxIDs, - Name: name, - GenesisData: genesisDataStr, - Encoding: formatting.Hex, - }, res, options...) - return res.TxID, err -} - func (c *client) GetBlockchainStatus(ctx context.Context, blockchainID string, options ...rpc.Option) (status.BlockchainStatus, error) { res := &GetBlockchainStatusReply{} err := c.requester.SendRequest(ctx, "platform.getBlockchainStatus", &GetBlockchainStatusArgs{ @@ -780,27 +399,6 @@ func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Op return res, err } -func (c *client) AwaitTxDecided(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (*GetTxStatusResponse, error) { - ticker := time.NewTicker(freq) - defer ticker.Stop() - - for { - res, err := c.GetTxStatus(ctx, txID, options...) - if err == nil { - switch res.Status { - case status.Committed, status.Aborted, status.Dropped: - return res, nil - } - } - - select { - case <-ticker.C: - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} - func (c *client) GetStake( ctx context.Context, addrs []ids.ShortID, @@ -857,17 +455,6 @@ func (c *client) GetTotalStake(ctx context.Context, subnetID ids.ID, options ... 
return uint64(amount), err } -func (c *client) GetMaxStakeAmount(ctx context.Context, subnetID ids.ID, nodeID ids.NodeID, startTime, endTime uint64, options ...rpc.Option) (uint64, error) { - res := &GetMaxStakeAmountReply{} - err := c.requester.SendRequest(ctx, "platform.getMaxStakeAmount", &GetMaxStakeAmountArgs{ - SubnetID: subnetID, - NodeID: nodeID, - StartTime: json.Uint64(startTime), - EndTime: json.Uint64(endTime), - }, res, options...) - return uint64(res.Amount), err -} - func (c *client) GetRewardUTXOs(ctx context.Context, args *api.GetTxArgs, options ...rpc.Option) ([][]byte, error) { res := &GetRewardUTXOsReply{} err := c.requester.SendRequest(ctx, "platform.getRewardUTXOs", args, res, options...) @@ -927,3 +514,32 @@ func (c *client) GetBlockByHeight(ctx context.Context, height uint64, options .. } return formatting.Decode(res.Encoding, res.Block) } + +func AwaitTxAccepted( + c Client, + ctx context.Context, + txID ids.ID, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) + defer ticker.Stop() + + for { + res, err := c.GetTxStatus(ctx, txID, options...) + if err != nil { + return err + } + + switch res.Status { + case status.Committed, status.Aborted: + return nil + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/vms/platformvm/config/config.go b/vms/platformvm/config/config.go index 50628c422afd..731b079ca425 100644 --- a/vms/platformvm/config/config.go +++ b/vms/platformvm/config/config.go @@ -14,6 +14,8 @@ import ( "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" ) // Struct collecting all foundational parameters of PlatformVM @@ -29,6 +31,9 @@ type Config struct { // calling VM.Initialize. 
Validators validators.Manager + // All static fees config active before E-upgrade + StaticFeeConfig fee.StaticConfig + // Provides access to the uptime manager as a thread safe data structure UptimeLockedCalculator uptime.LockedCalculator @@ -41,33 +46,6 @@ type Config struct { // Set of subnets that this node is validating TrackedSubnets set.Set[ids.ID] - // Fee that is burned by every non-state creating transaction - TxFee uint64 - - // Fee that must be burned by every state creating transaction before AP3 - CreateAssetTxFee uint64 - - // Fee that must be burned by every subnet creating transaction after AP3 - CreateSubnetTxFee uint64 - - // Fee that must be burned by every transform subnet transaction - TransformSubnetTxFee uint64 - - // Fee that must be burned by every blockchain creating transaction after AP3 - CreateBlockchainTxFee uint64 - - // Transaction fee for adding a primary network validator - AddPrimaryNetworkValidatorFee uint64 - - // Transaction fee for adding a primary network delegator - AddPrimaryNetworkDelegatorFee uint64 - - // Transaction fee for adding a subnet validator - AddSubnetValidatorFee uint64 - - // Transaction fee for adding a subnet delegator - AddSubnetDelegatorFee uint64 - // The minimum amount of tokens one must bond to be a validator MinValidatorStake uint64 @@ -92,20 +70,8 @@ type Config struct { // Config for the minting function RewardConfig reward.Config - // Time of the AP3 network upgrade - ApricotPhase3Time time.Time - - // Time of the AP5 network upgrade - ApricotPhase5Time time.Time - - // Time of the Banff network upgrade - BanffTime time.Time - - // Time of the Cortina network upgrade - CortinaTime time.Time - - // Time of the Durango network upgrade - DurangoTime time.Time + // All network upgrade timestamps + UpgradeConfig upgrade.Config // UseCurrentHeight forces [GetMinimumHeight] to return the current height // of the P-Chain instead of the oldest block in the [recentlyAccepted] @@ -117,40 +83,6 @@ type Config 
struct { UseCurrentHeight bool } -func (c *Config) IsApricotPhase3Activated(timestamp time.Time) bool { - return !timestamp.Before(c.ApricotPhase3Time) -} - -func (c *Config) IsApricotPhase5Activated(timestamp time.Time) bool { - return !timestamp.Before(c.ApricotPhase5Time) -} - -func (c *Config) IsBanffActivated(timestamp time.Time) bool { - return !timestamp.Before(c.BanffTime) -} - -func (c *Config) IsCortinaActivated(timestamp time.Time) bool { - return !timestamp.Before(c.CortinaTime) -} - -func (c *Config) IsDurangoActivated(timestamp time.Time) bool { - return !timestamp.Before(c.DurangoTime) -} - -func (c *Config) GetCreateBlockchainTxFee(timestamp time.Time) uint64 { - if c.IsApricotPhase3Activated(timestamp) { - return c.CreateBlockchainTxFee - } - return c.CreateAssetTxFee -} - -func (c *Config) GetCreateSubnetTxFee(timestamp time.Time) uint64 { - if c.IsApricotPhase3Activated(timestamp) { - return c.CreateSubnetTxFee - } - return c.CreateAssetTxFee -} - // Create the blockchain described in [tx], but only if this node is a member of // the subnet that validates the chain func (c *Config) CreateChain(chainID ids.ID, tx *txs.CreateChainTx) { diff --git a/vms/platformvm/config/config.md b/vms/platformvm/config/config.md new file mode 100644 index 000000000000..a9058c111ead --- /dev/null +++ b/vms/platformvm/config/config.md @@ -0,0 +1,226 @@ +--- +tags: [Configs] +description: Reference for all available configuration options and parameters for the PlatformVM. +pagination_label: P-Chain Configs +sidebar_position: 1 +--- + +# P-Chain + +This document provides details about the configuration options available for the PlatformVM. + +In order to specify a configuration for the PlatformVM, you need to define a `Config` struct and its parameters. 
The default values for these parameters are: + +```json +{ + "Chains": null, + "Validators": null, + "UptimeLockedCalculator": null, + "SybilProtectionEnabled": false, + "PartialSyncPrimaryNetwork": false, + "TrackedSubnets": [], + "TxFee": 0, + "CreateAssetTxFee": 0, + "CreateSubnetTxFee": 0, + "TransformSubnetTxFee": 0, + "CreateBlockchainTxFee": 0, + "AddPrimaryNetworkValidatorFee": 0, + "AddPrimaryNetworkDelegatorFee": 0, + "AddSubnetValidatorFee": 0, + "AddSubnetDelegatorFee": 0, + "MinValidatorStake": 0, + "MaxValidatorStake": 0, + "MinDelegatorStake": 0, + "MinDelegationFee": 0, + "UptimePercentage": 0, + "MinStakeDuration": "0s", + "MaxStakeDuration": "0s", + "RewardConfig": {}, + "ApricotPhase3Time": "0001-01-01T00:00:00Z", + "ApricotPhase5Time": "0001-01-01T00:00:00Z", + "BanffTime": "0001-01-01T00:00:00Z", + "CortinaTime": "0001-01-01T00:00:00Z", + "DurangoTime": "0001-01-01T00:00:00Z", + "EUpgradeTime": "0001-01-01T00:00:00Z", + "UseCurrentHeight": false +} +``` + +Default values are overridden only if explicitly specified in the config. + +## Parameters + +The parameters are as follows: + +### `Chains` + +The node's chain manager + +### `Validators` + +Node's validator set maps SubnetID to validators of the Subnet + +- The primary network's validator set should have been added to the manager before calling VM.Initialize. +- The primary network's validator set should be empty before calling VM.Initialize. + +### `UptimeLockedCalculator` + +Provides access to the uptime manager as a thread-safe data structure + +### `SybilProtectionEnabled` + +_Boolean_ + +True if the node is being run with staking enabled + +### `PartialSyncPrimaryNetwork` + +_Boolean_ + +If true, only the P-chain will be instantiated on the primary network. 
+ +### `TrackedSubnets` + +Set of Subnets that this node is validating + +### `TxFee` + +_Uint64_ + +Fee that is burned by every non-state creating transaction + +### `CreateAssetTxFee` + +_Uint64_ + +Fee that must be burned by every state creating transaction before AP3 + +### `CreateSubnetTxFee` + +_Uint64_ + +Fee that must be burned by every Subnet creating transaction after AP3 + +### `TransformSubnetTxFee` + +_Uint64_ + +Fee that must be burned by every transform Subnet transaction + +### `CreateBlockchainTxFee` + +_Uint64_ + +Fee that must be burned by every blockchain creating transaction after AP3 + +### `AddPrimaryNetworkValidatorFee` + +_Uint64_ + +Transaction fee for adding a primary network validator + +### `AddPrimaryNetworkDelegatorFee` + +_Uint64_ + +Transaction fee for adding a primary network delegator + +### `AddSubnetValidatorFee` + +_Uint64_ + +Transaction fee for adding a Subnet validator + +### `AddSubnetDelegatorFee` + +_Uint64_ + +Transaction fee for adding a Subnet delegator + +### `MinValidatorStake` + +_Uint64_ + +The minimum amount of tokens one must bond to be a validator + +### `MaxValidatorStake` + +_Uint64_ + +The maximum amount of tokens that can be bonded on a validator + +### `MinDelegatorStake` + +_Uint64_ + +Minimum stake, in nAVAX, that can be delegated on the primary network + +### `MinDelegationFee` + +_Uint32_ + +Minimum fee that can be charged for delegation + +### `UptimePercentage` + +_Float64_ + +UptimePercentage is the minimum uptime required to be rewarded for staking + +### `MinStakeDuration` + +_Duration_ + +Minimum amount of time to allow a staker to stake + +### `MaxStakeDuration` + +_Duration_ + +Maximum amount of time to allow a staker to stake + +### `RewardConfig` + +Config for the minting function + +### `ApricotPhase3Time` + +_Time_ + +Time of the AP3 network upgrade + +### `ApricotPhase5Time` + +_Time_ + +Time of the AP5 network upgrade + +### `BanffTime` + +_Time_ + +Time of the Banff network upgrade + +### 
`CortinaTime` + +_Time_ + +Time of the Cortina network upgrade + +### `DurangoTime` + +_Time_ + +Time of the Durango network upgrade + +### `EUpgradeTime` + +_Time_ + +Time of the E network upgrade + +### `UseCurrentHeight` + +_Boolean_ + +UseCurrentHeight forces `GetMinimumHeight` to return the current height of the P-Chain instead of the oldest block in the `recentlyAccepted` window. This config is particularly useful for triggering proposervm activation on recently created Subnets (without this, users need to wait for `recentlyAcceptedWindowTTL` to pass for activation to occur). diff --git a/vms/platformvm/config/execution_config_test.go b/vms/platformvm/config/execution_config_test.go index 89fd5cd55b05..f3fe8e4c6082 100644 --- a/vms/platformvm/config/execution_config_test.go +++ b/vms/platformvm/config/execution_config_test.go @@ -4,6 +4,8 @@ package config import ( + "encoding/json" + "reflect" "testing" "time" @@ -12,6 +14,23 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/network" ) +// Requires all values in a struct to be initialized +func verifyInitializedStruct(tb testing.TB, s interface{}) { + tb.Helper() + + require := require.New(tb) + + structType := reflect.TypeOf(s) + require.Equal(reflect.Struct, structType.Kind()) + + v := reflect.ValueOf(s) + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + require.True(field.IsValid(), "invalid field: ", structType.Field(i).Name) + require.False(field.IsZero(), "zero field: ", structType.Field(i).Name) + } +} + func TestExecutionConfigUnmarshal(t *testing.T) { t.Run("default values from empty json", func(t *testing.T) { require := require.New(t) @@ -41,44 +60,26 @@ func TestExecutionConfigUnmarshal(t *testing.T) { t.Run("all values extracted from json", func(t *testing.T) { require := require.New(t) - b := []byte(`{ - "network": { - "max-validator-set-staleness": 1, - "target-gossip-size": 2, - "pull-gossip-poll-size": 3, - "pull-gossip-frequency": 4, - "pull-gossip-throttling-period": 
5, - "pull-gossip-throttling-limit": 6, - "expected-bloom-filter-elements":7, - "expected-bloom-filter-false-positive-probability": 8, - "max-bloom-filter-false-positive-probability": 9, - "legacy-push-gossip-cache-size": 10 - }, - "block-cache-size": 1, - "tx-cache-size": 2, - "transformed-subnet-tx-cache-size": 3, - "reward-utxos-cache-size": 5, - "chain-cache-size": 6, - "chain-db-cache-size": 7, - "block-id-cache-size": 8, - "fx-owner-cache-size": 9, - "checksums-enabled": true, - "mempool-prune-frequency": 60000000000 - }`) - ec, err := GetExecutionConfig(b) - require.NoError(err) + expected := &ExecutionConfig{ Network: network.Config{ MaxValidatorSetStaleness: 1, TargetGossipSize: 2, - PullGossipPollSize: 3, - PullGossipFrequency: 4, - PullGossipThrottlingPeriod: 5, - PullGossipThrottlingLimit: 6, - ExpectedBloomFilterElements: 7, - ExpectedBloomFilterFalsePositiveProbability: 8, - MaxBloomFilterFalsePositiveProbability: 9, - LegacyPushGossipCacheSize: 10, + PushGossipPercentStake: .3, + PushGossipNumValidators: 4, + PushGossipNumPeers: 5, + PushRegossipNumValidators: 6, + PushRegossipNumPeers: 7, + PushGossipDiscardedCacheSize: 8, + PushGossipMaxRegossipFrequency: 9, + PushGossipFrequency: 10, + PullGossipPollSize: 11, + PullGossipFrequency: 12, + PullGossipThrottlingPeriod: 13, + PullGossipThrottlingLimit: 14, + ExpectedBloomFilterElements: 15, + ExpectedBloomFilterFalsePositiveProbability: 16, + MaxBloomFilterFalsePositiveProbability: 17, }, BlockCacheSize: 1, TxCacheSize: 2, @@ -91,55 +92,14 @@ func TestExecutionConfigUnmarshal(t *testing.T) { ChecksumsEnabled: true, MempoolPruneFrequency: time.Minute, } - require.Equal(expected, ec) - }) + verifyInitializedStruct(t, *expected) + verifyInitializedStruct(t, expected.Network) - t.Run("default values applied correctly", func(t *testing.T) { - require := require.New(t) - b := []byte(`{ - "network": { - "max-validator-set-staleness": 1, - "target-gossip-size": 2, - "pull-gossip-poll-size": 3, - 
"pull-gossip-frequency": 4, - "pull-gossip-throttling-period": 5 - }, - "block-cache-size": 1, - "tx-cache-size": 2, - "transformed-subnet-tx-cache-size": 3, - "reward-utxos-cache-size": 5, - "chain-cache-size": 6, - "chain-db-cache-size": 7, - "block-id-cache-size": 8, - "fx-owner-cache-size": 9, - "checksums-enabled": true - }`) - ec, err := GetExecutionConfig(b) + b, err := json.Marshal(expected) require.NoError(err) - expected := &ExecutionConfig{ - Network: network.Config{ - MaxValidatorSetStaleness: 1, - TargetGossipSize: 2, - PullGossipPollSize: 3, - PullGossipFrequency: 4, - PullGossipThrottlingPeriod: 5, - PullGossipThrottlingLimit: DefaultExecutionConfig.Network.PullGossipThrottlingLimit, - ExpectedBloomFilterElements: DefaultExecutionConfig.Network.ExpectedBloomFilterElements, - ExpectedBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.ExpectedBloomFilterFalsePositiveProbability, - MaxBloomFilterFalsePositiveProbability: DefaultExecutionConfig.Network.MaxBloomFilterFalsePositiveProbability, - LegacyPushGossipCacheSize: DefaultExecutionConfig.Network.LegacyPushGossipCacheSize, - }, - BlockCacheSize: 1, - TxCacheSize: 2, - TransformedSubnetTxCacheSize: 3, - RewardUTXOsCacheSize: 5, - ChainCacheSize: 6, - ChainDBCacheSize: 7, - BlockIDCacheSize: 8, - FxOwnerCacheSize: 9, - ChecksumsEnabled: true, - MempoolPruneFrequency: 30 * time.Minute, - } - require.Equal(expected, ec) + + actual, err := GetExecutionConfig(b) + require.NoError(err) + require.Equal(expected, actual) }) } diff --git a/vms/platformvm/docs/validators_versioning.md b/vms/platformvm/docs/validators_versioning.md index 7db716d12677..a1007670321c 100644 --- a/vms/platformvm/docs/validators_versioning.md +++ b/vms/platformvm/docs/validators_versioning.md @@ -68,7 +68,7 @@ Validator diffs layout is optimized to support iteration. Validator sets are reb Note that: 1. `Weight` diffs related to a Subnet are stored contiguously. -2. Diff height is serialized as `Reverse_Height`. 
It is stored with big endian format and has its bits flipped too. Big endianess ensures that heights are stored in order, bit flipping ensures that the top-most height is always the first. +2. Diff height is serialized as `Reverse_Height`. It is stored with big endian format and has its bits flipped too. Big endianness ensures that heights are stored in order, bit flipping ensures that the top-most height is always the first. 3. `NodeID` is part of the key and `state.ValidatorWeightDiff` is part of the value. `BLS Public` diffs are stored as follows: diff --git a/vms/platformvm/metrics/block_metrics.go b/vms/platformvm/metrics/block_metrics.go index 09239d8df40a..bf05de8e8dd8 100644 --- a/vms/platformvm/metrics/block_metrics.go +++ b/vms/platformvm/metrics/block_metrics.go @@ -4,70 +4,61 @@ package metrics import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/block" ) -var _ block.Visitor = (*blockMetrics)(nil) +const blkLabel = "blk" + +var ( + _ block.Visitor = (*blockMetrics)(nil) + + blkLabels = []string{blkLabel} +) type blockMetrics struct { txMetrics *txMetrics - - numAbortBlocks, - numAtomicBlocks, - numCommitBlocks, - numProposalBlocks, - numStandardBlocks prometheus.Counter + numBlocks *prometheus.CounterVec } -func newBlockMetrics( - namespace string, - registerer prometheus.Registerer, -) (*blockMetrics, error) { - txMetrics, err := newTxMetrics(namespace, registerer) - errs := wrappers.Errs{Err: err} - m := &blockMetrics{ - txMetrics: txMetrics, - numAbortBlocks: newBlockMetric(namespace, "abort", registerer, &errs), - numAtomicBlocks: newBlockMetric(namespace, "atomic", registerer, &errs), - numCommitBlocks: newBlockMetric(namespace, "commit", registerer, &errs), - numProposalBlocks: newBlockMetric(namespace, "proposal", registerer, &errs), - numStandardBlocks: newBlockMetric(namespace, "standard", registerer, &errs), +func 
newBlockMetrics(registerer prometheus.Registerer) (*blockMetrics, error) { + txMetrics, err := newTxMetrics(registerer) + if err != nil { + return nil, err } - return m, errs.Err -} -func newBlockMetric( - namespace string, - blockName string, - registerer prometheus.Registerer, - errs *wrappers.Errs, -) prometheus.Counter { - blockMetric := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: blockName + "_blks_accepted", - Help: fmt.Sprintf("Number of %s blocks accepted", blockName), - }) - errs.Add(registerer.Register(blockMetric)) - return blockMetric + m := &blockMetrics{ + txMetrics: txMetrics, + numBlocks: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "blks_accepted", + Help: "number of blocks accepted", + }, + blkLabels, + ), + } + return m, registerer.Register(m.numBlocks) } func (m *blockMetrics) BanffAbortBlock(*block.BanffAbortBlock) error { - m.numAbortBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "abort", + }).Inc() return nil } func (m *blockMetrics) BanffCommitBlock(*block.BanffCommitBlock) error { - m.numCommitBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "commit", + }).Inc() return nil } func (m *blockMetrics) BanffProposalBlock(b *block.BanffProposalBlock) error { - m.numProposalBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "proposal", + }).Inc() for _, tx := range b.Transactions { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { return err @@ -77,7 +68,9 @@ func (m *blockMetrics) BanffProposalBlock(b *block.BanffProposalBlock) error { } func (m *blockMetrics) BanffStandardBlock(b *block.BanffStandardBlock) error { - m.numStandardBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "standard", + }).Inc() for _, tx := range b.Transactions { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { return err @@ -87,22 +80,30 @@ func (m *blockMetrics) BanffStandardBlock(b *block.BanffStandardBlock) error { } func (m *blockMetrics) 
ApricotAbortBlock(*block.ApricotAbortBlock) error { - m.numAbortBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "abort", + }).Inc() return nil } func (m *blockMetrics) ApricotCommitBlock(*block.ApricotCommitBlock) error { - m.numCommitBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "commit", + }).Inc() return nil } func (m *blockMetrics) ApricotProposalBlock(b *block.ApricotProposalBlock) error { - m.numProposalBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "proposal", + }).Inc() return b.Tx.Unsigned.Visit(m.txMetrics) } func (m *blockMetrics) ApricotStandardBlock(b *block.ApricotStandardBlock) error { - m.numStandardBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "standard", + }).Inc() for _, tx := range b.Transactions { if err := tx.Unsigned.Visit(m.txMetrics); err != nil { return err @@ -112,6 +113,8 @@ func (m *blockMetrics) ApricotStandardBlock(b *block.ApricotStandardBlock) error } func (m *blockMetrics) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { - m.numAtomicBlocks.Inc() + m.numBlocks.With(prometheus.Labels{ + blkLabel: "atomic", + }).Inc() return b.Tx.Unsigned.Visit(m.txMetrics) } diff --git a/vms/platformvm/metrics/metrics.go b/vms/platformvm/metrics/metrics.go index 98b611a017ed..82b51dc8c34c 100644 --- a/vms/platformvm/metrics/metrics.go +++ b/vms/platformvm/metrics/metrics.go @@ -40,61 +40,50 @@ type Metrics interface { SetTimeUntilSubnetUnstake(subnetID ids.ID, timeUntilUnstake time.Duration) } -func New( - namespace string, - registerer prometheus.Registerer, -) (Metrics, error) { - blockMetrics, err := newBlockMetrics(namespace, registerer) +func New(registerer prometheus.Registerer) (Metrics, error) { + blockMetrics, err := newBlockMetrics(registerer) m := &metrics{ blockMetrics: blockMetrics, timeUntilUnstake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_until_unstake", - Help: "Time (in ns) until this node leaves the Primary Network's 
validator set", + Name: "time_until_unstake", + Help: "Time (in ns) until this node leaves the Primary Network's validator set", }), timeUntilSubnetUnstake: prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespace, - Name: "time_until_unstake_subnet", - Help: "Time (in ns) until this node leaves the subnet's validator set", + Name: "time_until_unstake_subnet", + Help: "Time (in ns) until this node leaves the subnet's validator set", }, []string{"subnetID"}, ), localStake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "local_staked", - Help: "Amount (in nAVAX) of AVAX staked on this node", + Name: "local_staked", + Help: "Amount (in nAVAX) of AVAX staked on this node", }), totalStake: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "total_staked", - Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", + Name: "total_staked", + Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", }), validatorSetsCached: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "validator_sets_cached", - Help: "Total number of validator sets cached", + Name: "validator_sets_cached", + Help: "Total number of validator sets cached", }), validatorSetsCreated: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "validator_sets_created", - Help: "Total number of validator sets created from applying difflayers", + Name: "validator_sets_created", + Help: "Total number of validator sets created from applying difflayers", }), validatorSetsHeightDiff: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "validator_sets_height_diff_sum", - Help: "Total number of validator sets diffs applied for generating validator sets", + Name: "validator_sets_height_diff_sum", + Help: "Total number of validator sets diffs applied for generating validator sets", }), validatorSetsDuration: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - 
Name: "validator_sets_duration_sum", - Help: "Total amount of time generating validator sets in nanoseconds", + Name: "validator_sets_duration_sum", + Help: "Total amount of time generating validator sets in nanoseconds", }), } errs := wrappers.Errs{Err: err} - apiRequestMetrics, err := metric.NewAPIInterceptor(namespace, registerer) + apiRequestMetrics, err := metric.NewAPIInterceptor(registerer) errs.Add(err) m.APIInterceptor = apiRequestMetrics errs.Add( diff --git a/vms/platformvm/metrics/tx_metrics.go b/vms/platformvm/metrics/tx_metrics.go index 70b032765bfc..02f45f011624 100644 --- a/vms/platformvm/metrics/tx_metrics.go +++ b/vms/platformvm/metrics/tx_metrics.go @@ -4,145 +4,137 @@ package metrics import ( - "fmt" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ txs.Visitor = (*txMetrics)(nil) +const txLabel = "tx" + +var ( + _ txs.Visitor = (*txMetrics)(nil) + + txLabels = []string{txLabel} +) type txMetrics struct { - numAddDelegatorTxs, - numAddSubnetValidatorTxs, - numAddValidatorTxs, - numAdvanceTimeTxs, - numCreateChainTxs, - numCreateSubnetTxs, - numExportTxs, - numImportTxs, - numRewardValidatorTxs, - numRemoveSubnetValidatorTxs, - numTransformSubnetTxs, - numAddPermissionlessValidatorTxs, - numAddPermissionlessDelegatorTxs, - numTransferSubnetOwnershipTxs, - numBaseTxs prometheus.Counter -} - -func newTxMetrics( - namespace string, - registerer prometheus.Registerer, -) (*txMetrics, error) { - errs := wrappers.Errs{} - m := &txMetrics{ - numAddDelegatorTxs: newTxMetric(namespace, "add_delegator", registerer, &errs), - numAddSubnetValidatorTxs: newTxMetric(namespace, "add_subnet_validator", registerer, &errs), - numAddValidatorTxs: newTxMetric(namespace, "add_validator", registerer, &errs), - numAdvanceTimeTxs: newTxMetric(namespace, "advance_time", registerer, &errs), - numCreateChainTxs: newTxMetric(namespace, "create_chain", 
registerer, &errs), - numCreateSubnetTxs: newTxMetric(namespace, "create_subnet", registerer, &errs), - numExportTxs: newTxMetric(namespace, "export", registerer, &errs), - numImportTxs: newTxMetric(namespace, "import", registerer, &errs), - numRewardValidatorTxs: newTxMetric(namespace, "reward_validator", registerer, &errs), - numRemoveSubnetValidatorTxs: newTxMetric(namespace, "remove_subnet_validator", registerer, &errs), - numTransformSubnetTxs: newTxMetric(namespace, "transform_subnet", registerer, &errs), - numAddPermissionlessValidatorTxs: newTxMetric(namespace, "add_permissionless_validator", registerer, &errs), - numAddPermissionlessDelegatorTxs: newTxMetric(namespace, "add_permissionless_delegator", registerer, &errs), - numTransferSubnetOwnershipTxs: newTxMetric(namespace, "transfer_subnet_ownership", registerer, &errs), - numBaseTxs: newTxMetric(namespace, "base", registerer, &errs), - } - return m, errs.Err + numTxs *prometheus.CounterVec } -func newTxMetric( - namespace string, - txName string, - registerer prometheus.Registerer, - errs *wrappers.Errs, -) prometheus.Counter { - txMetric := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: txName + "_txs_accepted", - Help: fmt.Sprintf("Number of %s transactions accepted", txName), - }) - errs.Add(registerer.Register(txMetric)) - return txMetric +func newTxMetrics(registerer prometheus.Registerer) (*txMetrics, error) { + m := &txMetrics{ + numTxs: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "txs_accepted", + Help: "number of transactions accepted", + }, + txLabels, + ), + } + return m, registerer.Register(m.numTxs) } func (m *txMetrics) AddValidatorTx(*txs.AddValidatorTx) error { - m.numAddValidatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "add_validator", + }).Inc() return nil } func (m *txMetrics) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { - m.numAddSubnetValidatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: 
"add_subnet_validator", + }).Inc() return nil } func (m *txMetrics) AddDelegatorTx(*txs.AddDelegatorTx) error { - m.numAddDelegatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "add_delegator", + }).Inc() return nil } func (m *txMetrics) CreateChainTx(*txs.CreateChainTx) error { - m.numCreateChainTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "create_chain", + }).Inc() return nil } func (m *txMetrics) CreateSubnetTx(*txs.CreateSubnetTx) error { - m.numCreateSubnetTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "create_subnet", + }).Inc() return nil } func (m *txMetrics) ImportTx(*txs.ImportTx) error { - m.numImportTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "import", + }).Inc() return nil } func (m *txMetrics) ExportTx(*txs.ExportTx) error { - m.numExportTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "export", + }).Inc() return nil } func (m *txMetrics) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - m.numAdvanceTimeTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "advance_time", + }).Inc() return nil } func (m *txMetrics) RewardValidatorTx(*txs.RewardValidatorTx) error { - m.numRewardValidatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "reward_validator", + }).Inc() return nil } func (m *txMetrics) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { - m.numRemoveSubnetValidatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "remove_subnet_validator", + }).Inc() return nil } func (m *txMetrics) TransformSubnetTx(*txs.TransformSubnetTx) error { - m.numTransformSubnetTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "transform_subnet", + }).Inc() return nil } func (m *txMetrics) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { - m.numAddPermissionlessValidatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "add_permissionless_validator", + }).Inc() return nil } func (m *txMetrics) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { 
- m.numAddPermissionlessDelegatorTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "add_permissionless_delegator", + }).Inc() return nil } func (m *txMetrics) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { - m.numTransferSubnetOwnershipTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "transfer_subnet_ownership", + }).Inc() return nil } func (m *txMetrics) BaseTx(*txs.BaseTx) error { - m.numBaseTxs.Inc() + m.numTxs.With(prometheus.Labels{ + txLabel: "base", + }).Inc() return nil } diff --git a/vms/platformvm/network/config.go b/vms/platformvm/network/config.go index 8536504d8383..797138ab93f9 100644 --- a/vms/platformvm/network/config.go +++ b/vms/platformvm/network/config.go @@ -12,6 +12,14 @@ import ( var DefaultConfig = Config{ MaxValidatorSetStaleness: time.Minute, TargetGossipSize: 20 * units.KiB, + PushGossipPercentStake: .9, + PushGossipNumValidators: 100, + PushGossipNumPeers: 0, + PushRegossipNumValidators: 10, + PushRegossipNumPeers: 0, + PushGossipDiscardedCacheSize: 16384, + PushGossipMaxRegossipFrequency: 30 * time.Second, + PushGossipFrequency: 500 * time.Millisecond, PullGossipPollSize: 1, PullGossipFrequency: 1500 * time.Millisecond, PullGossipThrottlingPeriod: 10 * time.Second, @@ -19,7 +27,6 @@ var DefaultConfig = Config{ ExpectedBloomFilterElements: 8 * 1024, ExpectedBloomFilterFalsePositiveProbability: .01, MaxBloomFilterFalsePositiveProbability: .05, - LegacyPushGossipCacheSize: 512, } type Config struct { @@ -30,6 +37,32 @@ type Config struct { // sent when pushing transactions and when responded to transaction pull // requests. TargetGossipSize int `json:"target-gossip-size"` + // PushGossipPercentStake is the percentage of total stake to push + // transactions to in the first round of gossip. Nodes with higher stake are + // preferred over nodes with less stake to minimize the number of messages + // sent over the p2p network. 
+ PushGossipPercentStake float64 `json:"push-gossip-percent-stake"` + // PushGossipNumValidators is the number of validators to push transactions + // to in the first round of gossip. + PushGossipNumValidators int `json:"push-gossip-num-validators"` + // PushGossipNumPeers is the number of peers to push transactions to in the + // first round of gossip. + PushGossipNumPeers int `json:"push-gossip-num-peers"` + // PushRegossipNumValidators is the number of validators to push + // transactions to after the first round of gossip. + PushRegossipNumValidators int `json:"push-regossip-num-validators"` + // PushRegossipNumPeers is the number of peers to push transactions to after + // the first round of gossip. + PushRegossipNumPeers int `json:"push-regossip-num-peers"` + // PushGossipDiscardedCacheSize is the number of txIDs to cache to avoid + // pushing transactions that were recently dropped from the mempool. + PushGossipDiscardedCacheSize int `json:"push-gossip-discarded-cache-size"` + // PushGossipMaxRegossipFrequency is the limit for how frequently a + // transaction will be push gossiped. + PushGossipMaxRegossipFrequency time.Duration `json:"push-gossip-max-regossip-frequency"` + // PushGossipFrequency is how frequently rounds of push gossip are + // performed. + PushGossipFrequency time.Duration `json:"push-gossip-frequency"` // PullGossipPollSize is the number of validators to sample when performing // a round of pull gossip. PullGossipPollSize int `json:"pull-gossip-poll-size"` @@ -57,10 +90,4 @@ type Config struct { // The smaller this number is, the more frequently that the bloom filter // will be regenerated. MaxBloomFilterFalsePositiveProbability float64 `json:"max-bloom-filter-false-positive-probability"` - // LegacyPushGossipCacheSize tracks the most recently received transactions - // and ensures to only gossip them once. - // - // Deprecated: The legacy push gossip mechanism is deprecated in favor of - // the p2p SDK's push gossip mechanism. 
- LegacyPushGossipCacheSize int `json:"legacy-push-gossip-cache-size"` } diff --git a/vms/platformvm/network/gossip.go b/vms/platformvm/network/gossip.go index 6e71719bc2c9..7e6e7adc341c 100644 --- a/vms/platformvm/network/gossip.go +++ b/vms/platformvm/network/gossip.go @@ -17,7 +17,9 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/txs/mempool" + + pmempool "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) var ( @@ -65,7 +67,7 @@ func (txMarshaller) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { } func newGossipMempool( - mempool mempool.Mempool, + mempool pmempool.Mempool, registerer prometheus.Registerer, log logging.Logger, txVerifier TxVerifier, @@ -83,7 +85,7 @@ func newGossipMempool( } type gossipMempool struct { - mempool.Mempool + pmempool.Mempool log logging.Logger txVerifier TxVerifier @@ -136,6 +138,11 @@ func (g *gossipMempool) Add(tx *txs.Tx) error { return nil } +func (g *gossipMempool) Has(txID ids.ID) bool { + _, ok := g.Mempool.Get(txID) + return ok +} + func (g *gossipMempool) GetFilter() (bloom []byte, salt []byte) { g.lock.RLock() defer g.lock.RUnlock() diff --git a/vms/platformvm/network/gossip_test.go b/vms/platformvm/network/gossip_test.go index 47f0602c6bc1..eea36ed5230e 100644 --- a/vms/platformvm/network/gossip_test.go +++ b/vms/platformvm/network/gossip_test.go @@ -14,7 +14,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/txs/mempool" + + pmempool "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) var errFoo = errors.New("foo") @@ -29,7 +31,7 @@ func TestGossipMempoolAddVerificationError(t 
*testing.T) { TxID: txID, } - mempool := mempool.NewMockMempool(ctrl) + mempool := pmempool.NewMockMempool(ctrl) txVerifier := testTxVerifier{err: errFoo} mempool.EXPECT().Get(txID).Return(nil, false) @@ -63,7 +65,7 @@ func TestGossipMempoolAddError(t *testing.T) { } txVerifier := testTxVerifier{} - mempool := mempool.NewMockMempool(ctrl) + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(txID).Return(nil, false) mempool.EXPECT().GetDropReason(txID).Return(nil) @@ -91,7 +93,7 @@ func TestMempoolDuplicate(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - testMempool := mempool.NewMockMempool(ctrl) + testMempool := pmempool.NewMockMempool(ctrl) txVerifier := testTxVerifier{} txID := ids.GenerateTestID() @@ -128,7 +130,7 @@ func TestGossipAddBloomFilter(t *testing.T) { } txVerifier := testTxVerifier{} - mempool := mempool.NewMockMempool(ctrl) + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(txID).Return(nil, false) mempool.EXPECT().GetDropReason(txID).Return(nil) diff --git a/vms/platformvm/network/network.go b/vms/platformvm/network/network.go index af51c4755f4d..a43bb4b99aa1 100644 --- a/vms/platformvm/network/network.go +++ b/vms/platformvm/network/network.go @@ -5,37 +5,27 @@ package network import ( "context" - "sync" + "errors" "time" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/components/message" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) const TxGossipHandlerID = 0 -type Network interface { - common.AppHandler +var errMempoolDisabledWithPartialSync = 
errors.New("mempool is disabled partial syncing") - // Gossip starts gossiping transactions and blocks until it completes. - Gossip(ctx context.Context) - // IssueTx verifies the transaction at the currently preferred state, adds - // it to the mempool, and gossips it to the network. - IssueTx(context.Context, *txs.Tx) error -} - -type network struct { +type Network struct { *p2p.Network log logging.Logger @@ -44,13 +34,10 @@ type network struct { partialSyncPrimaryNetwork bool appSender common.AppSender - txPushGossiper gossip.Accumulator[*txs.Tx] - txPullGossiper gossip.Gossiper - txGossipFrequency time.Duration - - // gossip related attributes - recentTxsLock sync.Mutex - recentTxs *cache.LRU[ids.ID, struct{}] + txPushGossiper *gossip.PushGossiper[*txs.Tx] + txPushGossipFrequency time.Duration + txPullGossiper gossip.Gossiper + txPullGossipFrequency time.Duration } func New( @@ -64,7 +51,7 @@ func New( appSender common.AppSender, registerer prometheus.Registerer, config Config, -) (Network, error) { +) (*Network, error) { p2pNetwork, err := p2p.NewNetwork(log, appSender, registerer, "p2p") if err != nil { return nil, err @@ -87,13 +74,6 @@ func New( return nil, err } - txPushGossiper := gossip.NewPushGossiper[*txs.Tx]( - marshaller, - txGossipClient, - txGossipMetrics, - config.TargetGossipSize, - ) - gossipMempool, err := newGossipMempool( mempool, registerer, @@ -107,8 +87,30 @@ func New( return nil, err } - var txPullGossiper gossip.Gossiper - txPullGossiper = gossip.NewPullGossiper[*txs.Tx]( + txPushGossiper, err := gossip.NewPushGossiper[*txs.Tx]( + marshaller, + gossipMempool, + validators, + txGossipClient, + txGossipMetrics, + gossip.BranchingFactor{ + StakePercentage: config.PushGossipPercentStake, + Validators: config.PushGossipNumValidators, + Peers: config.PushGossipNumPeers, + }, + gossip.BranchingFactor{ + Validators: config.PushRegossipNumValidators, + Peers: config.PushRegossipNumPeers, + }, + config.PushGossipDiscardedCacheSize, + 
config.TargetGossipSize, + config.PushGossipMaxRegossipFrequency, + ) + if err != nil { + return nil, err + } + + var txPullGossiper gossip.Gossiper = gossip.NewPullGossiper[*txs.Tx]( log, marshaller, gossipMempool, @@ -127,7 +129,6 @@ func New( handler := gossip.NewHandler[*txs.Tx]( log, marshaller, - txPushGossiper, gossipMempool, txGossipMetrics, config.TargetGossipSize, @@ -157,7 +158,7 @@ func New( return nil, err } - return &network{ + return &Network{ Network: p2pNetwork, log: log, txVerifier: txVerifier, @@ -165,28 +166,33 @@ func New( partialSyncPrimaryNetwork: partialSyncPrimaryNetwork, appSender: appSender, txPushGossiper: txPushGossiper, + txPushGossipFrequency: config.PushGossipFrequency, txPullGossiper: txPullGossiper, - txGossipFrequency: config.PullGossipFrequency, - recentTxs: &cache.LRU[ids.ID, struct{}]{Size: config.LegacyPushGossipCacheSize}, + txPullGossipFrequency: config.PullGossipFrequency, }, nil } -func (n *network) Gossip(ctx context.Context) { +func (n *Network) PushGossip(ctx context.Context) { + // TODO: Even though the node is running partial sync, we should support + // issuing transactions from the RPC. + if n.partialSyncPrimaryNetwork { + return + } + + gossip.Every(ctx, n.log, n.txPushGossiper, n.txPushGossipFrequency) +} + +func (n *Network) PullGossip(ctx context.Context) { // If the node is running partial sync, we should not perform any pull // gossip. 
if n.partialSyncPrimaryNetwork { return } - gossip.Every(ctx, n.log, n.txPullGossiper, n.txGossipFrequency) + gossip.Every(ctx, n.log, n.txPullGossiper, n.txPullGossipFrequency) } -func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { - n.log.Debug("called AppGossip message handler", - zap.Stringer("nodeID", nodeID), - zap.Int("messageLen", len(msgBytes)), - ) - +func (n *Network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { if n.partialSyncPrimaryNetwork { n.log.Debug("dropping AppGossip message", zap.String("reason", "primary network is not being fully synced"), @@ -194,102 +200,22 @@ func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []b return nil } - msgIntf, err := message.Parse(msgBytes) - if err != nil { - n.log.Debug("forwarding AppGossip to p2p network", - zap.String("reason", "failed to parse message"), - ) - - return n.Network.AppGossip(ctx, nodeID, msgBytes) - } - - msg, ok := msgIntf.(*message.Tx) - if !ok { - n.log.Debug("dropping unexpected message", - zap.Stringer("nodeID", nodeID), - ) - return nil - } - - tx, err := txs.Parse(txs.Codec, msg.Tx) - if err != nil { - n.log.Verbo("received invalid tx", - zap.Stringer("nodeID", nodeID), - zap.Binary("tx", msg.Tx), - zap.Error(err), - ) - return nil - } - txID := tx.ID() - - if err := n.issueTx(tx); err == nil { - n.legacyGossipTx(ctx, txID, msgBytes) - - n.txPushGossiper.Add(tx) - return n.txPushGossiper.Gossip(ctx) - } - return nil + return n.Network.AppGossip(ctx, nodeID, msgBytes) } -func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { - if err := n.issueTx(tx); err != nil { - return err - } - - txBytes := tx.Bytes() - msg := &message.Tx{ - Tx: txBytes, - } - msgBytes, err := message.Build(msg) - if err != nil { - return err - } - - txID := tx.ID() - n.legacyGossipTx(ctx, txID, msgBytes) - n.txPushGossiper.Add(tx) - return n.txPushGossiper.Gossip(ctx) -} - -// returns nil if the tx is in the 
mempool -func (n *network) issueTx(tx *txs.Tx) error { +func (n *Network) IssueTxFromRPC(tx *txs.Tx) error { // If we are partially syncing the Primary Network, we should not be // maintaining the transaction mempool locally. + // + // TODO: We should still push the transaction to some peers when partial + // syncing. if n.partialSyncPrimaryNetwork { - return nil + return errMempoolDisabledWithPartialSync } if err := n.mempool.Add(tx); err != nil { - n.log.Debug("tx failed to be added to the mempool", - zap.Stringer("txID", tx.ID()), - zap.Error(err), - ) - return err } - + n.txPushGossiper.Add(tx) return nil } - -func (n *network) legacyGossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { - n.recentTxsLock.Lock() - _, has := n.recentTxs.Get(txID) - n.recentTxs.Put(txID, struct{}{}) - n.recentTxsLock.Unlock() - - // Don't gossip a transaction if it has been recently gossiped. - if has { - return - } - - n.log.Debug("gossiping tx", - zap.Stringer("txID", txID), - ) - - if err := n.appSender.SendAppGossip(ctx, msgBytes); err != nil { - n.log.Error("failed to gossip tx", - zap.Stringer("txID", txID), - zap.Error(err), - ) - } -} diff --git a/vms/platformvm/network/network_test.go b/vms/platformvm/network/network_test.go index 56957b0007c2..f19600461af7 100644 --- a/vms/platformvm/network/network_test.go +++ b/vms/platformvm/network/network_test.go @@ -16,11 +16,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/snowtest" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/message" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/txs/mempool" + + pmempool "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) var ( @@ -29,6 +28,13 @@ var ( testConfig = 
Config{ MaxValidatorSetStaleness: time.Second, TargetGossipSize: 1, + PushGossipNumValidators: 1, + PushGossipNumPeers: 0, + PushRegossipNumValidators: 1, + PushRegossipNumPeers: 0, + PushGossipDiscardedCacheSize: 1, + PushGossipMaxRegossipFrequency: time.Second, + PushGossipFrequency: time.Second, PullGossipPollSize: 1, PullGossipFrequency: time.Second, PullGossipThrottlingPeriod: time.Second, @@ -36,7 +42,6 @@ var ( ExpectedBloomFilterElements: 10, ExpectedBloomFilterFalsePositiveProbability: .1, MaxBloomFilterFalsePositiveProbability: .5, - LegacyPushGossipCacheSize: 512, } ) @@ -50,163 +55,12 @@ func (t testTxVerifier) VerifyTx(*txs.Tx) error { return t.err } -func TestNetworkAppGossip(t *testing.T) { - testTx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: 1, - BlockchainID: ids.GenerateTestID(), - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - } - require.NoError(t, testTx.Initialize(txs.Codec)) - - type test struct { - name string - msgBytesFunc func() []byte - mempoolFunc func(*gomock.Controller) mempool.Mempool - partialSyncPrimaryNetwork bool - appSenderFunc func(*gomock.Controller) common.AppSender - } - - tests := []test{ - { - // Shouldn't attempt to issue or gossip the tx - name: "invalid message bytes", - msgBytesFunc: func() []byte { - return []byte{0x00} - }, - mempoolFunc: func(*gomock.Controller) mempool.Mempool { - return nil - }, - appSenderFunc: func(*gomock.Controller) common.AppSender { - return nil - }, - }, - { - // Shouldn't attempt to issue or gossip the tx - name: "invalid tx bytes", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: []byte{0x00}, - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - return mempool.NewMockMempool(ctrl) - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - return common.NewMockSender(ctrl) - }, - }, - { - name: 
"issuance succeeds", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Get(gomock.Any()).Return(nil, false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) - mempool.EXPECT().Add(gomock.Any()).Return(nil) - mempool.EXPECT().Len().Return(0) - mempool.EXPECT().RequestBuildBlock(false) - return mempool - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // we should gossip the tx twice because sdk and legacy gossip - // currently runs together - appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Times(2) - return appSender - }, - }, - { - // Issue returns error because tx was dropped. We shouldn't gossip the tx. - name: "issuance fails", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().Get(gomock.Any()).Return(nil, false) - mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) - return mempool - }, - appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - return common.NewMockSender(ctrl) - }, - }, - { - name: "should AppGossip if primary network is not being fully synced", - msgBytesFunc: func() []byte { - msg := message.Tx{ - Tx: testTx.Bytes(), - } - msgBytes, err := message.Build(&msg) - require.NoError(t, err) - return msgBytes - }, - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) - // mempool.EXPECT().Has(gomock.Any()).Return(true) - return mempool - }, - partialSyncPrimaryNetwork: true, - appSenderFunc: func(ctrl *gomock.Controller) 
common.AppSender { - appSender := common.NewMockSender(ctrl) - // appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) - return appSender - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctx := context.Background() - ctrl := gomock.NewController(t) - - snowCtx := snowtest.Context(t, ids.Empty) - n, err := New( - logging.NoLog{}, - ids.EmptyNodeID, - ids.Empty, - snowCtx.ValidatorState, - testTxVerifier{}, - tt.mempoolFunc(ctrl), - tt.partialSyncPrimaryNetwork, - tt.appSenderFunc(ctrl), - prometheus.NewRegistry(), - DefaultConfig, - ) - require.NoError(err) - - require.NoError(n.AppGossip(ctx, ids.GenerateTestNodeID(), tt.msgBytesFunc())) - }) - } -} - -func TestNetworkIssueTx(t *testing.T) { +func TestNetworkIssueTxFromRPC(t *testing.T) { tx := &txs.Tx{} type test struct { name string - mempoolFunc func(*gomock.Controller) mempool.Mempool + mempoolFunc func(*gomock.Controller) pmempool.Mempool txVerifier testTxVerifier partialSyncPrimaryNetwork bool appSenderFunc func(*gomock.Controller) common.AppSender @@ -216,8 +70,8 @@ func TestNetworkIssueTx(t *testing.T) { tests := []test{ { name: "mempool has transaction", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) pmempool.Mempool { + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(tx, true) return mempool }, @@ -228,8 +82,8 @@ func TestNetworkIssueTx(t *testing.T) { }, { name: "transaction marked as dropped in mempool", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) pmempool.Mempool { + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) return mempool @@ -242,8 +96,8 @@ func TestNetworkIssueTx(t *testing.T) { 
}, { name: "transaction invalid", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) pmempool.Mempool { + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) @@ -258,8 +112,8 @@ func TestNetworkIssueTx(t *testing.T) { }, { name: "can't add transaction to mempool", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) pmempool.Mempool { + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(errTest) @@ -273,36 +127,31 @@ func TestNetworkIssueTx(t *testing.T) { expectedErr: errTest, }, { - name: "AppGossip tx but do not add to mempool if primary network is not being fully synced", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - return mempool.NewMockMempool(ctrl) + name: "mempool is disabled if primary network is not being fully synced", + mempoolFunc: func(ctrl *gomock.Controller) pmempool.Mempool { + return pmempool.NewMockMempool(ctrl) }, partialSyncPrimaryNetwork: true, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // we should gossip the tx twice because sdk and legacy gossip - // currently runs together - appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) - return appSender + return common.NewMockSender(ctrl) }, - expectedErr: nil, + expectedErr: errMempoolDisabledWithPartialSync, }, { name: "happy path", - mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { - mempool := mempool.NewMockMempool(ctrl) + mempoolFunc: func(ctrl *gomock.Controller) pmempool.Mempool 
{ + mempool := pmempool.NewMockMempool(ctrl) mempool.EXPECT().Get(gomock.Any()).Return(nil, false) mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) mempool.EXPECT().Add(gomock.Any()).Return(nil) mempool.EXPECT().Len().Return(0) mempool.EXPECT().RequestBuildBlock(false) + mempool.EXPECT().Get(gomock.Any()).Return(nil, true).Times(2) return mempool }, appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { - // we should gossip the tx twice because sdk and legacy gossip - // currently runs together appSender := common.NewMockSender(ctrl) - appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil).Times(2) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) return appSender }, expectedErr: nil, @@ -329,44 +178,10 @@ func TestNetworkIssueTx(t *testing.T) { ) require.NoError(err) - err = n.IssueTx(context.Background(), tx) + err = n.IssueTxFromRPC(tx) require.ErrorIs(err, tt.expectedErr) + + require.NoError(n.txPushGossiper.Gossip(context.Background())) }) } } - -func TestNetworkGossipTx(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - appSender := common.NewMockSender(ctrl) - - snowCtx := snowtest.Context(t, ids.Empty) - nIntf, err := New( - snowCtx.Log, - snowCtx.NodeID, - snowCtx.SubnetID, - snowCtx.ValidatorState, - testTxVerifier{}, - mempool.NewMockMempool(ctrl), - false, - appSender, - prometheus.NewRegistry(), - testConfig, - ) - require.NoError(err) - require.IsType(&network{}, nIntf) - n := nIntf.(*network) - - // Case: Tx was recently gossiped - txID := ids.GenerateTestID() - n.recentTxs.Put(txID, struct{}{}) - n.legacyGossipTx(context.Background(), txID, []byte{}) - // Didn't make a call to SendAppGossip - - // Case: Tx was not recently gossiped - msgBytes := []byte{1, 2, 3} - appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) - n.legacyGossipTx(context.Background(), ids.GenerateTestID(), msgBytes) - // Did make a call to SendAppGossip -} 
diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 289fc975edde..d1bdf60a6529 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -36,8 +36,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" avajson "github.com/ava-labs/avalanchego/utils/json" @@ -52,9 +50,8 @@ const ( // Max number of addresses that can be passed in as argument to GetStake maxGetStakeAddrs = 256 - // Minimum amount of delay to allow a transaction to be issued through the - // API - minAddStakerDelay = 2 * executor.SyncBound + // Max number of items allowed in a page + maxPageSize = 1024 // Note: Staker attributes cache should be large enough so that no evictions // happen when the API loops through all stakers. 
@@ -63,22 +60,9 @@ const ( var ( errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") - errNoSubnetID = errors.New("argument 'subnetID' not provided") errPrimaryNetworkIsNotASubnet = errors.New("the primary network isn't a subnet") - errNoRewardAddress = errors.New("argument 'rewardAddress' not provided") - errInvalidDelegationRate = errors.New("argument 'delegationFeeRate' must be between 0 and 100, inclusive") errNoAddresses = errors.New("no addresses provided") - errNoKeys = errors.New("user has no keys or funds") - errStartTimeTooSoon = fmt.Errorf("start time must be at least %s in the future", minAddStakerDelay) - errStartTimeTooLate = errors.New("start time is too far in the future") - errNamedSubnetCantBePrimary = errors.New("subnet validator attempts to validate primary network") - errNoAmount = errors.New("argument 'amount' must be > 0") - errMissingName = errors.New("argument 'name' not given") - errMissingVMID = errors.New("argument 'vmID' not given") errMissingBlockchainID = errors.New("argument 'blockchainID' not given") - errMissingPrivateKey = errors.New("argument 'privateKey' not given") - errStartAfterEndTime = errors.New("start time must be before end time") - errStartTimeInThePast = errors.New("start time in the past") ) // Service defines the API calls that can be made to the platform chain @@ -156,51 +140,6 @@ func (s *Service) ExportKey(_ *http.Request, args *ExportKeyArgs, reply *ExportK return user.Close() } -// ImportKeyArgs are arguments for ImportKey -type ImportKeyArgs struct { - api.UserPass - PrivateKey *secp256k1.PrivateKey `json:"privateKey"` -} - -// ImportKey adds a private key to the provided user -func (s *Service) ImportKey(_ *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "importKey"), - logging.UserString("username", args.Username), - ) - - if args.PrivateKey == 
nil { - return errMissingPrivateKey - } - - var err error - reply.Address, err = s.addrManager.FormatLocalAddress(args.PrivateKey.PublicKey().Address()) - if err != nil { - return fmt.Errorf("problem formatting address: %w", err) - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return err - } - defer user.Close() - - if err := user.PutKeys(args.PrivateKey); err != nil { - return fmt.Errorf("problem saving key %w", err) - } - return user.Close() -} - -/* - ****************************************************** - ************* Balances / Addresses ****************** - ****************************************************** - */ - type GetBalanceRequest struct { Addresses []string `json:"addresses"` } @@ -341,36 +280,6 @@ func newJSONBalanceMap(balanceMap map[ids.ID]uint64) map[ids.ID]avajson.Uint64 { return jsonBalanceMap } -// CreateAddress creates an address controlled by [args.Username] -// Returns the newly created address -func (s *Service) CreateAddress(_ *http.Request, args *api.UserPass, response *api.JSONAddress) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "createAddress"), - logging.UserString("username", args.Username), - ) - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return err - } - defer user.Close() - - key, err := keystore.NewKey(user) - if err != nil { - return err - } - - response.Address, err = s.addrManager.FormatLocalAddress(key.PublicKey().Address()) - if err != nil { - return fmt.Errorf("problem formatting address: %w", err) - } - return user.Close() -} - // ListAddresses returns the addresses controlled by [args.Username] func (s *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *api.JSONAddresses) error { 
s.vm.ctx.Log.Warn("deprecated API called", @@ -458,8 +367,8 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *ap endUTXOID ids.ID ) limit := int(args.Limit) - if limit <= 0 || builder.MaxPageSize < limit { - limit = builder.MaxPageSize + if limit <= 0 || maxPageSize < limit { + limit = maxPageSize } s.vm.ctx.Lock.Lock() @@ -474,7 +383,9 @@ func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *ap limit, ) } else { - utxos, endAddr, endUTXOID, err = s.vm.atomicUtxosManager.GetAtomicUTXOs( + utxos, endAddr, endUTXOID, err = avax.GetAtomicUTXOs( + s.vm.ctx.SharedMemory, + txs.Codec, sourceChain, addrSet, startAddr, @@ -577,12 +488,6 @@ func (s *Service) GetSubnet(_ *http.Request, args *GetSubnetArgs, response *GetS return nil } -/* - ****************************************************** - ******************* Get Subnets ********************** - ****************************************************** - */ - // APISubnet is a representation of a subnet used in API calls type APISubnet struct { // ID of the subnet @@ -622,14 +527,13 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge getAll := len(args.IDs) == 0 if getAll { - subnets, err := s.vm.state.GetSubnets() // all subnets + subnetIDs, err := s.vm.state.GetSubnetIDs() // all subnets if err != nil { return fmt.Errorf("error getting subnets from database: %w", err) } - response.Subnets = make([]APISubnet, len(subnets)+1) - for i, subnet := range subnets { - subnetID := subnet.ID() + response.Subnets = make([]APISubnet, len(subnetIDs)+1) + for i, subnetID := range subnetIDs { if _, err := s.vm.state.GetSubnetTransformation(subnetID); err == nil { response.Subnets[i] = APISubnet{ ID: subnetID, @@ -639,15 +543,23 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge continue } - unsignedTx := subnet.Unsigned.(*txs.CreateSubnetTx) - owner := unsignedTx.Owner.(*secp256k1fx.OutputOwners) - controlAddrs := 
[]string{} - for _, controlKeyID := range owner.Addrs { + subnetOwner, err := s.vm.state.GetSubnetOwner(subnetID) + if err != nil { + return err + } + + owner, ok := subnetOwner.(*secp256k1fx.OutputOwners) + if !ok { + return fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnetOwner) + } + + controlAddrs := make([]string, len(owner.Addrs)) + for i, controlKeyID := range owner.Addrs { addr, err := s.addrManager.FormatLocalAddress(controlKeyID) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } - controlAddrs = append(controlAddrs, addr) + controlAddrs[i] = addr } response.Subnets[i] = APISubnet{ ID: subnetID, @@ -656,7 +568,7 @@ func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *Ge } } // Include primary network - response.Subnets[len(subnets)] = APISubnet{ + response.Subnets[len(subnetIDs)] = APISubnet{ ID: constants.PrimaryNetworkID, ControlKeys: []string{}, Threshold: avajson.Uint32(0), @@ -768,12 +680,6 @@ func (s *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAssetIDArgs return nil } -/* - ****************************************************** - **************** Get/Sample Validators *************** - ****************************************************** - */ - // GetCurrentValidatorsArgs are the arguments for calling GetCurrentValidators type GetCurrentValidatorsArgs struct { // Subnet we're listing the validators of @@ -1037,128 +943,6 @@ func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidato return nil } -// GetPendingValidatorsArgs are the arguments for calling GetPendingValidators -type GetPendingValidatorsArgs struct { - // Subnet we're getting the pending validators of - // If omitted, defaults to primary network - SubnetID ids.ID `json:"subnetID"` - // NodeIDs of validators to request. If [NodeIDs] - // is empty, it fetches all pending validators. If - // some requested nodeIDs are not pending validators, - // they are omitted from the response. 
- NodeIDs []ids.NodeID `json:"nodeIDs"` -} - -// GetPendingValidatorsReply are the results from calling GetPendingValidators. -type GetPendingValidatorsReply struct { - Validators []interface{} `json:"validators"` - Delegators []interface{} `json:"delegators"` -} - -// GetPendingValidators returns the lists of pending validators and delegators. -func (s *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { - s.vm.ctx.Log.Debug("API called", - zap.String("service", "platform"), - zap.String("method", "getPendingValidators"), - ) - - reply.Validators = []interface{}{} - reply.Delegators = []interface{}{} - - // Create set of nodeIDs - nodeIDs := set.Of(args.NodeIDs...) - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - numNodeIDs := nodeIDs.Len() - targetStakers := make([]*state.Staker, 0, numNodeIDs) - if numNodeIDs == 0 { // Include all nodes - pendingStakerIterator, err := s.vm.state.GetPendingStakerIterator() - if err != nil { - return err - } - for pendingStakerIterator.Next() { // Iterates in order of increasing stop time - staker := pendingStakerIterator.Value() - if args.SubnetID != staker.SubnetID { - continue - } - targetStakers = append(targetStakers, staker) - } - pendingStakerIterator.Release() - } else { - for nodeID := range nodeIDs { - staker, err := s.vm.state.GetPendingValidator(args.SubnetID, nodeID) - switch err { - case nil: - case database.ErrNotFound: - // nothing to do, continue - continue - default: - return err - } - targetStakers = append(targetStakers, staker) - - delegatorsIt, err := s.vm.state.GetPendingDelegatorIterator(args.SubnetID, nodeID) - if err != nil { - return err - } - for delegatorsIt.Next() { - staker := delegatorsIt.Value() - targetStakers = append(targetStakers, staker) - } - delegatorsIt.Release() - } - } - - for _, pendingStaker := range targetStakers { - nodeID := pendingStaker.NodeID - weight := avajson.Uint64(pendingStaker.Weight) - apiStaker 
:= platformapi.Staker{ - TxID: pendingStaker.TxID, - NodeID: nodeID, - StartTime: avajson.Uint64(pendingStaker.StartTime.Unix()), - EndTime: avajson.Uint64(pendingStaker.EndTime.Unix()), - Weight: weight, - StakeAmount: &weight, - } - - switch pendingStaker.Priority { - case txs.PrimaryNetworkValidatorPendingPriority, txs.SubnetPermissionlessValidatorPendingPriority: - attr, err := s.loadStakerTxAttributes(pendingStaker.TxID) - if err != nil { - return err - } - - shares := attr.shares - delegationFee := avajson.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) - - connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) - vdr := platformapi.PermissionlessValidator{ - Staker: apiStaker, - DelegationFee: delegationFee, - Connected: connected, - Signer: attr.proofOfPossession, - } - reply.Validators = append(reply.Validators, vdr) - - case txs.PrimaryNetworkDelegatorApricotPendingPriority, txs.PrimaryNetworkDelegatorBanffPendingPriority, txs.SubnetPermissionlessDelegatorPendingPriority: - reply.Delegators = append(reply.Delegators, apiStaker) - - case txs.SubnetPermissionedValidatorPendingPriority: - connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) - reply.Validators = append(reply.Validators, platformapi.PermissionedValidator{ - Staker: apiStaker, - Connected: connected, - }) - - default: - return fmt.Errorf("unexpected staker priority %d", pendingStaker.Priority) - } - } - return nil -} - // GetCurrentSupplyArgs are the arguments for calling GetCurrentSupply type GetCurrentSupplyArgs struct { SubnetID ids.ID `json:"subnetID"` @@ -1233,918 +1017,136 @@ func (s *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, return nil } -/* - ****************************************************** - ************ Add Validators to Subnets *************** - ****************************************************** - */ +// GetBlockchainStatusArgs is the arguments for calling GetBlockchainStatus +// [BlockchainID] is the 
ID of or an alias of the blockchain to get the status of. +type GetBlockchainStatusArgs struct { + BlockchainID string `json:"blockchainID"` +} -// AddValidatorArgs are the arguments to AddValidator -type AddValidatorArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - platformapi.Staker - // The address the staking reward, if applicable, will go to - RewardAddress string `json:"rewardAddress"` - DelegationFeeRate avajson.Float32 `json:"delegationFeeRate"` +// GetBlockchainStatusReply is the reply from calling GetBlockchainStatus +// [Status] is the blockchain's status. +type GetBlockchainStatusReply struct { + Status status.BlockchainStatus `json:"status"` } -// AddValidator creates and signs and issues a transaction to add a validator to -// the primary network -func (s *Service) AddValidator(req *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", +// GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. 
+func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { + s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), - zap.String("method", "addValidator"), + zap.String("method", "getBlockchainStatus"), ) - tx, changeAddr, err := s.buildAddValidatorTx(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - reply.TxID = tx.ID() - reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - return fmt.Errorf("couldn't format address: %w", err) + if args.BlockchainID == "" { + return errMissingBlockchainID } - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildAddValidatorTx(args *AddValidatorArgs) (*txs.Tx, ids.ShortID, error) { - now := s.vm.clock.Time() - minAddStakerTime := now.Add(minAddStakerDelay) - minAddStakerUnix := avajson.Uint64(minAddStakerTime.Unix()) - maxAddStakerTime := now.Add(executor.MaxFutureStartTime) - maxAddStakerUnix := avajson.Uint64(maxAddStakerTime.Unix()) + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() - if args.StartTime == 0 { - args.StartTime = minAddStakerUnix - } + // if its aliased then vm created this chain. 
+ if aliasedID, err := s.vm.Chains.Lookup(args.BlockchainID); err == nil { + if s.nodeValidates(aliasedID) { + reply.Status = status.Validating + return nil + } - switch { - case args.RewardAddress == "": - return nil, ids.ShortEmpty, errNoRewardAddress - case args.StartTime < minAddStakerUnix: - return nil, ids.ShortEmpty, errStartTimeTooSoon - case args.StartTime > maxAddStakerUnix: - return nil, ids.ShortEmpty, errStartTimeTooLate - case args.DelegationFeeRate < 0 || args.DelegationFeeRate > 100: - return nil, ids.ShortEmpty, errInvalidDelegationRate + reply.Status = status.Syncing + return nil } - // Parse the node ID - var nodeID ids.NodeID - if args.NodeID == ids.EmptyNodeID { // If ID unspecified, use this node's ID - nodeID = s.vm.ctx.NodeID - } else { - nodeID = args.NodeID + blockchainID, err := ids.FromString(args.BlockchainID) + if err != nil { + return fmt.Errorf("problem parsing blockchainID %q: %w", args.BlockchainID, err) } - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) + ctx := r.Context() + lastAcceptedID, err := s.vm.LastAccepted(ctx) if err != nil { - return nil, ids.ShortEmpty, err + return fmt.Errorf("problem loading last accepted ID: %w", err) } - // Parse the reward address - rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) + exists, err := s.chainExists(ctx, lastAcceptedID, blockchainID) if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("problem while parsing reward address: %w", err) + return fmt.Errorf("problem looking up blockchain: %w", err) + } + if exists { + reply.Status = status.Created + return nil } - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) + preferredBlkID := s.vm.manager.Preferred() + preferred, err := s.chainExists(ctx, preferredBlkID, blockchainID) if err != nil { - return nil, ids.ShortEmpty, err + return fmt.Errorf("problem 
looking up blockchain: %w", err) } - defer user.Close() + if preferred { + reply.Status = status.Preferred + } else { + reply.Status = status.UnknownChain + } + return nil +} - // Get the user's keys - privKeys, err := keystore.GetKeychain(user, fromAddrs) +func (s *Service) nodeValidates(blockchainID ids.ID) bool { + chainTx, _, err := s.vm.state.GetTx(blockchainID) if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) + return false } - // Parse the change address. - if len(privKeys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys + chain, ok := chainTx.Unsigned.(*txs.CreateChainTx) + if !ok { + return false } - changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) + + _, isValidator := s.vm.Validators.GetValidator(chain.SubnetID, s.vm.ctx.NodeID) + return isValidator +} + +func (s *Service) chainExists(ctx context.Context, blockID ids.ID, chainID ids.ID) (bool, error) { + state, ok := s.vm.manager.GetState(blockID) + if !ok { + block, err := s.vm.GetBlock(ctx, blockID) if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) + return false, err + } + state, ok = s.vm.manager.GetState(block.Parent()) + if !ok { + return false, errMissingDecisionBlock } } - // TODO: Remove after StakeAmount is removed from [args]. 
- if args.StakeAmount != nil { - args.Weight = *args.StakeAmount + tx, _, err := state.GetTx(chainID) + if err == database.ErrNotFound { + return false, nil } - - // Create the transaction - tx, err := s.vm.txBuilder.NewAddValidatorTx( - uint64(args.Weight), // Stake amount - uint64(args.StartTime), // Start time - uint64(args.EndTime), // End time - nodeID, // Node ID - rewardAddress, // Reward Address - uint32(10000*args.DelegationFeeRate), // Shares - privKeys.Keys, // Keys providing the staked tokens - changeAddr, - nil, - ) if err != nil { - return nil, ids.ShortEmpty, err + return false, err } + _, ok = tx.Unsigned.(*txs.CreateChainTx) + return ok, nil +} - return tx, changeAddr, user.Close() +// ValidatedByArgs is the arguments for calling ValidatedBy +type ValidatedByArgs struct { + // ValidatedBy returns the ID of the Subnet validating the blockchain with this ID + BlockchainID ids.ID `json:"blockchainID"` } -// AddDelegatorArgs are the arguments to AddDelegator -type AddDelegatorArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - platformapi.Staker - RewardAddress string `json:"rewardAddress"` +// ValidatedByResponse is the reply from calling ValidatedBy +type ValidatedByResponse struct { + // ID of the Subnet validating the specified blockchain + SubnetID ids.ID `json:"subnetID"` } -// AddDelegator creates and signs and issues a transaction to add a delegator to -// the primary network -func (s *Service) AddDelegator(req *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", +// ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] +func (s *Service) ValidatedBy(r *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { + s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), - zap.String("method", "addDelegator"), + zap.String("method", "validatedBy"), ) - tx, changeAddr, err := 
s.buildAddDelegatorTx(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - reply.TxID = tx.ID() - reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - return fmt.Errorf("couldn't format address: %w", err) - } - - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildAddDelegatorTx(args *AddDelegatorArgs) (*txs.Tx, ids.ShortID, error) { - now := s.vm.clock.Time() - minAddStakerTime := now.Add(minAddStakerDelay) - minAddStakerUnix := avajson.Uint64(minAddStakerTime.Unix()) - maxAddStakerTime := now.Add(executor.MaxFutureStartTime) - maxAddStakerUnix := avajson.Uint64(maxAddStakerTime.Unix()) - - if args.StartTime == 0 { - args.StartTime = minAddStakerUnix - } - - switch { - case args.RewardAddress == "": - return nil, ids.ShortEmpty, errNoRewardAddress - case args.StartTime < minAddStakerUnix: - return nil, ids.ShortEmpty, errStartTimeTooSoon - case args.StartTime > maxAddStakerUnix: - return nil, ids.ShortEmpty, errStartTimeTooLate - } - - var nodeID ids.NodeID - if args.NodeID == ids.EmptyNodeID { // If ID unspecified, use this node's ID - nodeID = s.vm.ctx.NodeID - } else { - nodeID = args.NodeID - } - - // Parse the reward address - rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("problem parsing 'rewardAddress': %w", err) - } - - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) - if err != nil { - return nil, ids.ShortEmpty, err - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return nil, ids.ShortEmpty, err - } - defer user.Close() - - privKeys, err := keystore.GetKeychain(user, fromAddrs) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - // 
Parse the change address. Assumes that if the user has no keys, - // this operation will fail so the change address can be anything. - if len(privKeys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys - } - changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) - } - } - - // TODO: Remove after StakeAmount is removed from [args]. - if args.StakeAmount != nil { - args.Weight = *args.StakeAmount - } - - // Create the transaction - tx, err := s.vm.txBuilder.NewAddDelegatorTx( - uint64(args.Weight), // Stake amount - uint64(args.StartTime), // Start time - uint64(args.EndTime), // End time - nodeID, // Node ID - rewardAddress, // Reward Address - privKeys.Keys, // Private keys - changeAddr, // Change address - nil, // Memo - ) - if err != nil { - return nil, ids.ShortEmpty, err - } - - return tx, changeAddr, user.Close() -} - -// AddSubnetValidatorArgs are the arguments to AddSubnetValidator -type AddSubnetValidatorArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - platformapi.Staker - // ID of subnet to validate - SubnetID string `json:"subnetID"` -} - -// AddSubnetValidator creates and signs and issues a transaction to add a -// validator to a subnet other than the primary network -func (s *Service) AddSubnetValidator(req *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "addSubnetValidator"), - ) - - tx, changeAddr, err := s.buildAddSubnetValidatorTx(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - 
return fmt.Errorf("couldn't format address: %w", err) - } - - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildAddSubnetValidatorTx(args *AddSubnetValidatorArgs) (*txs.Tx, ids.ShortID, error) { - now := s.vm.clock.Time() - minAddStakerTime := now.Add(minAddStakerDelay) - minAddStakerUnix := avajson.Uint64(minAddStakerTime.Unix()) - maxAddStakerTime := now.Add(executor.MaxFutureStartTime) - maxAddStakerUnix := avajson.Uint64(maxAddStakerTime.Unix()) - - if args.StartTime == 0 { - args.StartTime = minAddStakerUnix - } - - switch { - case args.SubnetID == "": - return nil, ids.ShortEmpty, errNoSubnetID - case args.StartTime < minAddStakerUnix: - return nil, ids.ShortEmpty, errStartTimeTooSoon - case args.StartTime > maxAddStakerUnix: - return nil, ids.ShortEmpty, errStartTimeTooLate - } - - // Parse the subnet ID - subnetID, err := ids.FromString(args.SubnetID) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("problem parsing subnetID %q: %w", args.SubnetID, err) - } - if subnetID == constants.PrimaryNetworkID { - return nil, ids.ShortEmpty, errNamedSubnetCantBePrimary - } - - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) - if err != nil { - return nil, ids.ShortEmpty, err - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return nil, ids.ShortEmpty, err - } - defer user.Close() - - keys, err := keystore.GetKeychain(user, fromAddrs) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - // Parse the change address. 
- if len(keys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys - } - changeAddr := keys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) - } - } - - // TODO: Remove after StakeAmount is removed from [args]. - if args.StakeAmount != nil { - args.Weight = *args.StakeAmount - } - - // Create the transaction - tx, err := s.vm.txBuilder.NewAddSubnetValidatorTx( - uint64(args.Weight), // Stake amount - uint64(args.StartTime), // Start time - uint64(args.EndTime), // End time - args.NodeID, // Node ID - subnetID, // Subnet ID - keys.Keys, - changeAddr, - nil, - ) - if err != nil { - return nil, ids.ShortEmpty, err - } - - return tx, changeAddr, user.Close() -} - -// CreateSubnetArgs are the arguments to CreateSubnet -type CreateSubnetArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - // The ID member of APISubnet is ignored - APISubnet -} - -// CreateSubnet creates and signs and issues a transaction to create a new -// subnet -func (s *Service) CreateSubnet(req *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "createSubnet"), - ) - - tx, changeAddr, err := s.buildCreateSubnetTx(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - return fmt.Errorf("couldn't format address: %w", err) - } - - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildCreateSubnetTx(args *CreateSubnetArgs) (*txs.Tx, ids.ShortID, error) { - // Parse the control keys - controlKeys, err := avax.ParseServiceAddresses(s.addrManager, 
args.ControlKeys) - if err != nil { - return nil, ids.ShortEmpty, err - } - - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) - if err != nil { - return nil, ids.ShortEmpty, err - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return nil, ids.ShortEmpty, err - } - defer user.Close() - - privKeys, err := keystore.GetKeychain(user, fromAddrs) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - // Parse the change address. Assumes that if the user has no keys, - // this operation will fail so the change address can be anything. - if len(privKeys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys - } - changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) - } - } - - // Create the transaction - tx, err := s.vm.txBuilder.NewCreateSubnetTx( - uint32(args.Threshold), // Threshold - controlKeys.List(), // Control Addresses - privKeys.Keys, // Private keys - changeAddr, - nil, - ) - if err != nil { - return nil, ids.ShortEmpty, err - } - - return tx, changeAddr, user.Close() -} - -// ExportAVAXArgs are the arguments to ExportAVAX -type ExportAVAXArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - - // Amount of AVAX to send - Amount avajson.Uint64 `json:"amount"` - - // Chain the funds are going to. Optional. Used if To address does not include the chainID. - TargetChain string `json:"targetChain"` - - // ID of the address that will receive the AVAX. This address may include the - // chainID, which is used to determine what the destination chain is. 
- To string `json:"to"` -} - -// ExportAVAX exports AVAX from the P-Chain to the X-Chain -// It must be imported on the X-Chain to complete the transfer -func (s *Service) ExportAVAX(req *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "exportAVAX"), - ) - - tx, changeAddr, err := s.buildExportAVAX(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - return fmt.Errorf("couldn't format address: %w", err) - } - - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildExportAVAX(args *ExportAVAXArgs) (*txs.Tx, ids.ShortID, error) { - if args.Amount == 0 { - return nil, ids.ShortEmpty, errNoAmount - } - - // Get the chainID and parse the to address - chainID, to, err := s.addrManager.ParseAddress(args.To) - if err != nil { - chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) - if err != nil { - return nil, ids.ShortEmpty, err - } - to, err = ids.ShortFromString(args.To) - if err != nil { - return nil, ids.ShortEmpty, err - } - } - - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) - if err != nil { - return nil, ids.ShortEmpty, err - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return nil, ids.ShortEmpty, err - } - defer user.Close() - - privKeys, err := keystore.GetKeychain(user, fromAddrs) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - // Parse the change address. Assumes that if the user has no keys, - // this operation will fail so the change address can be anything. 
- if len(privKeys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys - } - changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) - } - } - - // Create the transaction - tx, err := s.vm.txBuilder.NewExportTx( - uint64(args.Amount), // Amount - chainID, // ID of the chain to send the funds to - to, // Address - privKeys.Keys, // Private keys - changeAddr, // Change address - nil, - ) - if err != nil { - return nil, ids.ShortEmpty, err - } - - return tx, changeAddr, user.Close() -} - -// ImportAVAXArgs are the arguments to ImportAVAX -type ImportAVAXArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - - // Chain the funds are coming from - SourceChain string `json:"sourceChain"` - - // The address that will receive the imported funds - To string `json:"to"` -} - -// ImportAVAX issues a transaction to import AVAX from the X-chain. The AVAX -// must have already been exported from the X-Chain. 
-func (s *Service) ImportAVAX(req *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "importAVAX"), - ) - - tx, changeAddr, err := s.buildImportAVAXTx(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - return fmt.Errorf("problem formatting address: %w", err) - } - - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildImportAVAXTx(args *ImportAVAXArgs) (*txs.Tx, ids.ShortID, error) { - // Parse the sourceCHain - chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) - } - - // Parse the to address - to, err := avax.ParseServiceAddress(s.addrManager, args.To) - if err != nil { // Parse address - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) - } - - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) - if err != nil { - return nil, ids.ShortEmpty, err - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return nil, ids.ShortEmpty, err - } - defer user.Close() - - privKeys, err := keystore.GetKeychain(user, fromAddrs) - if err != nil { // Get keys - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get keys controlled by the user: %w", err) - } - - // Parse the change address. Assumes that if the user has no keys, - // this operation will fail so the change address can be anything. 
- if len(privKeys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys - } - changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) - } - } - - tx, err := s.vm.txBuilder.NewImportTx( - chainID, - to, - privKeys.Keys, - changeAddr, - nil, - ) - if err != nil { - return nil, ids.ShortEmpty, err - } - - return tx, changeAddr, user.Close() -} - -/* - ****************************************************** - ******** Create/get status of a blockchain *********** - ****************************************************** - */ - -// CreateBlockchainArgs is the arguments for calling CreateBlockchain -type CreateBlockchainArgs struct { - // User, password, from addrs, change addr - api.JSONSpendHeader - // ID of Subnet that validates the new blockchain - SubnetID ids.ID `json:"subnetID"` - // ID of the VM the new blockchain is running - VMID string `json:"vmID"` - // IDs of the FXs the VM is running - FxIDs []string `json:"fxIDs"` - // Human-readable name for the new blockchain, not necessarily unique - Name string `json:"name"` - // Genesis state of the blockchain being created - GenesisData string `json:"genesisData"` - // Encoding format to use for genesis data - Encoding formatting.Encoding `json:"encoding"` -} - -// CreateBlockchain issues a transaction to create a new blockchain -func (s *Service) CreateBlockchain(req *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { - s.vm.ctx.Log.Warn("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "createBlockchain"), - ) - - tx, changeAddr, err := s.buildCreateBlockchainTx(args) - if err != nil { - return fmt.Errorf("couldn't create tx: %w", err) - } - - response.TxID = tx.ID() - response.ChangeAddr, err = 
s.addrManager.FormatLocalAddress(changeAddr) - if err != nil { - return fmt.Errorf("problem formatting address: %w", err) - } - - return s.vm.issueTx(req.Context(), tx) -} - -func (s *Service) buildCreateBlockchainTx(args *CreateBlockchainArgs) (*txs.Tx, ids.ShortID, error) { - switch { - case args.Name == "": - return nil, ids.ShortEmpty, errMissingName - case args.VMID == "": - return nil, ids.ShortEmpty, errMissingVMID - } - - genesisBytes, err := formatting.Decode(args.Encoding, args.GenesisData) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("problem parsing genesis data: %w", err) - } - - vmID, err := s.vm.Chains.LookupVM(args.VMID) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("no VM with ID '%s' found", args.VMID) - } - - fxIDs := []ids.ID(nil) - for _, fxIDStr := range args.FxIDs { - fxID, err := s.vm.Chains.LookupVM(fxIDStr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("no FX with ID '%s' found", fxIDStr) - } - fxIDs = append(fxIDs, fxID) - } - // If creating AVM instance, use secp256k1fx - // TODO: Document FXs and have user specify them in API call - fxIDsSet := set.Of(fxIDs...) - if vmID == constants.AVMID && !fxIDsSet.Contains(secp256k1fx.ID) { - fxIDs = append(fxIDs, secp256k1fx.ID) - } - - if args.SubnetID == constants.PrimaryNetworkID { - return nil, ids.ShortEmpty, txs.ErrCantValidatePrimaryNetwork - } - - // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) - if err != nil { - return nil, ids.ShortEmpty, err - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) - if err != nil { - return nil, ids.ShortEmpty, err - } - defer user.Close() - - keys, err := keystore.GetKeychain(user, fromAddrs) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't get addresses controlled by the user: %w", err) - } - - // Parse the change address. 
Assumes that if the user has no keys, - // this operation will fail so the change address can be anything. - if len(keys.Keys) == 0 { - return nil, ids.ShortEmpty, errNoKeys - } - changeAddr := keys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user - if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) - if err != nil { - return nil, ids.ShortEmpty, fmt.Errorf("couldn't parse changeAddr: %w", err) - } - } - - // Create the transaction - tx, err := s.vm.txBuilder.NewCreateChainTx( - args.SubnetID, - genesisBytes, - vmID, - fxIDs, - args.Name, - keys.Keys, - changeAddr, // Change address - nil, - ) - if err != nil { - return nil, ids.ShortEmpty, err - } - - return tx, changeAddr, user.Close() -} - -// GetBlockchainStatusArgs is the arguments for calling GetBlockchainStatus -// [BlockchainID] is the ID of or an alias of the blockchain to get the status of. -type GetBlockchainStatusArgs struct { - BlockchainID string `json:"blockchainID"` -} - -// GetBlockchainStatusReply is the reply from calling GetBlockchainStatus -// [Status] is the blockchain's status. -type GetBlockchainStatusReply struct { - Status status.BlockchainStatus `json:"status"` -} - -// GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. -func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - s.vm.ctx.Log.Debug("API called", - zap.String("service", "platform"), - zap.String("method", "getBlockchainStatus"), - ) - - if args.BlockchainID == "" { - return errMissingBlockchainID - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - // if its aliased then vm created this chain. 
- if aliasedID, err := s.vm.Chains.Lookup(args.BlockchainID); err == nil { - if s.nodeValidates(aliasedID) { - reply.Status = status.Validating - return nil - } - - reply.Status = status.Syncing - return nil - } - - blockchainID, err := ids.FromString(args.BlockchainID) - if err != nil { - return fmt.Errorf("problem parsing blockchainID %q: %w", args.BlockchainID, err) - } - - ctx := r.Context() - lastAcceptedID, err := s.vm.LastAccepted(ctx) - if err != nil { - return fmt.Errorf("problem loading last accepted ID: %w", err) - } - - exists, err := s.chainExists(ctx, lastAcceptedID, blockchainID) - if err != nil { - return fmt.Errorf("problem looking up blockchain: %w", err) - } - if exists { - reply.Status = status.Created - return nil - } - - preferredBlkID := s.vm.manager.Preferred() - preferred, err := s.chainExists(ctx, preferredBlkID, blockchainID) - if err != nil { - return fmt.Errorf("problem looking up blockchain: %w", err) - } - if preferred { - reply.Status = status.Preferred - } else { - reply.Status = status.UnknownChain - } - return nil -} - -func (s *Service) nodeValidates(blockchainID ids.ID) bool { - chainTx, _, err := s.vm.state.GetTx(blockchainID) - if err != nil { - return false - } - - chain, ok := chainTx.Unsigned.(*txs.CreateChainTx) - if !ok { - return false - } - - _, isValidator := s.vm.Validators.GetValidator(chain.SubnetID, s.vm.ctx.NodeID) - return isValidator -} - -func (s *Service) chainExists(ctx context.Context, blockID ids.ID, chainID ids.ID) (bool, error) { - state, ok := s.vm.manager.GetState(blockID) - if !ok { - block, err := s.vm.GetBlock(ctx, blockID) - if err != nil { - return false, err - } - state, ok = s.vm.manager.GetState(block.Parent()) - if !ok { - return false, errMissingDecisionBlock - } - } - - tx, _, err := state.GetTx(chainID) - if err == database.ErrNotFound { - return false, nil - } - if err != nil { - return false, err - } - _, ok = tx.Unsigned.(*txs.CreateChainTx) - return ok, nil -} - -// ValidatedByArgs is 
the arguments for calling ValidatedBy -type ValidatedByArgs struct { - // ValidatedBy returns the ID of the Subnet validating the blockchain with this ID - BlockchainID ids.ID `json:"blockchainID"` -} - -// ValidatedByResponse is the reply from calling ValidatedBy -type ValidatedByResponse struct { - // ID of the Subnet validating the specified blockchain - SubnetID ids.ID `json:"subnetID"` -} - -// ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] -func (s *Service) ValidatedBy(r *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { - s.vm.ctx.Log.Debug("API called", - zap.String("service", "platform"), - zap.String("method", "validatedBy"), - ) - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() var err error ctx := r.Context() @@ -2231,14 +1233,13 @@ func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc s.vm.ctx.Lock.Lock() defer s.vm.ctx.Lock.Unlock() - subnets, err := s.vm.state.GetSubnets() + subnetIDs, err := s.vm.state.GetSubnetIDs() if err != nil { return fmt.Errorf("couldn't retrieve subnets: %w", err) } response.Blockchains = []APIBlockchain{} - for _, subnet := range subnets { - subnetID := subnet.ID() + for _, subnetID := range subnetIDs { chains, err := s.vm.state.GetChains(subnetID) if err != nil { return fmt.Errorf( @@ -2284,7 +1285,7 @@ func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBloc return nil } -func (s *Service) IssueTx(req *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { +func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), zap.String("method", "issueTx"), @@ -2299,7 +1300,7 @@ func (s *Service) IssueTx(req *http.Request, args *api.FormattedTx, response *ap return fmt.Errorf("couldn't parse tx: %w", err) } - if err := s.vm.issueTx(req.Context(), 
tx); err != nil { + if err := s.vm.issueTxFromRPC(tx); err != nil { return fmt.Errorf("couldn't issue tx: %w", err) } @@ -2596,62 +1597,6 @@ func (s *Service) GetTotalStake(_ *http.Request, args *GetTotalStakeArgs, reply return nil } -// GetMaxStakeAmountArgs is the request for calling GetMaxStakeAmount. -type GetMaxStakeAmountArgs struct { - SubnetID ids.ID `json:"subnetID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime avajson.Uint64 `json:"startTime"` - EndTime avajson.Uint64 `json:"endTime"` -} - -// GetMaxStakeAmountReply is the response from calling GetMaxStakeAmount. -type GetMaxStakeAmountReply struct { - Amount avajson.Uint64 `json:"amount"` -} - -// GetMaxStakeAmount returns the maximum amount of nAVAX staking to the named -// node during the time period. -func (s *Service) GetMaxStakeAmount(_ *http.Request, args *GetMaxStakeAmountArgs, reply *GetMaxStakeAmountReply) error { - s.vm.ctx.Log.Debug("deprecated API called", - zap.String("service", "platform"), - zap.String("method", "getMaxStakeAmount"), - ) - - startTime := time.Unix(int64(args.StartTime), 0) - endTime := time.Unix(int64(args.EndTime), 0) - - if startTime.After(endTime) { - return errStartAfterEndTime - } - - s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() - - now := s.vm.state.GetTimestamp() - if startTime.Before(now) { - return errStartTimeInThePast - } - - staker, err := executor.GetValidator(s.vm.state, args.SubnetID, args.NodeID) - if err == database.ErrNotFound { - return nil - } - if err != nil { - return err - } - - if startTime.After(staker.EndTime) { - return nil - } - if endTime.Before(staker.StartTime) { - return nil - } - - maxStakeAmount, err := executor.GetMaxWeight(s.vm.state, staker, startTime, endTime) - reply.Amount = avajson.Uint64(maxStakeAmount) - return err -} - // GetRewardUTXOsReply defines the GetRewardUTXOs replies returned from the API type GetRewardUTXOsReply struct { // Number of UTXOs returned @@ -2735,7 +1680,7 @@ func (v *GetValidatorsAtReply) 
MarshalJSON() ([]byte, error) { } if vdr.PublicKey != nil { - pk, err := formatting.Encode(formatting.HexNC, bls.PublicKeyToBytes(vdr.PublicKey)) + pk, err := formatting.Encode(formatting.HexNC, bls.PublicKeyToCompressedBytes(vdr.PublicKey)) if err != nil { return nil, err } @@ -2770,7 +1715,7 @@ func (v *GetValidatorsAtReply) UnmarshalJSON(b []byte) error { if err != nil { return err } - vdr.PublicKey, err = bls.PublicKeyFromBytes(pkBytes) + vdr.PublicKey, err = bls.PublicKeyFromCompressedBytes(pkBytes) if err != nil { return err } diff --git a/vms/platformvm/service.md b/vms/platformvm/service.md new file mode 100644 index 000000000000..b6005a8f1569 --- /dev/null +++ b/vms/platformvm/service.md @@ -0,0 +1,1983 @@ +--- +tags: [P-Chain, Platform Chain, AvalancheGo APIs] +description: This page is an overview of the P-Chain API associated with AvalancheGo. +sidebar_label: API +pagination_label: P-Chain Transaction Format +--- + +# Platform Chain API + +This API allows clients to interact with the +[P-Chain](/learn/avalanche/avalanche-platform.md#p-chain), which +maintains Avalanche’s [validator](/nodes/validate/how-to-stake#validators) set and handles +blockchain creation. + +## Endpoint + +```sh +/ext/bc/P +``` + +## Format + +This API uses the `json 2.0` RPC format. + +## Methods + +### `platform.exportKey` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning + +Not recommended for use on Mainnet. See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). + +::: + +Get the private key that controls a given address. + +**Signature:** + +```sh +platform.exportKey({ + username: string, + password: string, + address: string +}) -> {privateKey: string} +``` + +- `username` is the user that controls `address`. +- `password` is `username`‘s password. +- `privateKey` is the string representation of the private key that controls `address`. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"platform.exportKey", + "params" :{ + "username" :"myUsername", + "password": "myPassword", + "address": "P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "privateKey": "PrivateKey-Lf49kAJw3CbaL783vmbeAJvhscJqC7vi5yBYLxw2XfbzNS5RS" + } +} +``` + +### `platform.getBalance` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Get the balance of AVAX controlled by a given address. + +**Signature:** + +```sh +platform.getBalance({ + addresses: []string +}) -> { + balances: string -> int, + unlockeds: string -> int, + lockedStakeables: string -> int, + lockedNotStakeables: string -> int, + utxoIDs: []{ + txID: string, + outputIndex: int + } +} +``` + +- `addresses` are the addresses to get the balance of. +- `balances` is a map from assetID to the total balance. +- `unlockeds` is a map from assetID to the unlocked balance. +- `lockedStakeables` is a map from assetID to the locked stakeable balance. +- `lockedNotStakeables` is a map from assetID to the locked and not stakeable balance. +- `utxoIDs` are the IDs of the UTXOs that reference `address`. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" : 1, + "method" :"platform.getBalance", + "params" :{ + "addresses":["P-custom18jma8ppw3nhx5r4ap8clazz0dps7rv5u9xde7p"] + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "balance": "30000000000000000", + "unlocked": "20000000000000000", + "lockedStakeable": "10000000000000000", + "lockedNotStakeable": "0", + "balances": { + "BUuypiq2wyuLMvyhzFXcPyxPMCgSp7eeDohhQRqTChoBjKziC": "30000000000000000" + }, + "unlockeds": { + "BUuypiq2wyuLMvyhzFXcPyxPMCgSp7eeDohhQRqTChoBjKziC": "20000000000000000" + }, + "lockedStakeables": { + "BUuypiq2wyuLMvyhzFXcPyxPMCgSp7eeDohhQRqTChoBjKziC": "10000000000000000" + }, + "lockedNotStakeables": {}, + "utxoIDs": [ + { + "txID": "11111111111111111111111111111111LpoYY", + "outputIndex": 1 + }, + { + "txID": "11111111111111111111111111111111LpoYY", + "outputIndex": 0 + } + ] + }, + "id": 1 +} +``` + +### `platform.getBlock` + +Get a block by its ID. + +**Signature:** + +```sh +platform.getBlock({ + blockID: string + encoding: string // optional +}) -> { + block: string, + encoding: string +} +``` + +**Request:** + +- `blockID` is the block ID. It should be in cb58 format. +- `encoding` is the encoding format to use. Can be either `hex` or `json`. Defaults to `hex`. + +**Response:** + +- `block` is the block encoded to `encoding`. +- `encoding` is the `encoding`. 
+ +#### Hex Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getBlock", + "params": { + "blockID": "d7WYmb8VeZNHsny3EJCwMm6QA37s1EHwMxw1Y71V3FqPZ5EFG", + "encoding": "hex" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "block": "0x00000000000309473dc99a0851a29174d84e522da8ccb1a56ac23f7b0ba79f80acce34cf576900000000000f4241000000010000001200000001000000000000000000000000000000000000000000000000000000000000000000000000000000011c4c57e1bcb3c567f9f03caa75563502d1a21393173c06d9d79ea247b20e24800000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000338e0465f0000000100000000000000000427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd6520000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000070000000338d1041f0000000000000000000000010000000195a4467dd8f939554ea4e6501c08294386938cbf000000010000000900000001c79711c4b48dcde205b63603efef7c61773a0eb47efb503fcebe40d21962b7c25ebd734057400a12cce9cf99aceec8462923d5d91fffe1cb908372281ed738580119286dde", + "encoding": "hex" + }, + "id": 1 +} +``` + +#### JSON Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getBlock", + "params": { + "blockID": "d7WYmb8VeZNHsny3EJCwMm6QA37s1EHwMxw1Y71V3FqPZ5EFG", + "encoding": "json" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "block": { + "parentID": "5615di9ytxujackzaXNrVuWQy5y8Yrt8chPCscMr5Ku9YxJ1S", + "height": 1000001, + "txs": [ + { + "unsignedTx": { + "inputs": { + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [ + { + "txID": "DTqiagiMFdqbNQ62V2Gt1GddTVLkKUk2caGr4pyza9hTtsfta", + "outputIndex": 0, + "assetID": 
"FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 13839124063, + "signatureIndices": [0] + } + } + ], + "memo": "0x" + }, + "destinationChain": "2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5", + "exportedOutputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "P-avax1jkjyvlwclyu42n4yuegpczpfgwrf8r9lyj0d3c" + ], + "amount": 13838124063, + "locktime": 0, + "threshold": 1 + } + } + ] + }, + "credentials": [ + { + "signatures": [ + "0xc79711c4b48dcde205b63603efef7c61773a0eb47efb503fcebe40d21962b7c25ebd734057400a12cce9cf99aceec8462923d5d91fffe1cb908372281ed7385801" + ] + } + ] + } + ] + }, + "encoding": "json" + }, + "id": 1 +} +``` + +### `platform.getBlockByHeight` + +Get a block by its height. + +**Signature:** + +```sh +platform.getBlockByHeight({ + height: int + encoding: string // optional +}) -> { + block: string, + encoding: string +} +``` + +**Request:** + +- `height` is the block height. +- `encoding` is the encoding format to use. Can be either `hex` or `json`. Defaults to `hex`. + +**Response:** + +- `block` is the block encoded to `encoding`. +- `encoding` is the `encoding`. 
+ +#### Hex Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getBlockByHeight", + "params": { + "height": 1000001, + "encoding": "hex" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "block": "0x00000000000309473dc99a0851a29174d84e522da8ccb1a56ac23f7b0ba79f80acce34cf576900000000000f4241000000010000001200000001000000000000000000000000000000000000000000000000000000000000000000000000000000011c4c57e1bcb3c567f9f03caa75563502d1a21393173c06d9d79ea247b20e24800000000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000050000000338e0465f0000000100000000000000000427d4b22a2a78bcddd456742caf91b56badbff985ee19aef14573e7343fd6520000000121e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff000000070000000338d1041f0000000000000000000000010000000195a4467dd8f939554ea4e6501c08294386938cbf000000010000000900000001c79711c4b48dcde205b63603efef7c61773a0eb47efb503fcebe40d21962b7c25ebd734057400a12cce9cf99aceec8462923d5d91fffe1cb908372281ed738580119286dde", + "encoding": "hex" + }, + "id": 1 +} +``` + +#### JSON Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getBlockByHeight", + "params": { + "height": 1000001, + "encoding": "json" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "block": { + "parentID": "5615di9ytxujackzaXNrVuWQy5y8Yrt8chPCscMr5Ku9YxJ1S", + "height": 1000001, + "txs": [ + { + "unsignedTx": { + "inputs": { + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [ + { + "txID": "DTqiagiMFdqbNQ62V2Gt1GddTVLkKUk2caGr4pyza9hTtsfta", + "outputIndex": 0, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": 
"spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 13839124063, + "signatureIndices": [0] + } + } + ], + "memo": "0x" + }, + "destinationChain": "2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5", + "exportedOutputs": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": [ + "P-avax1jkjyvlwclyu42n4yuegpczpfgwrf8r9lyj0d3c" + ], + "amount": 13838124063, + "locktime": 0, + "threshold": 1 + } + } + ] + }, + "credentials": [ + { + "signatures": [ + "0xc79711c4b48dcde205b63603efef7c61773a0eb47efb503fcebe40d21962b7c25ebd734057400a12cce9cf99aceec8462923d5d91fffe1cb908372281ed7385801" + ] + } + ] + } + ] + }, + "encoding": "json" + }, + "id": 1 +} +``` + +### `platform.getBlockchains` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Get all the blockchains that exist (excluding the P-Chain). + +**Signature:** + +```sh +platform.getBlockchains() -> +{ + blockchains: []{ + id: string, + name:string, + subnetID: string, + vmID: string + } +} +``` + +- `blockchains` is all of the blockchains that exists on the Avalanche network. +- `name` is the human-readable name of this blockchain. +- `id` is the blockchain’s ID. +- `subnetID` is the ID of the Subnet that validates this blockchain. +- `vmID` is the ID of the Virtual Machine the blockchain runs. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getBlockchains", + "params": {}, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "blockchains": [ + { + "id": "2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM", + "name": "X-Chain", + "subnetID": "11111111111111111111111111111111LpoYY", + "vmID": "jvYyfQTxGMJLuGWa55kdP2p2zSUYsQ5Raupu4TW34ZAUBAbtq" + }, + { + "id": "2q9e4r6Mu3U68nU1fYjgbR6JvwrRx36CohpAX5UQxse55x1Q5", + "name": "C-Chain", + "subnetID": "11111111111111111111111111111111LpoYY", + "vmID": "mgj786NP7uDwBCcq6YwThhaN8FLyybkCa4zBWTQbNgmK6k9A6" + }, + { + "id": "CqhF97NNugqYLiGaQJ2xckfmkEr8uNeGG5TQbyGcgnZ5ahQwa", + "name": "Simple DAG Payments", + "subnetID": "11111111111111111111111111111111LpoYY", + "vmID": "sqjdyTKUSrQs1YmKDTUbdUhdstSdtRTGRbUn8sqK8B6pkZkz1" + }, + { + "id": "VcqKNBJsYanhVFxGyQE5CyNVYxL3ZFD7cnKptKWeVikJKQkjv", + "name": "Simple Chain Payments", + "subnetID": "11111111111111111111111111111111LpoYY", + "vmID": "sqjchUjzDqDfBPGjfQq2tXW1UCwZTyvzAWHsNzF2cb1eVHt6w" + }, + { + "id": "2SMYrx4Dj6QqCEA3WjnUTYEFSnpqVTwyV3GPNgQqQZbBbFgoJX", + "name": "Simple Timestamp Server", + "subnetID": "11111111111111111111111111111111LpoYY", + "vmID": "tGas3T58KzdjLHhBDMnH2TvrddhqTji5iZAMZ3RXs2NLpSnhH" + }, + { + "id": "KDYHHKjM4yTJTT8H8qPs5KXzE6gQH5TZrmP1qVr1P6qECj3XN", + "name": "My new timestamp", + "subnetID": "2bRCr6B4MiEfSjidDwxDpdCyviwnfUVqB2HGwhm947w9YYqb7r", + "vmID": "tGas3T58KzdjLHhBDMnH2TvrddhqTji5iZAMZ3RXs2NLpSnhH" + }, + { + "id": "2TtHFqEAAJ6b33dromYMqfgavGPF3iCpdG3hwNMiart2aB5QHi", + "name": "My new AVM", + "subnetID": "2bRCr6B4MiEfSjidDwxDpdCyviwnfUVqB2HGwhm947w9YYqb7r", + "vmID": "jvYyfQTxGMJLuGWa55kdP2p2zSUYsQ5Raupu4TW34ZAUBAbtq" + } + ] + }, + "id": 1 +} +``` + +### `platform.getBlockchainStatus` + +Get the status of a blockchain. 
+ +**Signature:** + +```sh +platform.getBlockchainStatus( + { + blockchainID: string + } +) -> {status: string} +``` + +`status` is one of: + +- `Validating`: The blockchain is being validated by this node. +- `Created`: The blockchain exists but isn’t being validated by this node. +- `Preferred`: The blockchain was proposed to be created and is likely to be created but the + transaction isn’t yet accepted. +- `Syncing`: This node is participating in this blockchain as a non-validating node. +- `Unknown`: The blockchain either wasn’t proposed or the proposal to create it isn’t preferred. The + proposal may be resubmitted. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getBlockchainStatus", + "params":{ + "blockchainID":"2NbS4dwGaf2p1MaXb65PrkZdXRwmSX4ZzGnUu7jm3aykgThuZE" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "status": "Created" + }, + "id": 1 +} +``` + +### `platform.getCurrentSupply` + +Returns an upper bound on amount of tokens that exist that can stake the requested Subnet. This is +an upper bound because it does not account for burnt tokens, including transaction fees. + +**Signature:** + +```sh +platform.getCurrentSupply({ + subnetID: string // optional +}) -> {supply: int} +``` + +- `supply` is an upper bound on the number of tokens that exist. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getCurrentSupply", + "params": { + "subnetID": "11111111111111111111111111111111LpoYY" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "supply": "365865167637779183" + }, + "id": 1 +} +``` + +The response in this example indicates that AVAX’s supply is at most 365.865 million. 
+ +### `platform.getCurrentValidators` + +List the current validators of the given Subnet. + +**Signature:** + +```sh +platform.getCurrentValidators({ + subnetID: string, // optional + nodeIDs: string[], // optional +}) -> { + validators: []{ + txID: string, + startTime: string, + endTime: string, + stakeAmount: string, + nodeID: string, + weight: string, + validationRewardOwner: { + locktime: string, + threshold: string, + addresses: string[] + }, + delegationRewardOwner: { + locktime: string, + threshold: string, + addresses: string[] + }, + potentialReward: string, + delegationFee: string, + uptime: string, + connected: bool, + signer: { + publicKey: string, + proofOfPosession: string + }, + delegatorCount: string, + delegatorWeight: string, + delegators: []{ + txID: string, + startTime: string, + endTime: string, + stakeAmount: string, + nodeID: string, + rewardOwner: { + locktime: string, + threshold: string, + addresses: string[] + }, + potentialReward: string, + } + } +} +``` + +- `subnetID` is the Subnet whose current validators are returned. If omitted, returns the current + validators of the Primary Network. +- `nodeIDs` is a list of the NodeIDs of current validators to request. If omitted, all current + validators are returned. If a specified NodeID is not in the set of current validators, it will + not be included in the response. +- `validators`: + - `txID` is the validator transaction. + - `startTime` is the Unix time when the validator starts validating the Subnet. + - `endTime` is the Unix time when the validator stops validating the Subnet. + - `stakeAmount` is the amount of tokens this validator staked. Omitted if `subnetID` is not a PoS + Subnet. + - `nodeID` is the validator’s node ID. + - `weight` is the validator’s weight when sampling validators. Omitted if `subnetID` is a PoS + Subnet. + - `validationRewardOwner` is an `OutputOwners` output which includes `locktime`, `threshold` and + array of `addresses`. 
Specifies the owner of the potential reward earned from staking. Omitted + if `subnetID` is not a PoS Subnet. + - `delegationRewardOwner` is an `OutputOwners` output which includes `locktime`, `threshold` and + array of `addresses`. Specifies the owner of the potential reward earned from delegations. + Omitted if `subnetID` is not a PoS Subnet. + - `potentialReward` is the potential reward earned from staking. Omitted if `subnetID` is not a + PoS Subnet. + - `delegationFee` is the percent fee this validator charges when others delegate stake to + them. Omitted if `subnetID` is not a PoS Subnet. + - `uptime` is the % of time the queried node has reported the peer as online and validating the + Subnet. Omitted if `subnetID` is not a PoS Subnet. + - `connected` is if the node is connected and tracks the Subnet. + - `signer` is the node's BLS public key and proof of possession. Omitted if the validator doesn't + have a BLS public key. + - `delegatorCount` is the number of delegators on this validator. + Omitted if `subnetID` is not a PoS Subnet. + - `delegatorWeight` is the total weight of delegators on this validator. + Omitted if `subnetID` is not a PoS Subnet. + - `delegators` is the list of delegators to this validator. + Omitted if `subnetID` is not a PoS Subnet. + Omitted unless `nodeIDs` specifies a single NodeID. + - `txID` is the delegator transaction. + - `startTime` is the Unix time when the delegator started. + - `endTime` is the Unix time when the delegator stops. + - `stakeAmount` is the amount of nAVAX this delegator staked. + - `nodeID` is the validating node’s node ID. + - `rewardOwner` is an `OutputOwners` output which includes `locktime`, `threshold` and array of + `addresses`. 
+ - `potentialReward` is the potential reward earned from staking + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getCurrentValidators", + "params": { + "nodeIDs": ["NodeID-5mb46qkSBj81k9g9e4VFjGGSbaaSLFRzD"] + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "validators": [ + { + "txID": "2NNkpYTGfTFLSGXJcHtVv6drwVU2cczhmjK2uhvwDyxwsjzZMm", + "startTime": "1600368632", + "endTime": "1602960455", + "stakeAmount": "2000000000000", + "nodeID": "NodeID-5mb46qkSBj81k9g9e4VFjGGSbaaSLFRzD", + "validationRewardOwner": { + "locktime": "0", + "threshold": "1", + "addresses": ["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"] + }, + "delegationRewardOwner": { + "locktime": "0", + "threshold": "1", + "addresses": ["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"] + }, + "potentialReward": "117431493426", + "delegationFee": "10.0000", + "uptime": "0.0000", + "connected": false, + "delegatorCount": "1", + "delegatorWeight": "25000000000", + "delegators": [ + { + "txID": "Bbai8nzGVcyn2VmeYcbS74zfjJLjDacGNVuzuvAQkHn1uWfoV", + "startTime": "1600368523", + "endTime": "1602960342", + "stakeAmount": "25000000000", + "nodeID": "NodeID-5mb46qkSBj81k9g9e4VFjGGSbaaSLFRzD", + "rewardOwner": { + "locktime": "0", + "threshold": "1", + "addresses": ["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"] + }, + "potentialReward": "11743144774" + } + ] + } + ] + }, + "id": 1 +} +``` + +### `platform.getHeight` + +Returns the height of the last accepted block. 
+ +**Signature:** + +```sh +platform.getHeight() -> +{ + height: int, +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getHeight", + "params": {}, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "height": "56" + }, + "id": 1 +} +``` + +### `platform.getMaxStakeAmount` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Returns the maximum amount of nAVAX staking to the named node during a particular time period. + +**Signature:** + +```sh +platform.getMaxStakeAmount( + { + subnetID: string, + nodeID: string, + startTime: int, + endTime: int + } +) -> +{ + amount: uint64 +} +``` + +- `subnetID` is a Buffer or cb58 string representing Subnet +- `nodeID` is a string representing ID of the node whose stake amount is required during the given + duration +- `startTime` is a big number denoting start time of the duration during which stake amount of the + node is required. +- `endTime` is a big number denoting end time of the duration during which stake amount of the node + is required. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getMaxStakeAmount", + "params": { + "subnetID":"11111111111111111111111111111111LpoYY", + "nodeID":"NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", + "startTime": 1644240334, + "endTime": 1644240634 + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "amount": "2000000000000000" + }, + "id": 1 +} +``` + +### `platform.getMinStake` + +Get the minimum amount of tokens required to validate the requested Subnet and the minimum amount of +tokens that can be delegated. 
+ +**Signature:** + +```sh +platform.getMinStake({ + subnetID: string // optional +}) -> +{ + minValidatorStake : uint64, + minDelegatorStake : uint64 +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"platform.getMinStake", + "params": { + "subnetID":"11111111111111111111111111111111LpoYY" + }, +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "minValidatorStake": "2000000000000", + "minDelegatorStake": "25000000000" + }, + "id": 1 +} +``` + +### `platform.getPendingValidators` + +List the validators in the pending validator set of the specified Subnet. Each validator is not +currently validating the Subnet but will in the future. + +**Signature:** + +```sh +platform.getPendingValidators({ + subnetID: string, // optional + nodeIDs: string[], // optional +}) -> { + validators: []{ + txID: string, + startTime: string, + endTime: string, + stakeAmount: string, + nodeID: string, + delegationFee: string, + connected: bool, + signer: { + publicKey: string, + proofOfPosession: string + }, + weight: string, + }, + delegators: []{ + txID: string, + startTime: string, + endTime: string, + stakeAmount: string, + nodeID: string + } +} +``` + +- `subnetID` is the Subnet whose current validators are returned. If omitted, returns the current + validators of the Primary Network. +- `nodeIDs` is a list of the NodeIDs of pending validators to request. If omitted, all pending + validators are returned. If a specified NodeID is not in the set of pending validators, it will + not be included in the response. +- `validators`: + - `txID` is the validator transaction. + - `startTime` is the Unix time when the validator starts validating the Subnet. + - `endTime` is the Unix time when the validator stops validating the Subnet. + - `stakeAmount` is the amount of tokens this validator staked. Omitted if `subnetID` is not a PoS + Subnet. 
+ - `nodeID` is the validator’s node ID. + - `connected` if the node is connected and tracks the Subnet. + - `signer` is the node's BLS public key and proof of possession. Omitted if the validator doesn't + have a BLS public key. + - `weight` is the validator’s weight when sampling validators. Omitted if `subnetID` is a PoS + Subnet. +- `delegators`: + - `txID` is the delegator transaction. + - `startTime` is the Unix time when the delegator starts. + - `endTime` is the Unix time when the delegator stops. + - `stakeAmount` is the amount of tokens this delegator staked. + - `nodeID` is the validating node’s node ID. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getPendingValidators", + "params": {}, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "validators": [ + { + "txID": "2NNkpYTGfTFLSGXJcHtVv6drwVU2cczhmjK2uhvwDyxwsjzZMm", + "startTime": "1600368632", + "endTime": "1602960455", + "stakeAmount": "200000000000", + "nodeID": "NodeID-5mb46qkSBj81k9g9e4VFjGGSbaaSLFRzD", + "delegationFee": "10.0000", + "connected": false + } + ], + "delegators": [ + { + "txID": "Bbai8nzGVcyn2VmeYcbS74zfjJLjDacGNVuzuvAQkHn1uWfoV", + "startTime": "1600368523", + "endTime": "1602960342", + "stakeAmount": "20000000000", + "nodeID": "NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg" + } + ] + }, + "id": 1 +} +``` + +### `platform.getRewardUTXOs` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Returns the UTXOs that were rewarded after the provided transaction's staking or delegation period +ended. 
+ +**Signature:** + +```sh +platform.getRewardUTXOs({ + txID: string, + encoding: string // optional +}) -> { + numFetched: integer, + utxos: []string, + encoding: string +} +``` + +- `txID` is the ID of the staking or delegating transaction +- `numFetched` is the number of returned UTXOs +- `utxos` is an array of encoded reward UTXOs +- `encoding` specifies the format for the returned UTXOs. Can only be `hex` when a value is + provided. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getRewardUTXOs", + "params": { + "txID": "2nmH8LithVbdjaXsxVQCQfXtzN9hBbmebrsaEYnLM9T32Uy2Y5" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "2", + "utxos": [ + "0x0000a195046108a85e60f7a864bb567745a37f50c6af282103e47cc62f036cee404700000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216c1f01765", + "0x0000ae8b1b94444eed8de9a81b1222f00f1b4133330add23d8ac288bffa98b85271100000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216473d042a" + ], + "encoding": "hex" + }, + "id": 1 +} +``` + +### `platform.getStake` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Get the amount of nAVAX staked by a set of addresses. The amount returned does not include staking +rewards. + +**Signature:** + +```sh +platform.getStake({ + addresses: []string, + validatorsOnly: true or false +}) -> +{ + stakeds: string -> int, + stakedOutputs: []string, + encoding: string +} +``` + +- `addresses` are the addresses to get information about. +- `validatorsOnly` can be either `true` or `false`. If `true`, will skip checking delegators for stake. 
+- `stakeds` is a map from assetID to the amount staked by addresses provided. +- `stakedOutputs` are the string representations of staked outputs. +- `encoding` specifies the format for the returned outputs. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getStake", + "params": { + "addresses": [ + "P-avax1pmgmagjcljjzuz2ve339dx82khm7q8getlegte" + ], + "validatorsOnly": true + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "staked": "6500000000000", + "stakeds": { + "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z": "6500000000000" + }, + "stakedOutputs": [ + "0x000021e67317cbc4be2aeb00677ad6462778a8f52274b9d605df2591b23027a87dff00000007000005e96630e800000000000000000000000001000000011f1c933f38da6ba0ba46f8c1b0a7040a9a991a80dd338ed1" + ], + "encoding": "hex" + }, + "id": 1 +} +``` + +### `platform.getStakingAssetID` + +Retrieve an assetID for a Subnet’s staking asset. + +**Signature:** + +```sh +platform.getStakingAssetID({ + subnetID: string // optional +}) -> { + assetID: string +} +``` + +- `subnetID` is the Subnet whose assetID is requested. +- `assetID` is the assetID for a Subnet’s staking asset. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getStakingAssetID", + "params": { + "subnetID": "11111111111111111111111111111111LpoYY" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "assetID": "2fombhL7aGPwj3KH4bfrmJwW6PVnMobf9Y2fn9GwxiAAJyFDbe" + }, + "id": 1 +} +``` + +:::note + +The AssetID for AVAX differs depending on the network you are on. 
+ +Mainnet: FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z + +Testnet: U8iRqJoiJm8xZHAacmvYyZVwqQx6uDNtQeP3CQ6fcgQk3JqnK + +::: + +### `platform.getSubnets` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +Get info about the Subnets. + +**Signature:** + +```sh +platform.getSubnets({ + ids: []string +}) -> +{ + subnets: []{ + id: string, + controlKeys: []string, + threshold: string + } +} +``` + +- `ids` are the IDs of the Subnets to get information about. If omitted, gets information about all + Subnets. +- `id` is the Subnet’s ID. +- `threshold` signatures from addresses in `controlKeys` are needed to add a validator to the + Subnet. If the Subnet is a PoS Subnet, then `threshold` will be `0` and `controlKeys` will be + empty. + +See [here](/nodes/validate/add-a-validator.md) for information on adding a validator to a +Subnet. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getSubnets", + "params": {"ids":["hW8Ma7dLMA7o4xmJf3AXBbo17bXzE7xnThUd3ypM4VAWo1sNJ"]}, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "subnets": [ + { + "id": "hW8Ma7dLMA7o4xmJf3AXBbo17bXzE7xnThUd3ypM4VAWo1sNJ", + "controlKeys": [ + "KNjXsaA1sZsaKCD1cd85YXauDuxshTes2", + "Aiz4eEt5xv9t4NCnAWaQJFNz5ABqLtJkR" + ], + "threshold": "2" + } + ] + }, + "id": 1 +} +``` + +### `platform.getTimestamp` + +Get the current P-Chain timestamp. 
+ +**Signature:** + +```sh +platform.getTimestamp() -> {timestamp: string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getTimestamp", + "params": {}, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "timestamp": "2021-09-07T00:00:00-04:00" + }, + "id": 1 +} +``` + +### `platform.getTotalStake` + +Get the total amount of tokens staked on the requested Subnet. + +**Signature:** + +```sh +platform.getTotalStake({ + subnetID: string +}) -> { + stake: int + weight: int +} +``` + +#### Primary Network Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getTotalStake", + "params": { + "subnetID": "11111111111111111111111111111111LpoYY" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "stake": "279825917679866811", + "weight": "279825917679866811" + }, + "id": 1 +} +``` + +#### Subnet Example + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getTotalStake", + "params": { + "subnetID": "2bRCr6B4MiEfSjidDwxDpdCyviwnfUVqB2HGwhm947w9YYqb7r" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "weight": "100000" + }, + "id": 1 +} +``` + +### `platform.getTx` + +Gets a transaction by its ID. + +Optional `encoding` parameter to specify the format for the returned transaction. Can be either +`hex` or `json`. Defaults to `hex`. 
+ +**Signature:** + +```sh +platform.getTx({ + txID: string, + encoding: string // optional +}) -> { + tx: string, + encoding: string, +} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getTx", + "params": { + "txID":"28KVjSw5h3XKGuNpJXWY74EdnGq4TUWvCgEtJPymgQTvudiugb", + "encoding": "json" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "tx": { + "unsignedTx": { + "networkID": 1, + "blockchainID": "11111111111111111111111111111111LpoYY", + "outputs": [], + "inputs": [ + { + "txID": "NXNJHKeaJyjjWVSq341t6LGQP5UNz796o1crpHPByv1TKp9ZP", + "outputIndex": 0, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 20824279595, + "signatureIndices": [0] + } + }, + { + "txID": "2ahK5SzD8iqi5KBqpKfxrnWtrEoVwQCqJsMoB9kvChCaHgAQC9", + "outputIndex": 1, + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "input": { + "amount": 28119890783, + "signatureIndices": [0] + } + } + ], + "memo": "0x", + "validator": { + "nodeID": "NodeID-VT3YhgFaWEzy4Ap937qMeNEDscCammzG", + "start": 1682945406, + "end": 1684155006, + "weight": 48944170378 + }, + "stake": [ + { + "assetID": "FvwEAhmxKfeiG8SnEvq42hc6whRyY3EFYAvebMqDNDGCgxN5Z", + "fxID": "spdxUxVJQbX85MGxMHbKw1sHxMnSqJ3QBzDyDYEP3h6TLuxqQ", + "output": { + "addresses": ["P-avax1tnuesf6cqwnjw7fxjyk7lhch0vhf0v95wj5jvy"], + "amount": 48944170378, + "locktime": 0, + "threshold": 1 + } + } + ], + "rewardsOwner": { + "addresses": ["P-avax19zfygxaf59stehzedhxjesads0p5jdvfeedal0"], + "locktime": 0, + "threshold": 1 + } + }, + "credentials": [ + { + "signatures": [ + "0x6954e90b98437646fde0c1d54c12190fc23ae5e319c4d95dda56b53b4a23e43825251289cdc3728f1f1e0d48eac20e5c8f097baa9b49ea8a3cb6a41bb272d16601" + ] + }, + { 
+ "signatures": [ + "0x6954e90b98437646fde0c1d54c12190fc23ae5e319c4d95dda56b53b4a23e43825251289cdc3728f1f1e0d48eac20e5c8f097baa9b49ea8a3cb6a41bb272d16601" + ] + } + ], + "id": "28KVjSw5h3XKGuNpJXWY74EdnGq4TUWvCgEtJPymgQTvudiugb" + }, + "encoding": "json" + }, + "id": 1 +} +``` + +### `platform.getTxStatus` + +Gets a transaction’s status by its ID. If the transaction was dropped, response will include a +`reason` field with more information why the transaction was dropped. + +**Signature:** + +```sh +platform.getTxStatus({ + txID: string +}) -> {status: string} +``` + +`status` is one of: + +- `Committed`: The transaction is (or will be) accepted by every node +- `Processing`: The transaction is being voted on by this node +- `Dropped`: The transaction will never be accepted by any node in the network, check `reason` field + for more information +- `Unknown`: The transaction hasn’t been seen by this node + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getTxStatus", + "params": { + "txID":"TAG9Ns1sa723mZy1GSoGqWipK6Mvpaj7CAswVJGM6MkVJDF9Q" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "status": "Committed" + }, + "id": 1 +} +``` + +### `platform.getUTXOs` + +Gets the UTXOs that reference a given set of addresses. + +**Signature:** + +```sh +platform.getUTXOs( + { + addresses: []string, + limit: int, // optional + startIndex: { // optional + address: string, + utxo: string + }, + sourceChain: string, // optional + encoding: string, // optional + }, +) -> +{ + numFetched: int, + utxos: []string, + endIndex: { + address: string, + utxo: string + }, + encoding: string, +} +``` + +- `utxos` is a list of UTXOs such that each UTXO references at least one address in `addresses`. +- At most `limit` UTXOs are returned. If `limit` is omitted or greater than 1024, it is set to 1024. +- This method supports pagination. 
`endIndex` denotes the last UTXO returned. To get the next set of + UTXOs, use the value of `endIndex` as `startIndex` in the next call. +- If `startIndex` is omitted, will fetch all UTXOs up to `limit`. +- When using pagination (that is when `startIndex` is provided), UTXOs are not guaranteed to be unique + across multiple calls. That is, a UTXO may appear in the result of the first call, and then again + in the second call. +- When using pagination, consistency is not guaranteed across multiple calls. That is, the UTXO set + of the addresses may have changed between calls. +- `encoding` specifies the format for the returned UTXOs. Can only be `hex` when a value is + provided. + +#### **Example** + +Suppose we want all UTXOs that reference at least one of +`P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5` and `P-avax1d09qn852zcy03sfc9hay2llmn9hsgnw4tp3dv6`. + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"platform.getUTXOs", + "params" :{ + "addresses":["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", "P-avax1d09qn852zcy03sfc9hay2llmn9hsgnw4tp3dv6"], + "limit":5, + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +This gives response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "5", + "utxos": [ + "0x0000a195046108a85e60f7a864bb567745a37f50c6af282103e47cc62f036cee404700000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216c1f01765", + "0x0000ae8b1b94444eed8de9a81b1222f00f1b4133330add23d8ac288bffa98b85271100000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216473d042a", + 
"0x0000731ce04b1feefa9f4291d869adc30a33463f315491e164d89be7d6d2d7890cfc00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f21600dd3047", + "0x0000b462030cc4734f24c0bc224cf0d16ee452ea6b67615517caffead123ab4fbf1500000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216c71b387e", + "0x000054f6826c39bc957c0c6d44b70f961a994898999179cc32d21eb09c1908d7167b00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f2166290e79d" + ], + "endIndex": { + "address": "P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "kbUThAUfmBXUmRgTpgD6r3nLj7rJUGho6xyht5nouNNypH45j" + }, + "encoding": "hex" + }, + "id": 1 +} +``` + +Since `numFetched` is the same as `limit`, we can tell that there may be more UTXOs that were not +fetched. 
We call the method again, this time with `startIndex`: + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"platform.getUTXOs", + "params" :{ + "addresses":["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "limit":5, + "startIndex": { + "address": "P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "0x62fc816bb209857923770c286192ab1f9e3f11e4a7d4ba0943111c3bbfeb9e4a5ea72fae" + }, + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +This gives response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "4", + "utxos": [ + "0x000020e182dd51ee4dcd31909fddd75bb3438d9431f8e4efce86a88a684f5c7fa09300000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f21662861d59", + "0x0000a71ba36c475c18eb65dc90f6e85c4fd4a462d51c5de3ac2cbddf47db4d99284e00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f21665f6f83f", + "0x0000925424f61cb13e0fbdecc66e1270de68de9667b85baa3fdc84741d048daa69fa00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216afecf76a", + "0x000082f30327514f819da6009fad92b5dba24d27db01e29ad7541aa8e6b6b554615c00000000345aa98e8a990f4101e2268fab4c4e1f731c8dfbcffa3a77978686e6390d624f000000070000000000000001000000000000000000000001000000018ba98dabaebcd83056799841cfbc567d8b10f216779c2d59" + ], + "endIndex": { + "address": "P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "21jG2RfqyHUUgkTLe2tUp6ETGLriSDTW3th8JXFbPRNiSZ11jK" + }, + "encoding": "hex" + }, + "id": 1 +} +``` + +Since `numFetched` is less than `limit`, we know that we are done fetching UTXOs and don’t need to +call this method again. 
+ +Suppose we want to fetch the UTXOs exported from the X Chain to the P Chain in order to build an +ImportTx. Then we need to call GetUTXOs with the `sourceChain` argument in order to retrieve the +atomic UTXOs: + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"platform.getUTXOs", + "params" :{ + "addresses":["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"], + "sourceChain": "X", + "encoding": "hex" + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +This gives response: + +```json +{ + "jsonrpc": "2.0", + "result": { + "numFetched": "1", + "utxos": [ + "0x00001f989ffaf18a18a59bdfbf209342aa61c6a62a67e8639d02bb3c8ddab315c6fa0000000139c33a499ce4c33a3b09cdd2cfa01ae70dbf2d18b2d7d168524440e55d55008800000007000000746a528800000000000000000000000001000000013cb7d3842e8cee6a0ebd09f1fe884f6861e1b29cd704fe76" + ], + "endIndex": { + "address": "P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5", + "utxo": "S5UKgWoVpoGFyxfisebmmRf8WqC7ZwcmYwS7XaDVZqoaFcCwK" + }, + "encoding": "hex" + }, + "id": 1 +} +``` + +### `platform.getValidatorsAt` + +Get the validators and their weights of a Subnet or the Primary Network at a given P-Chain height. + +**Signature:** + +```sh +platform.getValidatorsAt( + { + height: int, + subnetID: string, // optional + } +) +``` + +- `height` is the P-Chain height to get the validator set at. +- `subnetID` is the Subnet ID to get the validator set of. If not given, gets validator set of the + Primary Network. 
+ +**Example Call:** + +```bash +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.getValidatorsAt", + "params": { + "height":1 + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "validators": { + "NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg": 2000000000000000, + "NodeID-GWPcbFJZFfZreETSoWjPimr846mXEKCtu": 2000000000000000, + "NodeID-MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ": 2000000000000000, + "NodeID-NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN": 2000000000000000, + "NodeID-P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5": 2000000000000000 + } + }, + "id": 1 +} +``` + +### `platform.issueTx` + +Issue a transaction to the Platform Chain. + +**Signature:** + +```sh +platform.issueTx({ + tx: string, + encoding: string, // optional +}) -> {txID: string} +``` + +- `tx` is the byte representation of a transaction. +- `encoding` specifies the encoding format for the transaction bytes. Can only be `hex` when a value + is provided. +- `txID` is the transaction’s ID. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.issueTx", + "params": { + "tx":"0x00000009de31b4d8b22991d51aa6aa1fc733f23a851a8c9400000000000186a0000000005f041280000000005f9ca900000030390000000000000001fceda8f90fcb5d30614b99d79fc4baa29307762668f16eb0259a57c2d3b78c875c86ec2045792d4df2d926c40f829196e0bb97ee697af71f5b0a966dabff749634c8b729855e937715b0e44303fd1014daedc752006011b730", + "encoding": "hex" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "txID": "G3BuH6ytQ2averrLxJJugjWZHTRubzCrUZEXoheG5JMqL5ccY" + }, + "id": 1 +} +``` + +### `platform.listAddresses` + +:::caution + +Deprecated as of [**v1.9.12**](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12). + +::: + +:::warning + +Not recommended for use on Mainnet. 
See warning notice in [Keystore API](/reference/avalanchego/keystore-api.md). + +::: + +List addresses controlled by the given user. + +**Signature:** + +```sh +platform.listAddresses({ + username: string, + password: string +}) -> {addresses: []string} +``` + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.listAddresses", + "params": { + "username":"myUsername", + "password":"myPassword" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "addresses": ["P-avax18jma8ppw3nhx5r4ap8clazz0dps7rv5ukulre5"] + }, + "id": 1 +} +``` + +### `platform.sampleValidators` + +Sample validators from the specified Subnet. + +**Signature:** + +```sh +platform.sampleValidators( + { + size: int, + subnetID: string, // optional + } +) -> +{ + validators: []string +} +``` + +- `size` is the number of validators to sample. +- `subnetID` is the Subnet to sample from. If omitted, defaults to the Primary Network. +- Each element of `validators` is the ID of a validator. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc":"2.0", + "id" :1, + "method" :"platform.sampleValidators", + "params" :{ + "size":2 + } +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "validators": [ + "NodeID-MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", + "NodeID-NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN" + ] + } +} +``` + +### `platform.validatedBy` + +Get the Subnet that validates a given blockchain. + +**Signature:** + +```sh +platform.validatedBy( + { + blockchainID: string + } +) -> {subnetID: string} +``` + +- `blockchainID` is the blockchain’s ID. +- `subnetID` is the ID of the Subnet that validates the blockchain. 
+ +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.validatedBy", + "params": { + "blockchainID": "KDYHHKjM4yTJTT8H8qPs5KXzE6gQH5TZrmP1qVr1P6qECj3XN" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "subnetID": "2bRCr6B4MiEfSjidDwxDpdCyviwnfUVqB2HGwhm947w9YYqb7r" + }, + "id": 1 +} +``` + +### `platform.validates` + +Get the IDs of the blockchains a Subnet validates. + +**Signature:** + +```sh +platform.validates( + { + subnetID: string + } +) -> {blockchainIDs: []string} +``` + +- `subnetID` is the Subnet’s ID. +- Each element of `blockchainIDs` is the ID of a blockchain the Subnet validates. + +**Example Call:** + +```sh +curl -X POST --data '{ + "jsonrpc": "2.0", + "method": "platform.validates", + "params": { + "subnetID":"2bRCr6B4MiEfSjidDwxDpdCyviwnfUVqB2HGwhm947w9YYqb7r" + }, + "id": 1 +}' -H 'content-type:application/json;' 127.0.0.1:9650/ext/bc/P +``` + +**Example Response:** + +```json +{ + "jsonrpc": "2.0", + "result": { + "blockchainIDs": [ + "KDYHHKjM4yTJTT8H8qPs5KXzE6gQH5TZrmP1qVr1P6qECj3XN", + "2TtHFqEAAJ6b33dromYMqfgavGPF3iCpdG3hwNMiart2aB5QHi" + ] + }, + "id": 1 +} +``` diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 25ce69bce037..69e94d0dee3e 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -34,18 +34,22 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" - "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" + 
"github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" avajson "github.com/ava-labs/avalanchego/utils/json" vmkeystore "github.com/ava-labs/avalanchego/vms/components/keystore" pchainapi "github.com/ava-labs/avalanchego/vms/platformvm/api" + blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) var ( @@ -72,29 +76,28 @@ var ( } ) -func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { - vm, _, mutableSharedMemory := defaultVM(t, latestFork) - vm.ctx.Lock.Lock() - defer vm.ctx.Lock.Unlock() - ks := keystore.New(logging.NoLog{}, memdb.New()) - require.NoError(t, ks.CreateUser(testUsername, testPassword)) +func defaultService(t *testing.T) (*Service, *mutableSharedMemory, *txstest.WalletFactory) { + vm, factory, _, mutableSharedMemory := defaultVM(t, latestFork) - vm.ctx.Keystore = ks.NewBlockchainKeyStore(vm.ctx.ChainID) return &Service{ vm: vm, addrManager: avax.NewAddressManager(vm.ctx), stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ Size: stakerAttributesCacheSize, }, - }, mutableSharedMemory + }, mutableSharedMemory, factory } -// Give user [testUsername] control of [testPrivateKey] and keys[0] (which is funded) -func defaultAddress(t *testing.T, service *Service) { +func TestExportKey(t *testing.T) { require := require.New(t) + service, _, _ := defaultService(t) service.vm.ctx.Lock.Lock() - defer service.vm.ctx.Lock.Unlock() + + ks := keystore.New(logging.NoLog{}, memdb.New()) + require.NoError(ks.CreateUser(testUsername, testPassword)) + service.vm.ctx.Keystore = ks.NewBlockchainKeyStore(service.vm.ctx.ChainID) + user, err := 
vmkeystore.NewUserFromKeystore(service.vm.ctx.Keystore, testUsername, testPassword) require.NoError(err) @@ -102,62 +105,23 @@ func defaultAddress(t *testing.T, service *Service) { require.NoError(err) require.NoError(user.PutKeys(pk, keys[0])) -} - -func TestAddValidator(t *testing.T) { - require := require.New(t) - - expectedJSONString := `{"username":"","password":"","from":null,"changeAddr":"","txID":"11111111111111111111111111111111LpoYY","startTime":"0","endTime":"0","weight":"0","nodeID":"NodeID-111111111111111111116DBWJs","rewardAddress":"","delegationFeeRate":"0.0000"}` - args := AddValidatorArgs{} - bytes, err := json.Marshal(&args) - require.NoError(err) - require.Equal(expectedJSONString, string(bytes)) -} -func TestCreateBlockchainArgsParsing(t *testing.T) { - require := require.New(t) - - jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], "name":"awesome", "username":"bob loblaw", "password":"yeet", "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` - args := CreateBlockchainArgs{} - require.NoError(json.Unmarshal([]byte(jsonString), &args)) - - _, err := json.Marshal(args.GenesisData) - require.NoError(err) -} + service.vm.ctx.Lock.Unlock() -func TestExportKey(t *testing.T) { - require := require.New(t) - jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","address":"` + testAddress + `"}` + jsonString := `{"username":"` + testUsername + `","password":"` + testPassword + `","address":"` + testAddress + `"}` args := ExportKeyArgs{} require.NoError(json.Unmarshal([]byte(jsonString), &args)) - service, _ := defaultService(t) - defaultAddress(t, service) - reply := ExportKeyReply{} require.NoError(service.ExportKey(nil, &args, &reply)) require.Equal(testPrivateKey, reply.PrivateKey.Bytes()) } -func TestImportKey(t *testing.T) { - require := require.New(t) - jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","privateKey":"PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"}` - 
args := ImportKeyArgs{} - require.NoError(json.Unmarshal([]byte(jsonString), &args)) - - service, _ := defaultService(t) - - reply := api.JSONAddress{} - require.NoError(service.ImportKey(nil, &args, &reply)) - require.Equal(testAddress, reply.Address) -} - // Test issuing a tx and accepted func TestGetTxStatus(t *testing.T) { require := require.New(t) - service, mutableSharedMemory := defaultService(t) - defaultAddress(t, service) + service, mutableSharedMemory, factory := defaultService(t) service.vm.ctx.Lock.Lock() recipientKey, err := secp256k1.NewPrivateKey() @@ -168,11 +132,12 @@ func TestGetTxStatus(t *testing.T) { sm := m.NewSharedMemory(service.vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(service.vm.ctx.XChainID) - // #nosec G404 + randSrc := rand.NewSource(0) + utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), + OutputIndex: uint32(randSrc.Int63()), }, Asset: avax.Asset{ID: service.vm.ctx.AVAXAssetID}, Out: &secp256k1fx.TransferOutput{ @@ -204,14 +169,17 @@ func TestGetTxStatus(t *testing.T) { mutableSharedMemory.SharedMemory = sm - tx, err := service.vm.txBuilder.NewImportTx( + builder, signer := factory.NewWallet(recipientKey) + utx, err := builder.NewImportTx( service.vm.ctx.XChainID, - ids.ShortEmpty, - []*secp256k1.PrivateKey{recipientKey}, - ids.ShortEmpty, - nil, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) service.vm.ctx.Lock.Unlock() @@ -224,7 +192,7 @@ func TestGetTxStatus(t *testing.T) { require.Zero(resp.Reason) // put the chain in existing chain list - require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) + require.NoError(service.vm.Network.IssueTxFromRPC(tx)) service.vm.ctx.Lock.Lock() block, err := service.vm.BuildBlock(context.Background()) @@ -247,56 +215,89 @@ func TestGetTxStatus(t *testing.T) { func 
TestGetTx(t *testing.T) { type test struct { description string - createTx func(service *Service) (*txs.Tx, error) + createTx func(service *Service, factory *txstest.WalletFactory) (*txs.Tx, error) } tests := []test{ { "standard block", - func(service *Service) (*txs.Tx, error) { - return service.vm.txBuilder.NewCreateChainTx( // Test GetTx works for standard blocks + func(_ *Service, factory *txstest.WalletFactory) (*txs.Tx, error) { + builder, signer := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), []byte{}, constants.AVMID, []ids.ID{}, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - keys[0].PublicKey().Address(), // change addr - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) + require.NoError(t, err) + return walletsigner.SignUnsigned(context.Background(), signer, utx) }, }, { "proposal block", - func(service *Service) (*txs.Tx, error) { + func(service *Service, factory *txstest.WalletFactory) (*txs.Tx, error) { sk, err := bls.NewSecretKey() require.NoError(t, err) - return service.vm.txBuilder.NewAddPermissionlessValidatorTx( // Test GetTx works for proposal blocks - service.vm.MinValidatorStake, - uint64(service.vm.clock.Time().Add(txexecutor.SyncBound).Unix()), - uint64(service.vm.clock.Time().Add(txexecutor.SyncBound).Add(defaultMinStakingDuration).Unix()), - ids.GenerateTestNodeID(), + rewardsOwner := &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + } + + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(service.vm.clock.Time().Add(txexecutor.SyncBound).Unix()), + End: 
uint64(service.vm.clock.Time().Add(txexecutor.SyncBound).Add(defaultMinStakingDuration).Unix()), + Wght: service.vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - ids.GenerateTestShortID(), + service.vm.ctx.AVAXAssetID, + rewardsOwner, + rewardsOwner, 0, - []*secp256k1.PrivateKey{keys[0]}, - keys[0].PublicKey().Address(), // change addr - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) + require.NoError(t, err) + return walletsigner.SignUnsigned(context.Background(), txSigner, utx) }, }, { "atomic block", - func(service *Service) (*txs.Tx, error) { - return service.vm.txBuilder.NewExportTx( // Test GetTx works for proposal blocks - 100, + func(service *Service, factory *txstest.WalletFactory) (*txs.Tx, error) { + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewExportTx( service.vm.ctx.XChainID, - ids.GenerateTestShortID(), - []*secp256k1.PrivateKey{keys[0]}, - keys[0].PublicKey().Address(), // change addr - nil, + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: service.vm.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 100, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + }}, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) + require.NoError(t, err) + return walletsigner.SignUnsigned(context.Background(), signer, utx) }, }, } @@ -309,11 +310,10 @@ func TestGetTx(t *testing.T) { ) t.Run(testName, func(t *testing.T) { require := require.New(t) - service, _ := defaultService(t) - defaultAddress(t, service) + service, _, txBuilder := defaultService(t) service.vm.ctx.Lock.Lock() - tx, err := test.createTx(service) + tx, err := test.createTx(service, txBuilder) require.NoError(err) service.vm.ctx.Lock.Unlock() @@ -326,7 
+326,7 @@ func TestGetTx(t *testing.T) { err = service.GetTx(nil, arg, &response) require.ErrorIs(err, database.ErrNotFound) // We haven't issued the tx yet - require.NoError(service.vm.Network.IssueTx(context.Background(), tx)) + require.NoError(service.vm.Network.IssueTxFromRPC(tx)) service.vm.ctx.Lock.Lock() blk, err := service.vm.BuildBlock(context.Background()) @@ -374,8 +374,12 @@ func TestGetTx(t *testing.T) { func TestGetBalance(t *testing.T) { require := require.New(t) - service, _ := defaultService(t) - defaultAddress(t, service) + service, _, _ := defaultService(t) + + var ( + feeCalc = fee.NewStaticCalculator(service.vm.Config.StaticFeeConfig, service.vm.Config.UpgradeConfig) + createSubnetFee = feeCalc.CalculateFee(&txs.CreateSubnetTx{}, service.vm.clock.Time()) + ) // Ensure GetStake is correct for each of the genesis validators genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) @@ -392,7 +396,7 @@ func TestGetBalance(t *testing.T) { if idx == 0 { // we use the first key to fund a subnet creation in [defaultGenesis]. 
// As such we need to account for the subnet creation fee - balance = defaultBalance - service.vm.Config.GetCreateSubnetTxFee(service.vm.clock.Time()) + balance = defaultBalance - createSubnetFee } require.Equal(avajson.Uint64(balance), reply.Balance) require.Equal(avajson.Uint64(balance), reply.Unlocked) @@ -403,8 +407,7 @@ func TestGetBalance(t *testing.T) { func TestGetStake(t *testing.T) { require := require.New(t) - service, _ := defaultService(t) - defaultAddress(t, service) + service, _, factory := defaultService(t) // Ensure GetStake is correct for each of the genesis validators genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) @@ -476,17 +479,26 @@ func TestGetStake(t *testing.T) { delegatorNodeID := genesisNodeIDs[0] delegatorStartTime := defaultValidateStartTime delegatorEndTime := defaultGenesisTime.Add(defaultMinStakingDuration) - tx, err := service.vm.txBuilder.NewAddDelegatorTx( - stakeAmount, - uint64(delegatorStartTime.Unix()), - uint64(delegatorEndTime.Unix()), - delegatorNodeID, - ids.GenerateTestShortID(), - []*secp256k1.PrivateKey{keys[0]}, - keys[0].PublicKey().Address(), // change addr - nil, + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: delegatorNodeID, + Start: uint64(delegatorStartTime.Unix()), + End: uint64(delegatorEndTime.Unix()), + Wght: stakeAmount, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addDelTx := tx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( @@ -531,18 +543,26 @@ func TestGetStake(t *testing.T) { stakeAmount = service.vm.MinValidatorStake + 54321 pendingStakerNodeID := ids.GenerateTestNodeID() pendingStakerEndTime 
:= uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()) - tx, err = service.vm.txBuilder.NewAddValidatorTx( - stakeAmount, - uint64(defaultGenesisTime.Unix()), - pendingStakerEndTime, - pendingStakerNodeID, - ids.GenerateTestShortID(), + utx2, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: pendingStakerNodeID, + Start: uint64(defaultGenesisTime.Unix()), + End: pendingStakerEndTime, + Wght: stakeAmount, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, 0, - []*secp256k1.PrivateKey{keys[0]}, - keys[0].PublicKey().Address(), // change addr - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx2) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -576,8 +596,7 @@ func TestGetStake(t *testing.T) { func TestGetCurrentValidators(t *testing.T) { require := require.New(t) - service, _ := defaultService(t) - defaultAddress(t, service) + service, _, factory := defaultService(t) genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) @@ -611,17 +630,26 @@ func TestGetCurrentValidators(t *testing.T) { service.vm.ctx.Lock.Lock() - delTx, err := service.vm.txBuilder.NewAddDelegatorTx( - stakeAmount, - uint64(delegatorStartTime.Unix()), - uint64(delegatorEndTime.Unix()), - validatorNodeID, - ids.GenerateTestShortID(), - []*secp256k1.PrivateKey{keys[0]}, - keys[0].PublicKey().Address(), // change addr - nil, + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: validatorNodeID, + Start: uint64(delegatorStartTime.Unix()), + End: uint64(delegatorEndTime.Unix()), + Wght: stakeAmount, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + 
Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addDelTx := delTx.Unsigned.(*txs.AddDelegatorTx) staker, err := state.NewCurrentStaker( @@ -678,7 +706,7 @@ func TestGetCurrentValidators(t *testing.T) { service.vm.ctx.Lock.Lock() // Reward the delegator - tx, err := builder.NewRewardValidatorTx(service.vm.ctx, delTx.ID()) + tx, err := blockbuilder.NewRewardValidatorTx(service.vm.ctx, delTx.ID()) require.NoError(err) service.vm.state.AddTx(tx, status.Committed) service.vm.state.DeleteCurrentDelegator(staker) @@ -703,7 +731,7 @@ func TestGetCurrentValidators(t *testing.T) { func TestGetTimestamp(t *testing.T) { require := require.New(t) - service, _ := defaultService(t) + service, _, _ := defaultService(t) reply := GetTimestampReply{} require.NoError(service.GetTimestamp(nil, nil, &reply)) @@ -739,23 +767,26 @@ func TestGetBlock(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { require := require.New(t) - service, _ := defaultService(t) + service, _, factory := defaultService(t) service.vm.ctx.Lock.Lock() - service.vm.Config.CreateAssetTxFee = 100 * defaultTxFee + service.vm.StaticFeeConfig.CreateAssetTxFee = 100 * defaultTxFee - // Make a block an accept it, then check we can get it. 
- tx, err := service.vm.txBuilder.NewCreateChainTx( // Test GetTx works for standard blocks + builder, signer := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), []byte{}, constants.AVMID, []ids.ID{}, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - keys[0].PublicKey().Address(), // change addr - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) preferredID := service.vm.manager.Preferred() preferred, err := service.vm.manager.GetBlock(preferredID) @@ -1025,3 +1056,56 @@ func TestServiceGetBlockByHeight(t *testing.T) { }) } } + +func TestServiceGetSubnets(t *testing.T) { + require := require.New(t) + service, _, _ := defaultService(t) + + testSubnet1ID := testSubnet1.ID() + + var response GetSubnetsResponse + require.NoError(service.GetSubnets(nil, &GetSubnetsArgs{}, &response)) + require.Equal([]APISubnet{ + { + ID: testSubnet1ID, + ControlKeys: []string{ + "P-testing1d6kkj0qh4wcmus3tk59npwt3rluc6en72ngurd", + "P-testing17fpqs358de5lgu7a5ftpw2t8axf0pm33983krk", + "P-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e", + }, + Threshold: 2, + }, + { + ID: constants.PrimaryNetworkID, + ControlKeys: []string{}, + Threshold: 0, + }, + }, response.Subnets) + + newOwnerIDStr := "P-testing1t73fa4p4dypa4s3kgufuvr6hmprjclw66mgqgm" + newOwnerID, err := service.addrManager.ParseLocalAddress(newOwnerIDStr) + require.NoError(err) + + service.vm.ctx.Lock.Lock() + service.vm.state.SetSubnetOwner(testSubnet1ID, &secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{newOwnerID}, + Threshold: 1, + }) + service.vm.ctx.Lock.Unlock() + + require.NoError(service.GetSubnets(nil, &GetSubnetsArgs{}, &response)) + require.Equal([]APISubnet{ + { + ID: testSubnet1ID, + 
ControlKeys: []string{ + newOwnerIDStr, + }, + Threshold: 1, + }, + { + ID: constants.PrimaryNetworkID, + ControlKeys: []string{}, + Threshold: 0, + }, + }, response.Subnets) +} diff --git a/vms/platformvm/signer/proof_of_possession.go b/vms/platformvm/signer/proof_of_possession.go index 8b32975b4969..245d2a96c6f0 100644 --- a/vms/platformvm/signer/proof_of_possession.go +++ b/vms/platformvm/signer/proof_of_possession.go @@ -30,7 +30,7 @@ type ProofOfPossession struct { func NewProofOfPossession(sk *bls.SecretKey) *ProofOfPossession { pk := bls.PublicFromSecretKey(sk) - pkBytes := bls.PublicKeyToBytes(pk) + pkBytes := bls.PublicKeyToCompressedBytes(pk) sig := bls.SignProofOfPossession(sk, pkBytes) sigBytes := bls.SignatureToBytes(sig) @@ -43,7 +43,7 @@ func NewProofOfPossession(sk *bls.SecretKey) *ProofOfPossession { } func (p *ProofOfPossession) Verify() error { - publicKey, err := bls.PublicKeyFromBytes(p.PublicKey[:]) + publicKey, err := bls.PublicKeyFromCompressedBytes(p.PublicKey[:]) if err != nil { return err } @@ -94,7 +94,7 @@ func (p *ProofOfPossession) UnmarshalJSON(b []byte) error { if err != nil { return err } - pk, err := bls.PublicKeyFromBytes(pkBytes) + pk, err := bls.PublicKeyFromCompressedBytes(pkBytes) if err != nil { return err } diff --git a/vms/platformvm/state/chain_time_helpers.go b/vms/platformvm/state/chain_time_helpers.go new file mode 100644 index 000000000000..036eb168d73d --- /dev/null +++ b/vms/platformvm/state/chain_time_helpers.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/timer/mockable" +) + +func NextBlockTime(state Chain, clk *mockable.Clock) (time.Time, bool, error) { + var ( + timestamp = clk.Time() + parentTime = state.GetTimestamp() + ) + if parentTime.After(timestamp) { + timestamp = parentTime + } + // [timestamp] = max(now, parentTime) + + nextStakerChangeTime, err := GetNextStakerChangeTime(state) + if err != nil { + return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) + } + + // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] + timeWasCapped := !timestamp.Before(nextStakerChangeTime) + if timeWasCapped { + timestamp = nextStakerChangeTime + } + // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) + return timestamp, timeWasCapped, nil +} + +// GetNextStakerChangeTime returns the next time a staker will be either added +// or removed to/from the current validator set. 
+func GetNextStakerChangeTime(state Chain) (time.Time, error) { + currentStakerIterator, err := state.GetCurrentStakerIterator() + if err != nil { + return time.Time{}, err + } + defer currentStakerIterator.Release() + + pendingStakerIterator, err := state.GetPendingStakerIterator() + if err != nil { + return time.Time{}, err + } + defer pendingStakerIterator.Release() + + hasCurrentStaker := currentStakerIterator.Next() + hasPendingStaker := pendingStakerIterator.Next() + switch { + case hasCurrentStaker && hasPendingStaker: + nextCurrentTime := currentStakerIterator.Value().NextTime + nextPendingTime := pendingStakerIterator.Value().NextTime + if nextCurrentTime.Before(nextPendingTime) { + return nextCurrentTime, nil + } + return nextPendingTime, nil + case hasCurrentStaker: + return currentStakerIterator.Value().NextTime, nil + case hasPendingStaker: + return pendingStakerIterator.Value().NextTime, nil + default: + return time.Time{}, database.ErrNotFound + } +} diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 907c3c56ef7d..91fb01d08fc9 100644 --- a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -43,7 +43,7 @@ type diff struct { modifiedDelegateeRewards map[ids.ID]map[ids.NodeID]uint64 pendingStakerDiffs diffStakers - addedSubnets []*txs.Tx + addedSubnetIDs []ids.ID // Subnet ID --> Owner of the subnet subnetOwners map[ids.ID]fx.Owner // Subnet ID --> Tx that transforms the subnet @@ -272,8 +272,8 @@ func (d *diff) GetPendingStakerIterator() (StakerIterator, error) { return d.pendingStakerDiffs.GetStakerIterator(parentIterator), nil } -func (d *diff) AddSubnet(createSubnetTx *txs.Tx) { - d.addedSubnets = append(d.addedSubnets, createSubnetTx) +func (d *diff) AddSubnet(subnetID ids.ID) { + d.addedSubnetIDs = append(d.addedSubnetIDs, subnetID) } func (d *diff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { @@ -451,8 +451,8 @@ func (d *diff) Apply(baseState Chain) error { } } } - for _, subnet := range 
d.addedSubnets { - baseState.AddSubnet(subnet) + for _, subnetID := range d.addedSubnetIDs { + baseState.AddSubnet(subnetID) } for _, tx := range d.transformedSubnets { baseState.AddSubnetTransformation(tx) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 87fd59714029..9b8fe1e6486b 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -257,14 +257,14 @@ func TestDiffSubnet(t *testing.T) { Owner: fx.NewMockOwner(ctrl), }, } - state.AddSubnet(parentStateCreateSubnetTx) + state.AddSubnet(parentStateCreateSubnetTx.ID()) // Verify parent returns one subnet - subnets, err := state.GetSubnets() + subnetIDs, err := state.GetSubnetIDs() require.NoError(err) - require.Equal([]*txs.Tx{ - parentStateCreateSubnetTx, - }, subnets) + require.Equal([]ids.ID{ + parentStateCreateSubnetTx.ID(), + }, subnetIDs) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() @@ -279,18 +279,18 @@ func TestDiffSubnet(t *testing.T) { Owner: fx.NewMockOwner(ctrl), }, } - diff.AddSubnet(createSubnetTx) + diff.AddSubnet(createSubnetTx.ID()) // Apply diff to parent state require.NoError(diff.Apply(state)) // Verify parent now returns two subnets - subnets, err = state.GetSubnets() + subnetIDs, err = state.GetSubnetIDs() require.NoError(err) - require.Equal([]*txs.Tx{ - parentStateCreateSubnetTx, - createSubnetTx, - }, subnets) + require.Equal([]ids.ID{ + parentStateCreateSubnetTx.ID(), + createSubnetTx.ID(), + }, subnetIDs) } func TestDiffChain(t *testing.T) { @@ -547,7 +547,7 @@ func TestDiffSubnetOwner(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(owner) - state.AddSubnet(createSubnetTx) + state.AddSubnet(subnetID) state.SetSubnetOwner(subnetID, owner1) owner, err = state.GetSubnetOwner(subnetID) @@ -610,7 +610,7 @@ func TestDiffStacking(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(owner) - state.AddSubnet(createSubnetTx) + state.AddSubnet(subnetID) 
state.SetSubnetOwner(subnetID, owner1) owner, err = state.GetSubnetOwner(subnetID) diff --git a/vms/platformvm/state/metadata_codec.go b/vms/platformvm/state/metadata_codec.go index 65832ed77460..f2f5478a89d3 100644 --- a/vms/platformvm/state/metadata_codec.go +++ b/vms/platformvm/state/metadata_codec.go @@ -5,7 +5,6 @@ package state import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -23,8 +22,8 @@ const ( var MetadataCodec codec.Manager func init() { - c0 := linearcodec.New(time.Time{}, []string{CodecVersion0Tag}, math.MaxInt32) - c1 := linearcodec.New(time.Time{}, []string{CodecVersion0Tag, CodecVersion1Tag}, math.MaxInt32) + c0 := linearcodec.New([]string{CodecVersion0Tag}) + c1 := linearcodec.New([]string{CodecVersion0Tag, CodecVersion1Tag}) MetadataCodec = codec.NewManager(math.MaxInt32) err := utils.Err( diff --git a/vms/platformvm/state/metadata_validator.go b/vms/platformvm/state/metadata_validator.go index 0c725368505b..340c7205350a 100644 --- a/vms/platformvm/state/metadata_validator.go +++ b/vms/platformvm/state/metadata_validator.go @@ -6,6 +6,7 @@ package state import ( "time" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" @@ -17,7 +18,7 @@ import ( // [preDelegateeRewardMetadata]. 
// // CodecVersionLen + UpDurationLen + LastUpdatedLen + PotentialRewardLen -const preDelegateeRewardSize = wrappers.ShortLen + 3*wrappers.LongLen +const preDelegateeRewardSize = codec.VersionSize + 3*wrappers.LongLen var _ validatorState = (*metadata)(nil) diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index cfb9dd16944a..c1321567e6a9 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -75,7 +75,7 @@ func (mr *MockChainMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { } // AddSubnet mocks base method. -func (m *MockChain) AddSubnet(arg0 *txs.Tx) { +func (m *MockChain) AddSubnet(arg0 ids.ID) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddSubnet", arg0) } @@ -523,7 +523,7 @@ func (mr *MockDiffMockRecorder) AddRewardUTXO(arg0, arg1 any) *gomock.Call { } // AddSubnet mocks base method. -func (m *MockDiff) AddSubnet(arg0 *txs.Tx) { +func (m *MockDiff) AddSubnet(arg0 ids.ID) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddSubnet", arg0) } @@ -1009,7 +1009,7 @@ func (mr *MockStateMockRecorder) AddStatelessBlock(arg0 any) *gomock.Call { } // AddSubnet mocks base method. -func (m *MockState) AddSubnet(arg0 *txs.Tx) { +func (m *MockState) AddSubnet(arg0 ids.ID) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddSubnet", arg0) } @@ -1410,6 +1410,21 @@ func (mr *MockStateMockRecorder) GetStatelessBlock(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockState)(nil).GetStatelessBlock), arg0) } +// GetSubnetIDs mocks base method. +func (m *MockState) GetSubnetIDs() ([]ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetIDs") + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetIDs indicates an expected call of GetSubnetIDs. 
+func (mr *MockStateMockRecorder) GetSubnetIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetIDs", reflect.TypeOf((*MockState)(nil).GetSubnetIDs)) +} + // GetSubnetOwner mocks base method. func (m *MockState) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -1440,21 +1455,6 @@ func (mr *MockStateMockRecorder) GetSubnetTransformation(arg0 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockState)(nil).GetSubnetTransformation), arg0) } -// GetSubnets mocks base method. -func (m *MockState) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockStateMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockState)(nil).GetSubnets)) -} - // GetTimestamp mocks base method. func (m *MockState) GetTimestamp() time.Time { m.ctrl.T.Helper() @@ -1516,20 +1516,6 @@ func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1) } -// PruneAndIndex mocks base method. -func (m *MockState) PruneAndIndex(arg0 sync.Locker, arg1 logging.Logger) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PruneAndIndex", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// PruneAndIndex indicates an expected call of PruneAndIndex. -func (mr *MockStateMockRecorder) PruneAndIndex(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PruneAndIndex", reflect.TypeOf((*MockState)(nil).PruneAndIndex), arg0, arg1) -} - // PutCurrentDelegator mocks base method. 
func (m *MockState) PutCurrentDelegator(arg0 *Staker) { m.ctrl.T.Helper() @@ -1578,6 +1564,20 @@ func (mr *MockStateMockRecorder) PutPendingValidator(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPendingValidator", reflect.TypeOf((*MockState)(nil).PutPendingValidator), arg0) } +// ReindexBlocks mocks base method. +func (m *MockState) ReindexBlocks(arg0 sync.Locker, arg1 logging.Logger) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReindexBlocks", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReindexBlocks indicates an expected call of ReindexBlocks. +func (mr *MockStateMockRecorder) ReindexBlocks(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReindexBlocks", reflect.TypeOf((*MockState)(nil).ReindexBlocks), arg0, arg1) +} + // SetCurrentSupply mocks base method. func (m *MockState) SetCurrentSupply(arg0 ids.ID, arg1 uint64) { m.ctrl.T.Helper() @@ -1666,21 +1666,6 @@ func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 any) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3) } -// ShouldPrune mocks base method. -func (m *MockState) ShouldPrune() (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ShouldPrune") - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ShouldPrune indicates an expected call of ShouldPrune. -func (mr *MockStateMockRecorder) ShouldPrune() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldPrune", reflect.TypeOf((*MockState)(nil).ShouldPrune)) -} - // UTXOIDs mocks base method. 
func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error) { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index abba557d06b0..2c090422e06d 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -47,10 +47,10 @@ import ( ) const ( - pruneCommitLimit = 1024 - pruneCommitSleepMultiplier = 5 - pruneCommitSleepCap = 10 * time.Second - pruneUpdateFrequency = 30 * time.Second + indexIterationLimit = 4096 + indexIterationSleepMultiplier = 5 + indexIterationSleepCap = 10 * time.Second + indexLogFrequency = 30 * time.Second ) var ( @@ -59,35 +59,33 @@ var ( errValidatorSetAlreadyPopulated = errors.New("validator set already populated") errIsNotSubnet = errors.New("is not a subnet") - BlockIDPrefix = []byte("blockID") - BlockPrefix = []byte("block") - ValidatorsPrefix = []byte("validators") - CurrentPrefix = []byte("current") - PendingPrefix = []byte("pending") - ValidatorPrefix = []byte("validator") - DelegatorPrefix = []byte("delegator") - SubnetValidatorPrefix = []byte("subnetValidator") - SubnetDelegatorPrefix = []byte("subnetDelegator") - NestedValidatorWeightDiffsPrefix = []byte("validatorDiffs") - NestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") - FlatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") - FlatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") - TxPrefix = []byte("tx") - RewardUTXOsPrefix = []byte("rewardUTXOs") - UTXOPrefix = []byte("utxo") - SubnetPrefix = []byte("subnet") - SubnetOwnerPrefix = []byte("subnetOwner") - TransformedSubnetPrefix = []byte("transformedSubnet") - SupplyPrefix = []byte("supply") - ChainPrefix = []byte("chain") - SingletonPrefix = []byte("singleton") - - TimestampKey = []byte("timestamp") - CurrentSupplyKey = []byte("current supply") - LastAcceptedKey = []byte("last accepted") - HeightsIndexedKey = []byte("heights indexed") - InitializedKey = []byte("initialized") - PrunedKey = []byte("pruned") + 
BlockIDPrefix = []byte("blockID") + BlockPrefix = []byte("block") + ValidatorsPrefix = []byte("validators") + CurrentPrefix = []byte("current") + PendingPrefix = []byte("pending") + ValidatorPrefix = []byte("validator") + DelegatorPrefix = []byte("delegator") + SubnetValidatorPrefix = []byte("subnetValidator") + SubnetDelegatorPrefix = []byte("subnetDelegator") + ValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") + ValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") + TxPrefix = []byte("tx") + RewardUTXOsPrefix = []byte("rewardUTXOs") + UTXOPrefix = []byte("utxo") + SubnetPrefix = []byte("subnet") + SubnetOwnerPrefix = []byte("subnetOwner") + TransformedSubnetPrefix = []byte("transformedSubnet") + SupplyPrefix = []byte("supply") + ChainPrefix = []byte("chain") + SingletonPrefix = []byte("singleton") + + TimestampKey = []byte("timestamp") + CurrentSupplyKey = []byte("current supply") + LastAcceptedKey = []byte("last accepted") + HeightsIndexedKey = []byte("heights indexed") + InitializedKey = []byte("initialized") + BlocksReindexedKey = []byte("blocks reindexed") ) // Chain collects all methods to manage the state of the chain for block @@ -106,7 +104,7 @@ type Chain interface { AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) - AddSubnet(createSubnetTx *txs.Tx) + AddSubnet(subnetID ids.ID) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) SetSubnetOwner(subnetID ids.ID, owner fx.Owner) @@ -136,7 +134,7 @@ type State interface { GetBlockIDAtHeight(height uint64) (ids.ID, error) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) - GetSubnets() ([]*txs.Tx, error) + GetSubnetIDs() ([]ids.ID, error) GetChains(subnetID ids.ID) ([]*txs.Tx, error) // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis @@ -181,17 +179,13 @@ type State interface { // Discard uncommitted changes to the database. Abort() - // Returns if the state should be pruned and indexed to remove rejected - // blocks and generate the block height index. 
+ // ReindexBlocks converts any block indices using the legacy storage format + // to the new format. If this database has already updated the indices, + // this function will return immediately, without iterating over the + // database. // - // TODO: Remove after v1.11.x is activated - ShouldPrune() (bool, error) - - // Removes rejected blocks from disk and indexes accepted blocks by height. This - // function supports being (and is recommended to be) called asynchronously. - // - // TODO: Remove after v1.11.x is activated - PruneAndIndex(sync.Locker, logging.Logger) error + // TODO: Remove after v1.12.x is activated + ReindexBlocks(lock sync.Locker, log logging.Logger) error // Commit changes to the base database. Commit() error @@ -205,9 +199,12 @@ type State interface { Close() error } -// TODO: Remove after v1.11.x is activated +// Prior to https://github.com/ava-labs/avalanchego/pull/1719, blocks were +// stored as a map from blkID to stateBlk. Nodes synced prior to this PR may +// still have blocks partially stored using this legacy format. +// +// TODO: Remove after v1.12.x is activated type stateBlk struct { - Blk block.Block Bytes []byte `serialize:"true"` Status choices.Status `serialize:"true"` } @@ -241,17 +238,9 @@ type stateBlk struct { * | | '-. subnetDelegator * | | '-. list * | | '-- txID -> nil - * | |-. nested weight diffs TODO: Remove once only the flat db is needed - * | | '-. height+subnet - * | | '-. list - * | | '-- nodeID -> weightChange - * | |-. nested pub key diffs TODO: Remove once only the flat db is needed - * | | '-. height - * | | '-. list - * | | '-- nodeID -> compressed public key - * | |-. flat weight diffs + * | |-. weight diffs * | | '-- subnet+height+nodeID -> weightChange - * | '-. flat pub key diffs + * | '-. pub key diffs * | '-- subnet+height+nodeID -> uncompressed public key or nil * |-. blockIDs * | '-- height -> blockID @@ -276,7 +265,7 @@ type stateBlk struct { * | '-- txID -> nil * '-. 
singletons * |-- initializedKey -> nil - * |-- prunedKey -> nil + * |-- blocksReindexedKey -> nil * |-- timestampKey -> timestamp * |-- currentSupplyKey -> currentSupply * |-- lastAcceptedKey -> lastAccepted @@ -299,11 +288,11 @@ type state struct { currentHeight uint64 addedBlockIDs map[uint64]ids.ID // map of height -> blockID - blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database + blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID; if the entry is ids.Empty, it is not in the database blockIDDB database.Database addedBlocks map[ids.ID]block.Block // map of blockID -> Block - blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database + blockCache cache.Cacher[ids.ID, block.Block] // cache of blockID -> Block; if the entry is nil, it is not in the database blockDB database.Database validatorsDB database.Database @@ -326,39 +315,36 @@ type state struct { pendingSubnetDelegatorBaseDB database.Database pendingSubnetDelegatorList linkeddb.LinkedDB - nestedValidatorWeightDiffsDB database.Database - nestedValidatorPublicKeyDiffsDB database.Database - flatValidatorWeightDiffsDB database.Database - flatValidatorPublicKeyDiffsDB database.Database + validatorWeightDiffsDB database.Database + validatorPublicKeyDiffsDB database.Database addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. 
If the entry is nil, it isn't in the database + txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}; if the entry is nil, it is not in the database txDB database.Database addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO rewardUTXODB database.Database - modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed + modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO; if the UTXO is nil, it has been removed utxoDB database.Database utxoState avax.UTXOState - cachedSubnets []*txs.Tx // nil if the subnets haven't been loaded - addedSubnets []*txs.Tx - subnetBaseDB database.Database - subnetDB linkeddb.LinkedDB + cachedSubnetIDs []ids.ID // nil if the subnets haven't been loaded + addedSubnetIDs []ids.ID + subnetBaseDB database.Database + subnetDB linkeddb.LinkedDB - // Subnet ID --> Owner of the subnet - subnetOwners map[ids.ID]fx.Owner - subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner if the entry is nil, it is not in the database + subnetOwners map[ids.ID]fx.Owner // map of subnetID -> owner + subnetOwnerCache cache.Cacher[ids.ID, fxOwnerAndSize] // cache of subnetID -> owner; if the entry is nil, it is not in the database subnetOwnerDB database.Database transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database + transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx; if the entry is nil, it is not in the database transformedSubnetDB database.Database modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database + 
supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply; if the entry is nil, it is not in the database supplyDB database.Database addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet @@ -371,8 +357,9 @@ type state struct { currentSupply, persistedCurrentSupply uint64 // [lastAccepted] is the most recently accepted block. lastAccepted, persistedLastAccepted ids.ID - indexedHeights *heightRange - singletonDB database.Database + // TODO: Remove indexedHeights once v1.11.3 has been released. + indexedHeights *heightRange + singletonDB database.Database } // heightRange is used to track which heights are safe to use the native DB @@ -406,11 +393,6 @@ func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { return nil } -type heightWithSubnet struct { - Height uint64 `serialize:"true"` - SubnetID ids.ID `serialize:"true"` -} - type txBytesAndStatus struct { Tx []byte `serialize:"true"` Status status.Status `serialize:"true"` @@ -477,27 +459,6 @@ func New( return nil, err } - // Before we start accepting new blocks, we check if the pruning process needs - // to be run. - // - // TODO: Cleanup after v1.11.x is activated - shouldPrune, err := s.ShouldPrune() - if err != nil { - return nil, err - } - if shouldPrune { - // If the pruned key is on disk, we must delete it to ensure our disk - // can't get into a partially pruned state if the node restarts mid-way - // through pruning. 
- if err := s.singletonDB.Delete(PrunedKey); err != nil { - return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err) - } - - if err := s.Commit(); err != nil { - return nil, fmt.Errorf("failed to commit to baseDB: %w", err) - } - } - return s, nil } @@ -544,10 +505,8 @@ func newState( pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB) pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB) - nestedValidatorWeightDiffsDB := prefixdb.New(NestedValidatorWeightDiffsPrefix, validatorsDB) - nestedValidatorPublicKeyDiffsDB := prefixdb.New(NestedValidatorPublicKeyDiffsPrefix, validatorsDB) - flatValidatorWeightDiffsDB := prefixdb.New(FlatValidatorWeightDiffsPrefix, validatorsDB) - flatValidatorPublicKeyDiffsDB := prefixdb.New(FlatValidatorPublicKeyDiffsPrefix, validatorsDB) + validatorWeightDiffsDB := prefixdb.New(ValidatorWeightDiffsPrefix, validatorsDB) + validatorPublicKeyDiffsDB := prefixdb.New(ValidatorPublicKeyDiffsPrefix, validatorsDB) txCache, err := metercacher.New( "tx_cache", @@ -645,29 +604,27 @@ func newState( currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), - validatorsDB: validatorsDB, - currentValidatorsDB: currentValidatorsDB, - currentValidatorBaseDB: currentValidatorBaseDB, - currentValidatorList: linkeddb.NewDefault(currentValidatorBaseDB), - currentDelegatorBaseDB: currentDelegatorBaseDB, - currentDelegatorList: linkeddb.NewDefault(currentDelegatorBaseDB), - currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB, - currentSubnetValidatorList: linkeddb.NewDefault(currentSubnetValidatorBaseDB), - currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB, - currentSubnetDelegatorList: linkeddb.NewDefault(currentSubnetDelegatorBaseDB), - pendingValidatorsDB: pendingValidatorsDB, - pendingValidatorBaseDB: pendingValidatorBaseDB, - pendingValidatorList: linkeddb.NewDefault(pendingValidatorBaseDB), - pendingDelegatorBaseDB: 
pendingDelegatorBaseDB, - pendingDelegatorList: linkeddb.NewDefault(pendingDelegatorBaseDB), - pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB, - pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), - pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, - pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), - nestedValidatorWeightDiffsDB: nestedValidatorWeightDiffsDB, - nestedValidatorPublicKeyDiffsDB: nestedValidatorPublicKeyDiffsDB, - flatValidatorWeightDiffsDB: flatValidatorWeightDiffsDB, - flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, + validatorsDB: validatorsDB, + currentValidatorsDB: currentValidatorsDB, + currentValidatorBaseDB: currentValidatorBaseDB, + currentValidatorList: linkeddb.NewDefault(currentValidatorBaseDB), + currentDelegatorBaseDB: currentDelegatorBaseDB, + currentDelegatorList: linkeddb.NewDefault(currentDelegatorBaseDB), + currentSubnetValidatorBaseDB: currentSubnetValidatorBaseDB, + currentSubnetValidatorList: linkeddb.NewDefault(currentSubnetValidatorBaseDB), + currentSubnetDelegatorBaseDB: currentSubnetDelegatorBaseDB, + currentSubnetDelegatorList: linkeddb.NewDefault(currentSubnetDelegatorBaseDB), + pendingValidatorsDB: pendingValidatorsDB, + pendingValidatorBaseDB: pendingValidatorBaseDB, + pendingValidatorList: linkeddb.NewDefault(pendingValidatorBaseDB), + pendingDelegatorBaseDB: pendingDelegatorBaseDB, + pendingDelegatorList: linkeddb.NewDefault(pendingDelegatorBaseDB), + pendingSubnetValidatorBaseDB: pendingSubnetValidatorBaseDB, + pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), + pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, + pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), + validatorWeightDiffsDB: validatorWeightDiffsDB, + validatorPublicKeyDiffsDB: validatorPublicKeyDiffsDB, addedTxs: make(map[ids.ID]*txAndStatus), txDB: prefixdb.New(TxPrefix, baseDB), @@ -770,70 
+727,35 @@ func (s *state) doneInit() error { return s.singletonDB.Put(InitializedKey, nil) } -func (s *state) ShouldPrune() (bool, error) { - has, err := s.singletonDB.Has(PrunedKey) - if err != nil { - return true, err - } - - // If [prunedKey] is not in [singletonDB], [PruneAndIndex()] did not finish - // execution. - if !has { - return true, nil - } - - // To ensure the db was not modified since we last ran [PruneAndIndex()], we - // must verify that [s.lastAccepted] is height indexed. - blk, err := s.GetStatelessBlock(s.lastAccepted) - if err != nil { - return true, err - } - - _, err = s.GetBlockIDAtHeight(blk.Height()) - if err == database.ErrNotFound { - return true, nil - } - - return false, err -} - -func (s *state) donePrune() error { - return s.singletonDB.Put(PrunedKey, nil) -} - -func (s *state) GetSubnets() ([]*txs.Tx, error) { - if s.cachedSubnets != nil { - return s.cachedSubnets, nil +func (s *state) GetSubnetIDs() ([]ids.ID, error) { + if s.cachedSubnetIDs != nil { + return s.cachedSubnetIDs, nil } subnetDBIt := s.subnetDB.NewIterator() defer subnetDBIt.Release() - txs := []*txs.Tx(nil) + subnetIDs := []ids.ID{} for subnetDBIt.Next() { subnetIDBytes := subnetDBIt.Key() subnetID, err := ids.ToID(subnetIDBytes) if err != nil { return nil, err } - subnetTx, _, err := s.GetTx(subnetID) - if err != nil { - return nil, err - } - txs = append(txs, subnetTx) + subnetIDs = append(subnetIDs, subnetID) } if err := subnetDBIt.Error(); err != nil { return nil, err } - txs = append(txs, s.addedSubnets...) - s.cachedSubnets = txs - return txs, nil + subnetIDs = append(subnetIDs, s.addedSubnetIDs...) 
+ s.cachedSubnetIDs = subnetIDs + return subnetIDs, nil } -func (s *state) AddSubnet(createSubnetTx *txs.Tx) { - s.addedSubnets = append(s.addedSubnets, createSubnetTx) - if s.cachedSubnets != nil { - s.cachedSubnets = append(s.cachedSubnets, createSubnetTx) +func (s *state) AddSubnet(subnetID ids.ID) { + s.addedSubnetIDs = append(s.addedSubnetIDs, subnetID) + if s.cachedSubnetIDs != nil { + s.cachedSubnetIDs = append(s.cachedSubnetIDs, subnetID) } } @@ -1138,16 +1060,14 @@ func (s *state) ApplyValidatorWeightDiffs( endHeight uint64, subnetID ids.ID, ) error { - diffIter := s.flatValidatorWeightDiffsDB.NewIteratorWithStartAndPrefix( + diffIter := s.validatorWeightDiffsDB.NewIteratorWithStartAndPrefix( marshalStartDiffKey(subnetID, startHeight), subnetID[:], ) defer diffIter.Release() prevHeight := startHeight + 1 - // TODO: Remove the index continuity checks once we are guaranteed nodes can - // not rollback to not support the new indexing mechanism. - for diffIter.Next() && s.indexedHeights != nil && s.indexedHeights.LowerBound <= endHeight { + for diffIter.Next() { if err := ctx.Err(); err != nil { return err } @@ -1185,50 +1105,7 @@ func (s *state) ApplyValidatorWeightDiffs( return err } } - if err := diffIter.Error(); err != nil { - return err - } - - // TODO: Remove this once it is assumed that all subnet validators have - // adopted the new indexing. 
- for height := prevHeight - 1; height >= endHeight; height-- { - if err := ctx.Err(); err != nil { - return err - } - - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: subnetID, - } - prefixBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, prefixStruct) - if err != nil { - return err - } - - rawDiffDB := prefixdb.New(prefixBytes, s.nestedValidatorWeightDiffsDB) - diffDB := linkeddb.NewDefault(rawDiffDB) - diffIter := diffDB.NewIterator() - defer diffIter.Release() - - for diffIter.Next() { - nodeID, err := ids.ToNodeID(diffIter.Key()) - if err != nil { - return err - } - - weightDiff := ValidatorWeightDiff{} - _, err = block.GenesisCodec.Unmarshal(diffIter.Value(), &weightDiff) - if err != nil { - return err - } - - if err := applyWeightDiff(validators, nodeID, &weightDiff); err != nil { - return err - } - } - } - - return nil + return diffIter.Error() } func applyWeightDiff( @@ -1274,7 +1151,7 @@ func (s *state) ApplyValidatorPublicKeyDiffs( startHeight uint64, endHeight uint64, ) error { - diffIter := s.flatValidatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( + diffIter := s.validatorPublicKeyDiffsDB.NewIteratorWithStartAndPrefix( marshalStartDiffKey(constants.PrimaryNetworkID, startHeight), constants.PrimaryNetworkID[:], ) @@ -1306,7 +1183,7 @@ func (s *state) ApplyValidatorPublicKeyDiffs( continue } - vdr.PublicKey = bls.DeserializePublicKey(pkBytes) + vdr.PublicKey = bls.PublicKeyFromValidUncompressedBytes(pkBytes) } // Note: this does not fallback to the linkeddb index because the linkeddb @@ -1746,7 +1623,7 @@ func (s *state) initValidatorSets() error { func (s *state) write(updateValidators bool, height uint64) error { codecVersion := CodecVersion1 - if !s.cfg.IsDurangoActivated(s.GetTimestamp()) { + if !s.cfg.UpgradeConfig.IsDurangoActivated(s.GetTimestamp()) { codecVersion = CodecVersion0 } @@ -1939,16 +1816,11 @@ func (s *state) GetStatelessBlock(blockID ids.ID) (block.Block, error) { return nil, err } - blk, status, _, 
err := parseStoredBlock(blkBytes) + blk, _, err := parseStoredBlock(blkBytes) if err != nil { return nil, err } - if status != choices.Accepted { - s.blockCache.Put(blockID, nil) - return nil, database.ErrNotFound - } - s.blockCache.Put(blockID, blk) return blk, nil } @@ -1981,10 +1853,6 @@ func (s *state) GetBlockIDAtHeight(height uint64) (ids.ID, error) { } func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecVersion uint16) error { - heightBytes := database.PackUInt64(height) - rawNestedPublicKeyDiffDB := prefixdb.New(heightBytes, s.nestedValidatorPublicKeyDiffsDB) - nestedPKDiffDB := linkeddb.NewDefault(rawNestedPublicKeyDiffDB) - for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { delete(s.currentStakers.validatorDiffs, subnetID) @@ -1996,17 +1864,6 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV delegatorDB = s.currentDelegatorList } - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: subnetID, - } - prefixBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, prefixStruct) - if err != nil { - return fmt.Errorf("failed to create prefix bytes: %w", err) - } - rawNestedWeightDiffDB := prefixdb.New(prefixBytes, s.nestedValidatorWeightDiffsDB) - nestedWeightDiffDB := linkeddb.NewDefault(rawNestedWeightDiffDB) - // Record the change in weight and/or public key for each validator. for nodeID, validatorDiff := range validatorDiffs { // Copy [nodeID] so it doesn't get overwritten next iteration. @@ -2026,7 +1883,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV // Record that the public key for the validator is being // added. This means the prior value for the public key was // nil. 
- err := s.flatValidatorPublicKeyDiffsDB.Put( + err := s.validatorPublicKeyDiffsDB.Put( marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), nil, ) @@ -2075,22 +1932,13 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV // Note: We store the uncompressed public key here as it is // significantly more efficient to parse when applying // diffs. - err := s.flatValidatorPublicKeyDiffsDB.Put( + err := s.validatorPublicKeyDiffsDB.Put( marshalDiffKey(constants.PrimaryNetworkID, height, nodeID), - bls.SerializePublicKey(staker.PublicKey), + bls.PublicKeyToUncompressedBytes(staker.PublicKey), ) if err != nil { return err } - - // TODO: Remove this once we no longer support version - // rollbacks. - // - // Note: We store the compressed public key here. - pkBytes := bls.PublicKeyToBytes(staker.PublicKey) - if err := nestedPKDiffDB.Put(nodeID.Bytes(), pkBytes); err != nil { - return err - } } if err := validatorDB.Delete(staker.TxID[:]); err != nil { @@ -2115,7 +1963,7 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV continue } - err = s.flatValidatorWeightDiffsDB.Put( + err = s.validatorWeightDiffsDB.Put( marshalDiffKey(subnetID, height, nodeID), marshalWeightDiff(weightDiff), ) @@ -2123,15 +1971,6 @@ func (s *state) writeCurrentStakers(updateValidators bool, height uint64, codecV return err } - // TODO: Remove this once we no longer support version rollbacks. 
- weightDiffBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, weightDiff) - if err != nil { - return fmt.Errorf("failed to serialize validator weight diff: %w", err) - } - if err := nestedWeightDiffDB.Put(nodeID.Bytes(), weightDiffBytes); err != nil { - return err - } - // TODO: Move the validator set management out of the state package if !updateValidators { continue @@ -2341,14 +2180,12 @@ func (s *state) writeUTXOs() error { } func (s *state) writeSubnets() error { - for _, subnet := range s.addedSubnets { - subnetID := subnet.ID() - + for _, subnetID := range s.addedSubnetIDs { if err := s.subnetDB.Put(subnetID[:], nil); err != nil { return fmt.Errorf("failed to write subnet: %w", err) } } - s.addedSubnets = nil + s.addedSubnetIDs = nil return nil } @@ -2437,7 +2274,6 @@ func (s *state) writeMetadata() error { } s.persistedLastAccepted = s.lastAccepted } - if s.indexedHeights != nil { indexedHeightsBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, s.indexedHeights) if err != nil { @@ -2447,107 +2283,102 @@ func (s *state) writeMetadata() error { return fmt.Errorf("failed to write indexed range: %w", err) } } - return nil } -// Returns the block, status of the block, and whether it is a [stateBlk]. +// Returns the block and whether it is a [stateBlk]. 
// Invariant: blkBytes is safe to parse with blocks.GenesisCodec // -// TODO: Remove after v1.11.x is activated -func parseStoredBlock(blkBytes []byte) (block.Block, choices.Status, bool, error) { +// TODO: Remove after v1.12.x is activated +func parseStoredBlock(blkBytes []byte) (block.Block, bool, error) { // Attempt to parse as blocks.Block blk, err := block.Parse(block.GenesisCodec, blkBytes) if err == nil { - return blk, choices.Accepted, false, nil + return blk, false, nil } // Fallback to [stateBlk] blkState := stateBlk{} if _, err := block.GenesisCodec.Unmarshal(blkBytes, &blkState); err != nil { - return nil, choices.Processing, false, err + return nil, false, err } - blkState.Blk, err = block.Parse(block.GenesisCodec, blkState.Bytes) + blk, err = block.Parse(block.GenesisCodec, blkState.Bytes) + return blk, true, err +} + +func (s *state) ReindexBlocks(lock sync.Locker, log logging.Logger) error { + has, err := s.singletonDB.Has(BlocksReindexedKey) if err != nil { - return nil, choices.Processing, false, err + return err + } + if has { + log.Info("blocks already reindexed") + return nil } - return blkState.Blk, blkState.Status, true, nil -} - -func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error { - lock.Lock() - // It is possible that new blocks are added after grabbing this iterator. New - // blocks are guaranteed to be accepted and height-indexed, so we don't need to - // check them. + // It is possible that new blocks are added after grabbing this iterator. + // New blocks are guaranteed to be persisted in the new format, so we don't + // need to check them. blockIterator := s.blockDB.NewIterator() - // Releasing is done using a closure to ensure that updating blockIterator will - // result in having the most recent iterator released when executing the - // deferred function. 
+ // Releasing is done using a closure to ensure that updating blockIterator + // will result in having the most recent iterator released when executing + // the deferred function. defer func() { blockIterator.Release() }() - // While we are pruning the disk, we disable caching of the data we are - // modifying. Caching is re-enabled when pruning finishes. - // - // Note: If an unexpected error occurs the caches are never re-enabled. - // That's fine as the node is going to be in an unhealthy state regardless. - oldBlockIDCache := s.blockIDCache - s.blockIDCache = &cache.Empty[uint64, ids.ID]{} - lock.Unlock() - - log.Info("starting state pruning and indexing") + log.Info("starting block reindexing") var ( - startTime = time.Now() - lastCommit = startTime - lastUpdate = startTime - numPruned = 0 - numIndexed = 0 + startTime = time.Now() + lastCommit = startTime + nextUpdate = startTime.Add(indexLogFrequency) + numIndicesChecked = 0 + numIndicesUpdated = 0 ) for blockIterator.Next() { - blkBytes := blockIterator.Value() - - blk, status, isStateBlk, err := parseStoredBlock(blkBytes) + valueBytes := blockIterator.Value() + blk, isStateBlk, err := parseStoredBlock(valueBytes) if err != nil { - return err + return fmt.Errorf("failed to parse block: %w", err) } - if status != choices.Accepted { - // Remove non-accepted blocks from disk. - if err := s.blockDB.Delete(blockIterator.Key()); err != nil { - return fmt.Errorf("failed to delete block: %w", err) - } - - numPruned++ - - // We don't index the height of non-accepted blocks. - continue - } - - blkHeight := blk.Height() blkID := blk.ID() - // Populate the map of height -> blockID. - heightKey := database.PackUInt64(blkHeight) - if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { - return fmt.Errorf("failed to add blockID: %w", err) - } - - // Since we only store accepted blocks on disk, we only need to store a map of - // ids.ID to Block. 
+ // This block was previously stored using the legacy format, update the + // index to remove the usage of stateBlk. if isStateBlk { + blkBytes := blk.Bytes() if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { return fmt.Errorf("failed to write block: %w", err) } + + numIndicesUpdated++ } - numIndexed++ + numIndicesChecked++ - if numIndexed%pruneCommitLimit == 0 { + now := time.Now() + if now.After(nextUpdate) { + nextUpdate = now.Add(indexLogFrequency) + + progress := timer.ProgressFromHash(blkID[:]) + eta := timer.EstimateETA( + startTime, + progress, + math.MaxUint64, + ) + + log.Info("reindexing blocks", + zap.Int("numIndicesUpdated", numIndicesUpdated), + zap.Int("numIndicesChecked", numIndicesChecked), + zap.Duration("eta", eta), + ) + } + + if numIndicesChecked%indexIterationLimit == 0 { // We must hold the lock during committing to make sure we don't // attempt to commit to disk while a block is concurrently being // accepted. @@ -2565,36 +2396,18 @@ func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error { // clean up deleted state. blockIterator.Release() - now := time.Now() - if now.Sub(lastUpdate) > pruneUpdateFrequency { - lastUpdate = now - - progress := timer.ProgressFromHash(blkID[:]) - eta := timer.EstimateETA( - startTime, - progress, - math.MaxUint64, - ) - - log.Info("committing state pruning and indexing", - zap.Int("numPruned", numPruned), - zap.Int("numIndexed", numIndexed), - zap.Duration("eta", eta), - ) - } - // We take the minimum here because it's possible that the node is // currently bootstrapping. This would mean that grabbing the lock // could take an extremely long period of time; which we should not // delay processing for. 
- pruneDuration := now.Sub(lastCommit) + indexDuration := now.Sub(lastCommit) sleepDuration := min( - pruneCommitSleepMultiplier*pruneDuration, - pruneCommitSleepCap, + indexIterationSleepMultiplier*indexDuration, + indexIterationSleepCap, ) time.Sleep(sleepDuration) - // Make sure not to include the sleep duration into the next prune + // Make sure not to include the sleep duration into the next index // duration. lastCommit = time.Now() @@ -2602,33 +2415,27 @@ func (s *state) PruneAndIndex(lock sync.Locker, log logging.Logger) error { } } - // Ensure we fully iterated over all blocks before writing that pruning has + // Ensure we fully iterated over all blocks before writing that indexing has // finished. // // Note: This is needed because a transient read error could cause the // iterator to stop early. if err := blockIterator.Error(); err != nil { - return err + return fmt.Errorf("failed to iterate over historical blocks: %w", err) } - if err := s.donePrune(); err != nil { - return err + if err := s.singletonDB.Put(BlocksReindexedKey, nil); err != nil { + return fmt.Errorf("failed to put marked blocks as reindexed: %w", err) } - // We must hold the lock during committing to make sure we don't - // attempt to commit to disk while a block is concurrently being - // accepted. + // We must hold the lock during committing to make sure we don't attempt to + // commit to disk while a block is concurrently being accepted. lock.Lock() defer lock.Unlock() - // Make sure we flush the original cache before re-enabling it to prevent - // surfacing any stale data. 
- oldBlockIDCache.Flush() - s.blockIDCache = oldBlockIDCache - - log.Info("finished state pruning and indexing", - zap.Int("numPruned", numPruned), - zap.Int("numIndexed", numIndexed), + log.Info("finished block reindexing", + zap.Int("numIndicesUpdated", numIndicesUpdated), + zap.Int("numIndicesChecked", numIndicesChecked), zap.Duration("duration", time.Since(startTime)), ) diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 01fae668335b..c6241ddc8cc4 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -7,6 +7,7 @@ import ( "context" "fmt" "math" + "sync" "testing" "time" @@ -22,6 +23,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -164,7 +166,7 @@ func TestPersistStakers(t *testing.T) { r.Equal(lastUpdated, staker.StartTime) }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) weightDiff, err := unmarshalWeightDiff(weightDiffBytes) r.NoError(err) @@ -173,7 +175,7 @@ func TestPersistStakers(t *testing.T) { Amount: staker.Weight, }, weightDiff) - blsDiffBytes, err := s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) if staker.SubnetID == constants.PrimaryNetworkID { r.NoError(err) r.Nil(blsDiffBytes) @@ -262,7 +264,7 @@ func TestPersistStakers(t 
*testing.T) { checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // validator's weight must increase of delegator's weight amount - weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) weightDiff, err := unmarshalWeightDiff(weightDiffBytes) r.NoError(err) @@ -317,11 +319,11 @@ func TestPersistStakers(t *testing.T) { r.ErrorIs(err, database.ErrNotFound) }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - // pending validators weight diff and bls diffs are not stored - _, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + // pending validators weight diff and bls diffs are not stored + _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.ErrorIs(err, database.ErrNotFound) - _, err = s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.ErrorIs(err, database.ErrNotFound) }, }, @@ -439,7 +441,7 @@ func TestPersistStakers(t *testing.T) { r.ErrorIs(err, database.ErrNotFound) }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) weightDiff, err := unmarshalWeightDiff(weightDiffBytes) r.NoError(err) @@ -448,10 +450,10 @@ func TestPersistStakers(t *testing.T) { Amount: staker.Weight, }, weightDiff) - blsDiffBytes, err := 
s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + blsDiffBytes, err := s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) if staker.SubnetID == constants.PrimaryNetworkID { r.NoError(err) - r.Equal(bls.DeserializePublicKey(blsDiffBytes), staker.PublicKey) + r.Equal(bls.PublicKeyFromValidUncompressedBytes(blsDiffBytes), staker.PublicKey) } else { r.ErrorIs(err, database.ErrNotFound) } @@ -537,7 +539,7 @@ func TestPersistStakers(t *testing.T) { checkValidatorUptimes: func(*require.Assertions, *state, *Staker) {}, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { // validator's weight must decrease of delegator's weight amount - weightDiffBytes, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + weightDiffBytes, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.NoError(err) weightDiff, err := unmarshalWeightDiff(weightDiffBytes) r.NoError(err) @@ -593,10 +595,10 @@ func TestPersistStakers(t *testing.T) { r.ErrorIs(err, database.ErrNotFound) }, checkDiffs: func(r *require.Assertions, s *state, staker *Staker, height uint64) { - _, err := s.flatValidatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + _, err := s.validatorWeightDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.ErrorIs(err, database.ErrNotFound) - _, err = s.flatValidatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) + _, err = s.validatorPublicKeyDiffsDB.Get(marshalDiffKey(staker.SubnetID, height, staker.NodeID)) r.ErrorIs(err, database.ErrNotFound) }, }, @@ -1295,10 +1297,112 @@ func requireEqualPublicKeysValidatorSet( } func TestParsedStateBlock(t *testing.T) { + var ( + require = require.New(t) + blks = makeBlocks(require) + ) + + for _, blk := range blks { + stBlk := stateBlk{ + Bytes: blk.Bytes(), + Status: 
choices.Accepted, + } + + stBlkBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &stBlk) + require.NoError(err) + + gotBlk, isStateBlk, err := parseStoredBlock(stBlkBytes) + require.NoError(err) + require.True(isStateBlk) + require.Equal(blk.ID(), gotBlk.ID()) + + gotBlk, isStateBlk, err = parseStoredBlock(blk.Bytes()) + require.NoError(err) + require.False(isStateBlk) + require.Equal(blk.ID(), gotBlk.ID()) + } +} + +func TestReindexBlocks(t *testing.T) { + var ( + require = require.New(t) + s = newInitializedState(require).(*state) + blks = makeBlocks(require) + ) + + // Populate the blocks using the legacy format. + for _, blk := range blks { + stBlk := stateBlk{ + Bytes: blk.Bytes(), + Status: choices.Accepted, + } + stBlkBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &stBlk) + require.NoError(err) + + blkID := blk.ID() + require.NoError(s.blockDB.Put(blkID[:], stBlkBytes)) + } + + // Convert the indices to the new format. + require.NoError(s.ReindexBlocks(&sync.Mutex{}, logging.NoLog{})) + + // Verify that the blocks are stored in the new format. + for _, blk := range blks { + blkID := blk.ID() + blkBytes, err := s.blockDB.Get(blkID[:]) + require.NoError(err) + + parsedBlk, err := block.Parse(block.GenesisCodec, blkBytes) + require.NoError(err) + require.Equal(blkID, parsedBlk.ID()) + } + + // Verify that the flag has been written to disk to allow skipping future + // reindexings. 
+ reindexed, err := s.singletonDB.Has(BlocksReindexedKey) + require.NoError(err) + require.True(reindexed) +} + +func TestStateSubnetOwner(t *testing.T) { require := require.New(t) - var blks []block.Block + state := newInitializedState(require) + ctrl := gomock.NewController(t) + + var ( + owner1 = fx.NewMockOwner(ctrl) + owner2 = fx.NewMockOwner(ctrl) + + createSubnetTx = &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{}, + Owner: owner1, + }, + } + + subnetID = createSubnetTx.ID() + ) + + owner, err := state.GetSubnetOwner(subnetID) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(owner) + state.AddSubnet(subnetID) + state.SetSubnetOwner(subnetID, owner1) + + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner1, owner) + + state.SetSubnetOwner(subnetID, owner2) + owner, err = state.GetSubnetOwner(subnetID) + require.NoError(err) + require.Equal(owner2, owner) +} + +func makeBlocks(require *require.Assertions) []block.Block { + var blks []block.Block { blk, err := block.NewApricotAbortBlock(ids.GenerateTestID(), 1000) require.NoError(err) @@ -1382,62 +1486,5 @@ func TestParsedStateBlock(t *testing.T) { require.NoError(err) blks = append(blks, blk) } - - for _, blk := range blks { - stBlk := stateBlk{ - Blk: blk, - Bytes: blk.Bytes(), - Status: choices.Accepted, - } - - stBlkBytes, err := block.GenesisCodec.Marshal(block.CodecVersion, &stBlk) - require.NoError(err) - - gotBlk, _, isStateBlk, err := parseStoredBlock(stBlkBytes) - require.NoError(err) - require.True(isStateBlk) - require.Equal(blk.ID(), gotBlk.ID()) - - gotBlk, _, isStateBlk, err = parseStoredBlock(blk.Bytes()) - require.NoError(err) - require.False(isStateBlk) - require.Equal(blk.ID(), gotBlk.ID()) - } -} - -func TestStateSubnetOwner(t *testing.T) { - require := require.New(t) - - state := newInitializedState(require) - ctrl := gomock.NewController(t) - - var ( - owner1 = fx.NewMockOwner(ctrl) - owner2 = fx.NewMockOwner(ctrl) - - 
createSubnetTx = &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - BaseTx: txs.BaseTx{}, - Owner: owner1, - }, - } - - subnetID = createSubnetTx.ID() - ) - - owner, err := state.GetSubnetOwner(subnetID) - require.ErrorIs(err, database.ErrNotFound) - require.Nil(owner) - - state.AddSubnet(createSubnetTx) - state.SetSubnetOwner(subnetID, owner1) - - owner, err = state.GetSubnetOwner(subnetID) - require.NoError(err) - require.Equal(owner1, owner) - - state.SetSubnetOwner(subnetID, owner2) - owner, err = state.GetSubnetOwner(subnetID) - require.NoError(err) - require.Equal(owner2, owner) + return blks } diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index d7483f5ba609..152706391aa3 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -525,7 +525,7 @@ func TestAddPermissionlessPrimaryDelegatorSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x05, // amount 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // number of signature indicies + // number of signature indices 0x00, 0x00, 0x00, 0x00, // memo length 0x00, 0x00, 0x00, 0x14, @@ -1281,7 +1281,7 @@ func TestAddPermissionlessSubnetDelegatorSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x05, // amount 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // number of signature indicies + // number of signature indices 0x00, 0x00, 0x00, 0x00, // memo length 0x00, 0x00, 0x00, 0x14, diff --git a/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 96828c94c160..389f28b13e2a 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -585,7 +585,7 @@ func TestAddPermissionlessPrimaryValidator(t *testing.T) { 0x00, 0x00, 0x00, 0x05, // amount 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // number of 
signature indicies + // number of signature indices 0x00, 0x00, 0x00, 0x00, // memo length 0x00, 0x00, 0x00, 0x14, @@ -1272,7 +1272,7 @@ func TestAddPermissionlessSubnetValidator(t *testing.T) { 0x00, 0x00, 0x00, 0x05, // amount 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // number of signature indicies + // number of signature indices 0x00, 0x00, 0x00, 0x00, // memo length 0x00, 0x00, 0x00, 0x14, diff --git a/vms/platformvm/txs/builder/builder.go b/vms/platformvm/txs/builder/builder.go deleted file mode 100644 index 626edf6e56ee..000000000000 --- a/vms/platformvm/txs/builder/builder.go +++ /dev/null @@ -1,940 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package builder - -import ( - "errors" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/config" - "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/utxo" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -// Max number of items allowed in a page -const MaxPageSize = 1024 - -var ( - _ Builder = (*builder)(nil) - - ErrNoFunds = errors.New("no spendable funds were found") -) - -type Builder interface { - AtomicTxBuilder - DecisionTxBuilder - ProposalTxBuilder -} - -type AtomicTxBuilder interface { - // chainID: chain to import UTXOs from - // to: address of recipient - // keys: keys to import the funds - 
// changeAddr: address to send change to, if there is any - NewImportTx( - chainID ids.ID, - to ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // amount: amount of tokens to export - // chainID: chain to send the UTXOs to - // to: address of recipient - // keys: keys to pay the fee and provide the tokens - // changeAddr: address to send change to, if there is any - NewExportTx( - amount uint64, - chainID ids.ID, - to ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) -} - -type DecisionTxBuilder interface { - // subnetID: ID of the subnet that validates the new chain - // genesisData: byte repr. of genesis state of the new chain - // vmID: ID of VM this chain runs - // fxIDs: ids of features extensions this chain supports - // chainName: name of the chain - // keys: keys to sign the tx - // changeAddr: address to send change to, if there is any - NewCreateChainTx( - subnetID ids.ID, - genesisData []byte, - vmID ids.ID, - fxIDs []ids.ID, - chainName string, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // threshold: [threshold] of [ownerAddrs] needed to manage this subnet - // ownerAddrs: control addresses for the new subnet - // keys: keys to pay the fee - // changeAddr: address to send change to, if there is any - NewCreateSubnetTx( - threshold uint32, - ownerAddrs []ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - NewTransformSubnetTx( - subnetID ids.ID, - assetID ids.ID, - initialSupply uint64, - maxSupply uint64, - minConsumptionRate uint64, - maxConsumptionRate uint64, - minValidatorStake uint64, - maxValidatorStake uint64, - minStakeDuration time.Duration, - maxStakeDuration time.Duration, - minDelegationFee uint32, - minDelegatorStake uint64, - maxValidatorWeightFactor byte, - uptimeRequirement uint32, - keys 
[]*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // amount: amount the sender is sending - // owner: recipient of the funds - // keys: keys to sign the tx and pay the amount - // changeAddr: address to send change to, if there is any - NewBaseTx( - amount uint64, - owner secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) -} - -type ProposalTxBuilder interface { - // stakeAmount: amount the validator stakes - // startTime: unix time they start validating - // endTime: unix time they stop validating - // nodeID: ID of the node we want to validate with - // rewardAddress: address to send reward to, if applicable - // shares: 10,000 times percentage of reward taken from delegators - // keys: Keys providing the staked tokens - // changeAddr: Address to send change to, if there is any - NewAddValidatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - rewardAddress ids.ShortID, - shares uint32, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // stakeAmount: amount the validator stakes - // startTime: unix time they start validating - // endTime: unix time they stop validating - // nodeID: ID of the node we want to validate with - // pop: the node proof of possession - // rewardAddress: address to send reward to, if applicable - // shares: 10,000 times percentage of reward taken from delegators - // keys: Keys providing the staked tokens - // changeAddr: Address to send change to, if there is any - NewAddPermissionlessValidatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - pop *signer.ProofOfPossession, - rewardAddress ids.ShortID, - shares uint32, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // stakeAmount: amount the delegator stakes - // startTime: unix time they start delegating - // endTime: unix time they 
stop delegating - // nodeID: ID of the node we are delegating to - // rewardAddress: address to send reward to, if applicable - // keys: keys providing the staked tokens - // changeAddr: address to send change to, if there is any - NewAddDelegatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - rewardAddress ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // stakeAmount: amount the delegator stakes - // startTime: unix time they start delegating - // endTime: unix time they stop delegating - // nodeID: ID of the node we are delegating to - // rewardAddress: address to send reward to, if applicable - // keys: keys providing the staked tokens - // changeAddr: address to send change to, if there is any - NewAddPermissionlessDelegatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - rewardAddress ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // weight: sampling weight of the new validator - // startTime: unix time they start delegating - // endTime: unix time they top delegating - // nodeID: ID of the node validating - // subnetID: ID of the subnet the validator will validate - // keys: keys to use for adding the validator - // changeAddr: address to send change to, if there is any - NewAddSubnetValidatorTx( - weight, - startTime, - endTime uint64, - nodeID ids.NodeID, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // Creates a transaction that removes [nodeID] - // as a validator from [subnetID] - // keys: keys to use for removing the validator - // changeAddr: address to send change to, if there is any - NewRemoveSubnetValidatorTx( - nodeID ids.NodeID, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) - - // Creates a transaction that transfers ownership of 
[subnetID] - // threshold: [threshold] of [ownerAddrs] needed to manage this subnet - // ownerAddrs: control addresses for the new subnet - // keys: keys to use for modifying the subnet - // changeAddr: address to send change to, if there is any - NewTransferSubnetOwnershipTx( - subnetID ids.ID, - threshold uint32, - ownerAddrs []ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, - ) (*txs.Tx, error) -} - -func New( - ctx *snow.Context, - cfg *config.Config, - clk *mockable.Clock, - fx fx.Fx, - state state.State, - atomicUTXOManager avax.AtomicUTXOManager, - utxoSpender utxo.Spender, -) Builder { - return &builder{ - AtomicUTXOManager: atomicUTXOManager, - Spender: utxoSpender, - state: state, - cfg: cfg, - ctx: ctx, - clk: clk, - fx: fx, - } -} - -type builder struct { - avax.AtomicUTXOManager - utxo.Spender - state state.State - - cfg *config.Config - ctx *snow.Context - clk *mockable.Clock - fx fx.Fx -} - -func (b *builder) NewImportTx( - from ids.ID, - to ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - kc := secp256k1fx.NewKeychain(keys...) 
- - atomicUTXOs, _, _, err := b.GetAtomicUTXOs(from, kc.Addresses(), ids.ShortEmpty, ids.Empty, MaxPageSize) - if err != nil { - return nil, fmt.Errorf("problem retrieving atomic UTXOs: %w", err) - } - - importedInputs := []*avax.TransferableInput{} - signers := [][]*secp256k1.PrivateKey{} - - importedAmounts := make(map[ids.ID]uint64) - now := b.clk.Unix() - for _, utxo := range atomicUTXOs { - inputIntf, utxoSigners, err := kc.Spend(utxo.Out, now) - if err != nil { - continue - } - input, ok := inputIntf.(avax.TransferableIn) - if !ok { - continue - } - assetID := utxo.AssetID() - importedAmounts[assetID], err = math.Add64(importedAmounts[assetID], input.Amount()) - if err != nil { - return nil, err - } - importedInputs = append(importedInputs, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: utxo.Asset, - In: input, - }) - signers = append(signers, utxoSigners) - } - avax.SortTransferableInputsWithSigners(importedInputs, signers) - - if len(importedAmounts) == 0 { - return nil, ErrNoFunds // No imported UTXOs were spendable - } - - importedAVAX := importedAmounts[b.ctx.AVAXAssetID] - - ins := []*avax.TransferableInput{} - outs := []*avax.TransferableOutput{} - switch { - case importedAVAX < b.cfg.TxFee: // imported amount goes toward paying tx fee - var baseSigners [][]*secp256k1.PrivateKey - ins, outs, _, baseSigners, err = b.Spend(b.state, keys, 0, b.cfg.TxFee-importedAVAX, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - signers = append(baseSigners, signers...) 
- delete(importedAmounts, b.ctx.AVAXAssetID) - case importedAVAX == b.cfg.TxFee: - delete(importedAmounts, b.ctx.AVAXAssetID) - default: - importedAmounts[b.ctx.AVAXAssetID] -= b.cfg.TxFee - } - - for assetID, amount := range importedAmounts { - outs = append(outs, &avax.TransferableOutput{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amount, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, - }) - } - - avax.SortTransferableOutputs(outs, txs.Codec) // sort imported outputs - - // Create the transaction - utx := &txs.ImportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Outs: outs, - Ins: ins, - Memo: memo, - }}, - SourceChain: from, - ImportedInputs: importedInputs, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -// TODO: should support other assets than AVAX -func (b *builder) NewExportTx( - amount uint64, - chainID ids.ID, - to ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - toBurn, err := math.Add64(amount, b.cfg.TxFee) - if err != nil { - return nil, fmt.Errorf("amount (%d) + tx fee(%d) overflows", amount, b.cfg.TxFee) - } - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, toBurn, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - // Create the transaction - utx := &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, // Non-exported outputs - Memo: memo, - }}, - DestinationChain: chainID, - ExportedOutputs: []*avax.TransferableOutput{{ // Exported to X-Chain - Asset: avax.Asset{ID: b.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amount, - OutputOwners: secp256k1fx.OutputOwners{ - 
Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, - }}, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewCreateChainTx( - subnetID ids.ID, - genesisData []byte, - vmID ids.ID, - fxIDs []ids.ID, - chainName string, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - timestamp := b.state.GetTimestamp() - createBlockchainTxFee := b.cfg.GetCreateBlockchainTxFee(timestamp) - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, createBlockchainTxFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) - if err != nil { - return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) - } - signers = append(signers, subnetSigners) - - // Sort the provided fxIDs - utils.Sort(fxIDs) - - // Create the tx - utx := &txs.CreateChainTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }}, - SubnetID: subnetID, - ChainName: chainName, - VMID: vmID, - FxIDs: fxIDs, - GenesisData: genesisData, - SubnetAuth: subnetAuth, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewCreateSubnetTx( - threshold uint32, - ownerAddrs []ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - timestamp := b.state.GetTimestamp() - createSubnetTxFee := b.cfg.GetCreateSubnetTxFee(timestamp) - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, createSubnetTxFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - // Sort control addresses - utils.Sort(ownerAddrs) - - 
// Create the tx - utx := &txs.CreateSubnetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }}, - Owner: &secp256k1fx.OutputOwners{ - Threshold: threshold, - Addrs: ownerAddrs, - }, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewTransformSubnetTx( - subnetID ids.ID, - assetID ids.ID, - initialSupply uint64, - maxSupply uint64, - minConsumptionRate uint64, - maxConsumptionRate uint64, - minValidatorStake uint64, - maxValidatorStake uint64, - minStakeDuration time.Duration, - maxStakeDuration time.Duration, - minDelegationFee uint32, - minDelegatorStake uint64, - maxValidatorWeightFactor byte, - uptimeRequirement uint32, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TransformSubnetTxFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) - if err != nil { - return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) - } - signers = append(signers, subnetSigners) - - utx := &txs.TransformSubnetTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }, - }, - Subnet: subnetID, - AssetID: assetID, - InitialSupply: initialSupply, - MaximumSupply: maxSupply, - MinConsumptionRate: minConsumptionRate, - MaxConsumptionRate: maxConsumptionRate, - MinValidatorStake: minValidatorStake, - MaxValidatorStake: maxValidatorStake, - MinStakeDuration: uint32(minStakeDuration / time.Second), - MaxStakeDuration: uint32(maxStakeDuration / time.Second), - MinDelegationFee: minDelegationFee, - MinDelegatorStake: 
minDelegatorStake, - MaxValidatorWeightFactor: maxValidatorWeightFactor, - UptimeRequirement: uptimeRequirement, - SubnetAuth: subnetAuth, - } - - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewAddValidatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - rewardAddress ids.ShortID, - shares uint32, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, unstakedOuts, stakedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkValidatorFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - // Create the tx - utx := &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: unstakedOuts, - Memo: memo, - }}, - Validator: txs.Validator{ - NodeID: nodeID, - Start: startTime, - End: endTime, - Wght: stakeAmount, - }, - StakeOuts: stakedOuts, - RewardsOwner: &secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{rewardAddress}, - }, - DelegationShares: shares, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewAddPermissionlessValidatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - pop *signer.ProofOfPossession, - rewardAddress ids.ShortID, - shares uint32, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, unstakedOuts, stakedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkValidatorFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - // Create the tx - utx := &txs.AddPermissionlessValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: 
avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: unstakedOuts, - Memo: memo, - }}, - Validator: txs.Validator{ - NodeID: nodeID, - Start: startTime, - End: endTime, - Wght: stakeAmount, - }, - Subnet: constants.PrimaryNetworkID, - Signer: pop, - StakeOuts: stakedOuts, - ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{rewardAddress}, - }, - DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{rewardAddress}, - }, - DelegationShares: shares, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewAddDelegatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - rewardAddress ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, unlockedOuts, lockedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkDelegatorFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - // Create the tx - utx := &txs.AddDelegatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: unlockedOuts, - Memo: memo, - }}, - Validator: txs.Validator{ - NodeID: nodeID, - Start: startTime, - End: endTime, - Wght: stakeAmount, - }, - StakeOuts: lockedOuts, - DelegationRewardsOwner: &secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{rewardAddress}, - }, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewAddPermissionlessDelegatorTx( - stakeAmount, - startTime, - endTime uint64, - nodeID ids.NodeID, - rewardAddress ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr 
ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, unlockedOuts, lockedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkDelegatorFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - // Create the tx - utx := &txs.AddPermissionlessDelegatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: unlockedOuts, - Memo: memo, - }}, - Validator: txs.Validator{ - NodeID: nodeID, - Start: startTime, - End: endTime, - Wght: stakeAmount, - }, - Subnet: constants.PrimaryNetworkID, - StakeOuts: lockedOuts, - DelegationRewardsOwner: &secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{rewardAddress}, - }, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewAddSubnetValidatorTx( - weight, - startTime, - endTime uint64, - nodeID ids.NodeID, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) - if err != nil { - return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) - } - signers = append(signers, subnetSigners) - - // Create the tx - utx := &txs.AddSubnetValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }}, - SubnetValidator: txs.SubnetValidator{ - Validator: txs.Validator{ - NodeID: nodeID, - Start: startTime, - End: endTime, - Wght: weight, - }, - Subnet: subnetID, - }, - SubnetAuth: subnetAuth, - } - tx, err := txs.NewSigned(utx, 
txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewRemoveSubnetValidatorTx( - nodeID ids.NodeID, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) - if err != nil { - return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) - } - signers = append(signers, subnetSigners) - - // Create the tx - utx := &txs.RemoveSubnetValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }}, - Subnet: subnetID, - NodeID: nodeID, - SubnetAuth: subnetAuth, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewTransferSubnetOwnershipTx( - subnetID ids.ID, - threshold uint32, - ownerAddrs []ids.ShortID, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - subnetAuth, subnetSigners, err := b.Authorize(b.state, subnetID, keys) - if err != nil { - return nil, fmt.Errorf("couldn't authorize tx's subnet restrictions: %w", err) - } - signers = append(signers, subnetSigners) - - utx := &txs.TransferSubnetOwnershipTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }}, - Subnet: subnetID, - SubnetAuth: subnetAuth, - Owner: &secp256k1fx.OutputOwners{ - Threshold: 
threshold, - Addrs: ownerAddrs, - }, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - -func (b *builder) NewBaseTx( - amount uint64, - owner secp256k1fx.OutputOwners, - keys []*secp256k1.PrivateKey, - changeAddr ids.ShortID, - memo []byte, -) (*txs.Tx, error) { - toBurn, err := math.Add64(amount, b.cfg.TxFee) - if err != nil { - return nil, fmt.Errorf("amount (%d) + tx fee(%d) overflows", amount, b.cfg.TxFee) - } - ins, outs, _, signers, err := b.Spend(b.state, keys, 0, toBurn, changeAddr) - if err != nil { - return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) - } - - outs = append(outs, &avax.TransferableOutput{ - Asset: avax.Asset{ID: b.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amount, - OutputOwners: owner, - }, - }) - - avax.SortTransferableOutputs(outs, txs.Codec) - - utx := &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: b.ctx.NetworkID, - BlockchainID: b.ctx.ChainID, - Ins: ins, - Outs: outs, - Memo: memo, - }, - } - tx, err := txs.NewSigned(utx, txs.Codec, signers) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} diff --git a/vms/platformvm/txs/codec.go b/vms/platformvm/txs/codec.go index 36fe2e5a8eb0..a93477af7074 100644 --- a/vms/platformvm/txs/codec.go +++ b/vms/platformvm/txs/codec.go @@ -5,7 +5,6 @@ package txs import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -28,13 +27,9 @@ var ( GenesisCodec codec.Manager ) -// TODO: Remove after v1.11.x has activated -// -// Invariant: InitCodec, Codec, and GenesisCodec must not be accessed -// concurrently -func InitCodec(durangoTime time.Time) error { - c := linearcodec.NewDefault(durangoTime) - gc := linearcodec.NewDefault(time.Time{}) +func init() { + c := linearcodec.NewDefault() + gc := linearcodec.NewDefault() errs := wrappers.Errs{} for _, c := range []linearcodec.Codec{c, gc} { 
@@ -50,24 +45,14 @@ func InitCodec(durangoTime time.Time) error { errs.Add(RegisterDUnsignedTxsTypes(c)) } - newCodec := codec.NewDefaultManager() - newGenesisCodec := codec.NewManager(math.MaxInt32) + Codec = codec.NewDefaultManager() + GenesisCodec = codec.NewManager(math.MaxInt32) errs.Add( - newCodec.RegisterCodec(CodecVersion, c), - newGenesisCodec.RegisterCodec(CodecVersion, gc), + Codec.RegisterCodec(CodecVersion, c), + GenesisCodec.RegisterCodec(CodecVersion, gc), ) if errs.Errored() { - return errs.Err - } - - Codec = newCodec - GenesisCodec = newGenesisCodec - return nil -} - -func init() { - if err := InitCodec(time.Time{}); err != nil { - panic(err) + panic(errs.Err) } } diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 4e106a82de1e..48f4426276b2 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "fmt" "testing" "time" @@ -19,6 +20,9 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func newAdvanceTimeTx(t testing.TB, timestamp time.Time) (*txs.Tx, error) { @@ -375,17 +379,21 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { } for _, staker := range test.subnetStakers { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 10, // Weight - uint64(staker.startTime.Unix()), - uint64(staker.endTime.Unix()), - staker.nodeID, // validator ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: 
txs.Validator{ + NodeID: staker.nodeID, + Start: uint64(staker.startTime.Unix()), + End: uint64(staker.endTime.Unix()), + Wght: 10, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -470,17 +478,22 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { subnetValidatorNodeID := genesisNodeIDs[0] subnetVdr1StartTime := defaultValidateStartTime subnetVdr1EndTime := defaultValidateStartTime.Add(defaultMinStakingDuration) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1StartTime.Unix()), // Start time - uint64(subnetVdr1EndTime.Unix()), // end time - subnetValidatorNodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetValidatorNodeID, + Start: uint64(subnetVdr1StartTime.Unix()), + End: uint64(subnetVdr1EndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err := state.NewCurrentStaker( @@ -500,17 +513,20 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // Queue a staker that joins the staker set after the above validator leaves subnetVdr2NodeID := genesisNodeIDs[1] - tx, err = env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time - uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), // end time - subnetVdr2NodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, 
// Keys - ids.ShortEmpty, // reward address - nil, + utx, err = builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetVdr2NodeID, + Start: uint64(subnetVdr1EndTime.Add(time.Second).Unix()), + End: uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err = state.NewPendingStaker( tx.ID(), @@ -577,17 +593,21 @@ func TestTrackedSubnet(t *testing.T) { subnetVdr1StartTime := defaultValidateStartTime.Add(1 * time.Minute) subnetVdr1EndTime := defaultValidateStartTime.Add(10 * defaultMinStakingDuration).Add(1 * time.Minute) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetVdr1StartTime.Unix()), // Start time - uint64(subnetVdr1EndTime.Unix()), // end time - subnetValidatorNodeID, // Node ID - subnetID, // Subnet ID - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: subnetValidatorNodeID, + Start: uint64(subnetVdr1StartTime.Unix()), + End: uint64(subnetVdr1EndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -680,21 +700,22 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(1 * time.Second) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - uint64(pendingDelegatorStartTime.Unix()), - uint64(pendingDelegatorEndTime.Unix()), - 
nodeID, - preFundedKeys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{ - preFundedKeys[0], - preFundedKeys[1], - preFundedKeys[4], + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(pendingDelegatorStartTime.Unix()), + End: uint64(pendingDelegatorEndTime.Unix()), + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, }, - ids.ShortEmpty, - nil, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( addDelegatorTx.ID(), @@ -779,17 +800,22 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { // Add delegator pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) pendingDelegatorEndTime := pendingDelegatorStartTime.Add(defaultMinStakingDuration) - addDelegatorTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - uint64(pendingDelegatorStartTime.Unix()), - uint64(pendingDelegatorEndTime.Unix()), - nodeID, - preFundedKeys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]) + utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(pendingDelegatorStartTime.Unix()), + End: uint64(pendingDelegatorEndTime.Unix()), + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }, ) require.NoError(err) + addDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( addDelegatorTx.ID(), @@ -837,9 
+863,9 @@ func TestAdvanceTimeTxAfterBanff(t *testing.T) { defer env.ctx.Lock.Unlock() env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time upgradeTime := env.clk.Time().Add(SyncBound) - env.config.BanffTime = upgradeTime - env.config.CortinaTime = upgradeTime - env.config.DurangoTime = upgradeTime + env.config.UpgradeConfig.BanffTime = upgradeTime + env.config.UpgradeConfig.CortinaTime = upgradeTime + env.config.UpgradeConfig.DurangoTime = upgradeTime // Proposed advancing timestamp to the banff timestamp tx, err := newAdvanceTimeTx(t, upgradeTime) @@ -892,20 +918,27 @@ func addPendingValidator( nodeID ids.NodeID, keys []*secp256k1.PrivateKey, ) (*txs.Tx, error) { - addPendingValidatorTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - ids.GenerateTestShortID(), + builder, signer := env.factory.NewWallet(keys...) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - keys, - ids.ShortEmpty, - nil, ) if err != nil { return nil, err } + addPendingValidatorTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, err + } staker, err := state.NewPendingStaker( addPendingValidatorTx.ID(), diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 8209c9756ba0..4d52432ab8fa 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "testing" "time" @@ -14,12 +15,15 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" 
"github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) // Ensure Execute fails when there are not enough control sigs @@ -29,17 +33,17 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(preFundedKeys[0], preFundedKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove a signature tx.Creds[0].(*secp256k1fx.Credential).Sigs = tx.Creds[0].(*secp256k1fx.Credential).Sigs[1:] @@ -63,17 +67,17 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Generate new, random key to sign tx with key, err := secp256k1.NewPrivateKey() @@ -104,17 +108,17 @@ func 
TestCreateChainTxNoSuchSubnet(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) tx.Unsigned.(*txs.CreateChainTx).SubnetID = ids.GenerateTestID() @@ -137,17 +141,17 @@ func TestCreateChainTxValid(t *testing.T) { env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, constants.AVMID, nil, "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -192,31 +196,30 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { require := require.New(t) env := newEnvironment(t, banff) - env.config.ApricotPhase3Time = ap3Time - - ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) - require.NoError(err) + env.config.UpgradeConfig.ApricotPhase3Time = ap3Time - subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.ID(), preFundedKeys) - require.NoError(err) + addrs := set.NewSet[ids.ShortID](len(preFundedKeys)) + for _, key := range preFundedKeys { + addrs.Add(key.Address()) + } - signers = append(signers, subnetSigners) + env.state.SetTimestamp(test.time) // to duly set fee - // 
Create the tx + cfg := *env.config - utx := &txs.CreateChainTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: env.ctx.NetworkID, - BlockchainID: env.ctx.ChainID, - Ins: ins, - Outs: outs, - }}, - SubnetID: testSubnet1.ID(), - VMID: constants.AVMID, - SubnetAuth: subnetAuth, - } - tx := &txs.Tx{Unsigned: utx} - require.NoError(tx.Sign(txs.Codec, signers)) + cfg.StaticFeeConfig.CreateBlockchainTxFee = test.fee + factory := txstest.NewWalletFactory(env.ctx, &cfg, env.state) + builder, signer := factory.NewWallet(preFundedKeys...) + utx, err := builder.NewCreateChainTx( + testSubnet1.ID(), + nil, + ids.GenerateTestID(), + nil, + "", + ) + require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index 259a5596218d..8ba9b529d565 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -4,18 +4,21 @@ package executor import ( + "context" "testing" "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestCreateSubnetTxAP3FeeChange(t *testing.T) { @@ -50,25 +53,27 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { require := require.New(t) env := newEnvironment(t, apricotPhase3) - env.config.ApricotPhase3Time = ap3Time + 
env.config.UpgradeConfig.ApricotPhase3Time = ap3Time env.ctx.Lock.Lock() defer env.ctx.Lock.Unlock() - ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) - require.NoError(err) + env.state.SetTimestamp(test.time) // to duly set fee - // Create the tx - utx := &txs.CreateSubnetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: env.ctx.NetworkID, - BlockchainID: env.ctx.ChainID, - Ins: ins, - Outs: outs, - }}, - Owner: &secp256k1fx.OutputOwners{}, + addrs := set.NewSet[ids.ShortID](len(preFundedKeys)) + for _, key := range preFundedKeys { + addrs.Add(key.Address()) } - tx := &txs.Tx{Unsigned: utx} - require.NoError(tx.Sign(txs.Codec, signers)) + + cfg := *env.config + cfg.StaticFeeConfig.CreateSubnetTxFee = test.fee + factory := txstest.NewWalletFactory(env.ctx, &cfg, env.state) + builder, signer := factory.NewWallet(preFundedKeys...) + utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{}, + ) + require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/export_test.go b/vms/platformvm/txs/executor/export_test.go index 0ee1966e6088..f962c1af8814 100644 --- a/vms/platformvm/txs/executor/export_test.go +++ b/vms/platformvm/txs/executor/export_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "testing" "time" @@ -11,7 +12,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestNewExportTx(t *testing.T) { @@ -39,7 +44,7 @@ func TestNewExportTx(t *testing.T) { description: "P->C export", destinationChainID: 
env.ctx.CChainID, sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - timestamp: env.config.ApricotPhase5Time, + timestamp: env.config.UpgradeConfig.ApricotPhase5Time, }, } @@ -48,15 +53,24 @@ func TestNewExportTx(t *testing.T) { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - tx, err := env.txBuilder.NewExportTx( - defaultBalance-defaultTxFee, // Amount of tokens to export + builder, signer := env.factory.NewWallet(tt.sourceKeys...) + utx, err := builder.NewExportTx( tt.destinationChainID, - to, - tt.sourceKeys, - ids.ShortEmpty, // Change address - nil, + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: defaultBalance - defaultTxFee, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }}, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index f319b19826a2..557fb691417c 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "fmt" "math" "testing" @@ -33,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" @@ -42,9 +42,14 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - 
"github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) const ( @@ -56,6 +61,7 @@ const ( banff cortina durango + eUpgrade ) var ( @@ -100,10 +106,9 @@ type environment struct { fx fx.Fx state state.State states map[ids.ID]state.Chain - atomicUTXOs avax.AtomicUTXOManager uptimes uptime.Manager - utxosHandler utxo.Handler - txBuilder builder.Builder + utxosHandler utxo.Verifier + factory *txstest.WalletFactory backend Backend } @@ -139,19 +144,10 @@ func newEnvironment(t *testing.T, f fork) *environment { rewards := reward.NewCalculator(config.RewardConfig) baseState := defaultState(config, ctx, baseDB, rewards) - atomicUTXOs := avax.NewAtomicUTXOManager(ctx.SharedMemory, txs.Codec) uptimes := uptime.NewManager(baseState, clk) - utxoHandler := utxo.NewHandler(ctx, clk, fx) + utxosVerifier := utxo.NewVerifier(ctx, clk, fx) - txBuilder := builder.New( - ctx, - config, - clk, - fx, - baseState, - atomicUTXOs, - utxoHandler, - ) + factory := txstest.NewWalletFactory(ctx, config, baseState) backend := Backend{ Config: config, @@ -159,7 +155,7 @@ func newEnvironment(t *testing.T, f fork) *environment { Clk: clk, Bootstrapped: &isBootstrapped, Fx: fx, - FlowChecker: utxoHandler, + FlowChecker: utxosVerifier, Uptimes: uptimes, Rewards: rewards, } @@ -174,14 +170,13 @@ func newEnvironment(t *testing.T, f fork) *environment { fx: fx, state: baseState, states: make(map[ids.ID]state.Chain), - atomicUTXOs: atomicUTXOs, uptimes: uptimes, - utxosHandler: utxoHandler, - txBuilder: txBuilder, + utxosHandler: utxosVerifier, + factory: factory, backend: backend, 
} - addSubnet(t, env, txBuilder) + addSubnet(t, env) t.Cleanup(func() { env.ctx.Lock.Lock() @@ -210,29 +205,28 @@ func newEnvironment(t *testing.T, f fork) *environment { return env } -func addSubnet( - t *testing.T, - env *environment, - txBuilder builder.Builder, -) { +func addSubnet(t *testing.T, env *environment) { require := require.New(t) - // Create a subnet - var err error - testSubnet1, err = txBuilder.NewCreateSubnetTx( - 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet - []ids.ShortID{ // control keys - preFundedKeys[0].PublicKey().Address(), - preFundedKeys[1].PublicKey().Address(), - preFundedKeys[2].PublicKey().Address(), + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + preFundedKeys[0].PublicKey().Address(), + preFundedKeys[1].PublicKey().Address(), + preFundedKeys[2].PublicKey().Address(), + }, }, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - preFundedKeys[0].PublicKey().Address(), - nil, + common.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, + }), ) require.NoError(err) + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) - // store it stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -280,57 +274,59 @@ func defaultState( } func defaultConfig(t *testing.T, f fork) *config.Config { - var ( - apricotPhase3Time = mockable.MaxTime - apricotPhase5Time = mockable.MaxTime - banffTime = mockable.MaxTime - cortinaTime = mockable.MaxTime - durangoTime = mockable.MaxTime - ) + c := &config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + Validators: validators.NewManager(), + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + 
CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: 5 * units.MilliAvax, + MaxValidatorStake: 500 * units.MilliAvax, + MinDelegatorStake: 1 * units.MilliAvax, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: reward.Config{ + MaxConsumptionRate: .12 * reward.PercentDenominator, + MinConsumptionRate: .10 * reward.PercentDenominator, + MintingPeriod: 365 * 24 * time.Hour, + SupplyCap: 720 * units.MegaAvax, + }, + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: mockable.MaxTime, + ApricotPhase5Time: mockable.MaxTime, + BanffTime: mockable.MaxTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, + EUpgradeTime: mockable.MaxTime, + }, + } switch f { + case eUpgrade: + c.UpgradeConfig.EUpgradeTime = defaultValidateStartTime.Add(-2 * time.Second) + fallthrough case durango: - durangoTime = defaultValidateStartTime.Add(-2 * time.Second) + c.UpgradeConfig.DurangoTime = defaultValidateStartTime.Add(-2 * time.Second) fallthrough case cortina: - cortinaTime = defaultValidateStartTime.Add(-2 * time.Second) + c.UpgradeConfig.CortinaTime = defaultValidateStartTime.Add(-2 * time.Second) fallthrough case banff: - banffTime = defaultValidateStartTime.Add(-2 * time.Second) + c.UpgradeConfig.BanffTime = defaultValidateStartTime.Add(-2 * time.Second) fallthrough case apricotPhase5: - apricotPhase5Time = defaultValidateEndTime + c.UpgradeConfig.ApricotPhase5Time = defaultValidateEndTime fallthrough case apricotPhase3: - apricotPhase3Time = defaultValidateEndTime + c.UpgradeConfig.ApricotPhase3Time = defaultValidateEndTime default: - require.NoError(t, fmt.Errorf("unhandled fork %d", f)) + require.FailNow(t, "unhandled fork", f) } - return &config.Config{ - Chains: chains.TestManager, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - 
MinValidatorStake: 5 * units.MilliAvax, - MaxValidatorStake: 500 * units.MilliAvax, - MinDelegatorStake: 1 * units.MilliAvax, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: reward.Config{ - MaxConsumptionRate: .12 * reward.PercentDenominator, - MinConsumptionRate: .10 * reward.PercentDenominator, - MintingPeriod: 365 * 24 * time.Hour, - SupplyCap: 720 * units.MegaAvax, - }, - ApricotPhase3Time: apricotPhase3Time, - ApricotPhase5Time: apricotPhase5Time, - BanffTime: banffTime, - CortinaTime: cortinaTime, - DurangoTime: durangoTime, - } + return c } func defaultClock(f fork) *mockable.Clock { @@ -364,7 +360,7 @@ func (fvi *fxVMInt) Logger() logging.Logger { func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ - registry: linearcodec.NewDefault(time.Time{}), + registry: linearcodec.NewDefault(), clk: clk, log: log, } diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index bc52fabc2472..7e8db3da48f0 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "math/rand" "testing" "time" @@ -17,8 +18,10 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) var fundedSharedMemoryCalls byte @@ -39,7 +42,8 @@ func TestNewImportTx(t *testing.T) { require.NoError(t, err) customAssetID := ids.GenerateTestID() - + // Use a deterministic random source so the generated UTXOs are reproducible.
+ randSrc := rand.NewSource(0) tests := []test{ { description: "can't pay fee", @@ -50,11 +54,12 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee - 1, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee - 1, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - expectedErr: utxo.ErrInsufficientFunds, + expectedErr: builder.ErrInsufficientFunds, }, { description: "can barely pay fee", @@ -65,8 +70,9 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, expectedErr: nil, @@ -80,11 +86,12 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.CChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - timestamp: env.config.ApricotPhase5Time, + timestamp: env.config.UpgradeConfig.ApricotPhase5Time, expectedErr: nil, }, { @@ -96,33 +103,37 @@ func TestNewImportTx(t *testing.T) { sourceKey, env.ctx.XChainID, map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, + env.ctx.AVAXAssetID: env.config.StaticFeeConfig.TxFee, customAssetID: 1, }, + randSrc, ), sourceKeys: []*secp256k1.PrivateKey{sourceKey}, - timestamp: env.config.ApricotPhase5Time, + timestamp: env.config.UpgradeConfig.ApricotPhase5Time, expectedErr: nil, }, } - to := ids.GenerateTestShortID() + to := &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) env.msm.SharedMemory = tt.sharedMemory - tx, err := env.txBuilder.NewImportTx( + + builder, signer := env.factory.NewWallet(tt.sourceKeys...) 
+ utx, err := builder.NewImportTx( tt.sourceChainID, to, - tt.sourceKeys, - ids.ShortEmpty, - nil, ) require.ErrorIs(err, tt.expectedErr) if tt.expectedErr != nil { return } + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) require.NoError(err) unsignedTx := tx.Unsigned.(*txs.ImportTx) @@ -142,7 +153,7 @@ func TestNewImportTx(t *testing.T) { totalOut += out.Out.Amount() } - require.Equal(env.config.TxFee, totalIn-totalOut) + require.Equal(env.config.StaticFeeConfig.TxFee, totalIn-totalOut) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -167,6 +178,7 @@ func fundedSharedMemory( sourceKey *secp256k1.PrivateKey, peerChain ids.ID, assets map[ids.ID]uint64, + randSrc rand.Source, ) atomic.SharedMemory { fundedSharedMemoryCalls++ m := atomic.NewMemory(prefixdb.New([]byte{fundedSharedMemoryCalls}, env.baseDB)) @@ -175,11 +187,10 @@ func fundedSharedMemory( peerSharedMemory := m.NewSharedMemory(peerChain) for assetID, amt := range assets { - // #nosec G404 utxo := &avax.UTXO{ UTXOID: avax.UTXOID{ TxID: ids.GenerateTestID(), - OutputIndex: rand.Uint32(), + OutputIndex: uint32(randSrc.Int63()), }, Asset: avax.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index 0f082cb8b296..c54b8207fb06 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -103,12 +103,12 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { // activation. Following the activation, AddValidatorTxs must be issued into // StandardBlocks. 
currentTimestamp := e.OnCommitState.GetTimestamp() - if e.Config.IsBanffActivated(currentTimestamp) { + if e.Config.UpgradeConfig.IsBanffActivated(currentTimestamp) { return fmt.Errorf( "%w: timestamp (%s) >= Banff fork time (%s)", ErrProposedAddStakerTxAfterBanff, currentTimestamp, - e.Config.BanffTime, + e.Config.UpgradeConfig.BanffTime, ) } @@ -150,12 +150,12 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) // activation. Following the activation, AddSubnetValidatorTxs must be // issued into StandardBlocks. currentTimestamp := e.OnCommitState.GetTimestamp() - if e.Config.IsBanffActivated(currentTimestamp) { + if e.Config.UpgradeConfig.IsBanffActivated(currentTimestamp) { return fmt.Errorf( "%w: timestamp (%s) >= Banff fork time (%s)", ErrProposedAddStakerTxAfterBanff, currentTimestamp, - e.Config.BanffTime, + e.Config.UpgradeConfig.BanffTime, ) } @@ -196,12 +196,12 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { // activation. Following the activation, AddDelegatorTxs must be issued into // StandardBlocks. 
currentTimestamp := e.OnCommitState.GetTimestamp() - if e.Config.IsBanffActivated(currentTimestamp) { + if e.Config.UpgradeConfig.IsBanffActivated(currentTimestamp) { return fmt.Errorf( "%w: timestamp (%s) >= Banff fork time (%s)", ErrProposedAddStakerTxAfterBanff, currentTimestamp, - e.Config.BanffTime, + e.Config.UpgradeConfig.BanffTime, ) } @@ -248,12 +248,12 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { // Validate [newChainTime] newChainTime := tx.Timestamp() - if e.Config.IsBanffActivated(newChainTime) { + if e.Config.UpgradeConfig.IsBanffActivated(newChainTime) { return fmt.Errorf( "%w: proposed timestamp (%s) >= Banff fork time (%s)", ErrAdvanceTimeTxIssuedAfterBanff, newChainTime, - e.Config.BanffTime, + e.Config.UpgradeConfig.BanffTime, ) } @@ -269,7 +269,7 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { // Only allow timestamp to move forward as far as the time of next staker // set change time - nextStakerChangeTime, err := GetNextStakerChangeTime(e.OnCommitState) + nextStakerChangeTime, err := state.GetNextStakerChangeTime(e.OnCommitState) if err != nil { return err } @@ -558,7 +558,7 @@ func (e *ProposalTxExecutor) rewardDelegatorTx(uDelegatorTx txs.DelegatorTx, del } // Reward the delegatee here - if e.Config.IsCortinaActivated(validator.StartTime) { + if e.Config.UpgradeConfig.IsCortinaActivated(validator.StartTime) { previousDelegateeReward, err := e.OnCommitState.GetDelegateeReward( validator.SubnetID, validator.NodeID, diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index a6ecc21b4059..cb547d4d589d 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -4,7 +4,7 @@ package executor import ( - "fmt" + "context" "math" "testing" "time" @@ -20,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" 
"github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func TestProposalTxExecuteAddDelegator(t *testing.T) { @@ -33,19 +35,26 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { // [addMinStakeValidator] adds a new validator to the primary network's // pending validator set with the minimum staking amount - addMinStakeValidator := func(target *environment) { - tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MinValidatorStake, // stake amount - newValidatorStartTime, // start time - newValidatorEndTime, // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + addMinStakeValidator := func(env *environment) { + require := require.New(t) + + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: newValidatorID, + Start: newValidatorStartTime, + End: newValidatorEndTime, + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + reward.PercentDenominator, ) - require.NoError(t, err) + require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -54,29 +63,36 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { addValTx.StartTime(), 0, ) - require.NoError(t, err) + require.NoError(err) - target.state.PutCurrentValidator(staker) - target.state.AddTx(tx, status.Committed) - target.state.SetHeight(dummyHeight) - require.NoError(t, target.state.Commit()) + env.state.PutCurrentValidator(staker) + env.state.AddTx(tx, status.Committed) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) } // 
[addMaxStakeValidator] adds a new validator to the primary network's // pending validator set with the maximum staking amount - addMaxStakeValidator := func(target *environment) { - tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MaxValidatorStake, // stake amount - newValidatorStartTime, // start time - newValidatorEndTime, // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shared - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + addMaxStakeValidator := func(env *environment) { + require := require.New(t) + + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: newValidatorID, + Start: newValidatorStartTime, + End: newValidatorEndTime, + Wght: env.config.MaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + reward.PercentDenominator, ) - require.NoError(t, err) + require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -85,197 +101,180 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { addValTx.StartTime(), 0, ) - require.NoError(t, err) + require.NoError(err) - target.state.PutCurrentValidator(staker) - target.state.AddTx(tx, status.Committed) - target.state.SetHeight(dummyHeight) - require.NoError(t, target.state.Commit()) + env.state.PutCurrentValidator(staker) + env.state.AddTx(tx, status.Committed) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) } - dummyH := newEnvironment(t, apricotPhase5) - currentTimestamp := dummyH.state.GetTimestamp() + env := newEnvironment(t, apricotPhase5) + currentTimestamp := env.state.GetTimestamp() type test struct { - description string - stakeAmount uint64 - startTime uint64 - endTime uint64 - nodeID ids.NodeID - 
rewardAddress ids.ShortID - feeKeys []*secp256k1.PrivateKey - setup func(*environment) - AP3Time time.Time - expectedErr error + description string + stakeAmount uint64 + startTime uint64 + endTime uint64 + nodeID ids.NodeID + feeKeys []*secp256k1.PrivateKey + setup func(*environment) + AP3Time time.Time + expectedErr error } tests := []test{ { - description: "validator stops validating earlier than delegator", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Unix()) + 1, - endTime: uint64(defaultValidateEndTime.Unix()) + 1, - nodeID: nodeID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - expectedErr: ErrPeriodMismatch, - }, - { - description: fmt.Sprintf("delegator should not be added more than (%s) in the future", MaxFutureStartTime), - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(currentTimestamp.Add(MaxFutureStartTime + time.Second).Unix()), - endTime: uint64(currentTimestamp.Add(MaxFutureStartTime + defaultMinStakingDuration + time.Second).Unix()), - nodeID: nodeID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - expectedErr: ErrFutureStakeTime, + description: "validator stops validating earlier than delegator", + stakeAmount: env.config.MinDelegatorStake, + startTime: uint64(defaultValidateStartTime.Unix()) + 1, + endTime: uint64(defaultValidateEndTime.Unix()) + 1, + nodeID: nodeID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: nil, + AP3Time: defaultGenesisTime, + expectedErr: ErrPeriodMismatch, }, { - description: "validator not in the current or pending validator sets", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()), - endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), - nodeID: newValidatorID, - rewardAddress: 
rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - expectedErr: database.ErrNotFound, + description: "validator not in the current or pending validator sets", + stakeAmount: env.config.MinDelegatorStake, + startTime: uint64(defaultValidateStartTime.Add(5 * time.Second).Unix()), + endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), + nodeID: newValidatorID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: nil, + AP3Time: defaultGenesisTime, + expectedErr: database.ErrNotFound, }, { - description: "delegator starts before validator", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime - 1, // start validating subnet before primary network - endTime: newValidatorEndTime, - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMinStakeValidator, - AP3Time: defaultGenesisTime, - expectedErr: ErrPeriodMismatch, + description: "delegator starts before validator", + stakeAmount: env.config.MinDelegatorStake, + startTime: newValidatorStartTime - 1, // start validating subnet before primary network + endTime: newValidatorEndTime, + nodeID: newValidatorID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMinStakeValidator, + AP3Time: defaultGenesisTime, + expectedErr: ErrPeriodMismatch, }, { - description: "delegator stops before validator", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, - endTime: newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMinStakeValidator, - AP3Time: defaultGenesisTime, - expectedErr: ErrPeriodMismatch, + description: "delegator stops before validator", + stakeAmount: env.config.MinDelegatorStake, + startTime: newValidatorStartTime, + 
endTime: newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network + nodeID: newValidatorID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMinStakeValidator, + AP3Time: defaultGenesisTime, + expectedErr: ErrPeriodMismatch, }, { - description: "valid", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, // same start time as for primary network - endTime: newValidatorEndTime, // same end time as for primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMinStakeValidator, - AP3Time: defaultGenesisTime, - expectedErr: nil, + description: "valid", + stakeAmount: env.config.MinDelegatorStake, + startTime: newValidatorStartTime, // same start time as for primary network + endTime: newValidatorEndTime, // same end time as for primary network + nodeID: newValidatorID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMinStakeValidator, + AP3Time: defaultGenesisTime, + expectedErr: nil, }, { - description: "starts delegating at current timestamp", - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(currentTimestamp.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer - setup: nil, - AP3Time: defaultGenesisTime, - expectedErr: ErrTimestampNotBeforeStartTime, + description: "starts delegating at current timestamp", + stakeAmount: env.config.MinDelegatorStake, + startTime: uint64(currentTimestamp.Unix()), + endTime: uint64(defaultValidateEndTime.Unix()), + nodeID: nodeID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: nil, + AP3Time: defaultGenesisTime, + expectedErr: ErrTimestampNotBeforeStartTime, }, { - description: "tx fee paying key has no funds", - stakeAmount: 
dummyH.config.MinDelegatorStake, // weight - startTime: uint64(defaultValidateStartTime.Unix()) + 1, // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer - setup: func(target *environment) { // Remove all UTXOs owned by keys[1] - utxoIDs, err := target.state.UTXOIDs( + description: "tx fee paying key has no funds", + stakeAmount: env.config.MinDelegatorStake, + startTime: uint64(defaultValidateStartTime.Unix()) + 1, + endTime: uint64(defaultValidateEndTime.Unix()), + nodeID: nodeID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, + setup: func(env *environment) { // Remove all UTXOs owned by keys[1] + utxoIDs, err := env.state.UTXOIDs( preFundedKeys[1].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) require.NoError(t, err) for _, utxoID := range utxoIDs { - target.state.DeleteUTXO(utxoID) + env.state.DeleteUTXO(utxoID) } - target.state.SetHeight(dummyHeight) - require.NoError(t, target.state.Commit()) + env.state.SetHeight(dummyHeight) + require.NoError(t, env.state.Commit()) }, AP3Time: defaultGenesisTime, expectedErr: ErrFlowCheckFailed, }, { - description: "over delegation before AP3", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, // same start time as for primary network - endTime: newValidatorEndTime, // same end time as for primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMaxStakeValidator, - AP3Time: defaultValidateEndTime, - expectedErr: nil, + description: "over delegation before AP3", + stakeAmount: env.config.MinDelegatorStake, + startTime: newValidatorStartTime, // same start time as for primary network + endTime: newValidatorEndTime, // same end time as for primary network + nodeID: newValidatorID, + feeKeys: 
[]*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMaxStakeValidator, + AP3Time: defaultValidateEndTime, + expectedErr: nil, }, { - description: "over delegation after AP3", - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: newValidatorStartTime, // same start time as for primary network - endTime: newValidatorEndTime, // same end time as for primary network - nodeID: newValidatorID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: addMaxStakeValidator, - AP3Time: defaultGenesisTime, - expectedErr: ErrOverDelegated, + description: "over delegation after AP3", + stakeAmount: env.config.MinDelegatorStake, + startTime: newValidatorStartTime, // same start time as for primary network + endTime: newValidatorEndTime, // same end time as for primary network + nodeID: newValidatorID, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, + setup: addMaxStakeValidator, + AP3Time: defaultGenesisTime, + expectedErr: ErrOverDelegated, }, } for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(t, apricotPhase5) - freshTH.config.ApricotPhase3Time = tt.AP3Time - - tx, err := freshTH.txBuilder.NewAddDelegatorTx( - tt.stakeAmount, - tt.startTime, - tt.endTime, - tt.nodeID, - tt.rewardAddress, - tt.feeKeys, - ids.ShortEmpty, - nil, + env := newEnvironment(t, apricotPhase5) + env.config.UpgradeConfig.ApricotPhase3Time = tt.AP3Time + + builder, signer := env.factory.NewWallet(tt.feeKeys...) 
+ utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: tt.nodeID, + Start: tt.startTime, + End: tt.endTime, + Wght: tt.stakeAmount, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) if tt.setup != nil { - tt.setup(freshTH) + tt.setup(env) } - onCommitState, err := state.NewDiff(lastAcceptedID, freshTH) + onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - onAbortState, err := state.NewDiff(lastAcceptedID, freshTH) + onAbortState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, OnAbortState: onAbortState, - Backend: &freshTH.backend, + Backend: &env.backend, Tx: tx, } err = tx.Unsigned.Visit(&executor) @@ -295,17 +294,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // Case: Proposed validator currently validating primary network // but stops validating subnet after stops validating primary network // (note that keys[0] is a genesis validator) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(defaultValidateStartTime.Unix())+1, - uint64(defaultValidateEndTime.Unix())+1, - nodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateEndTime.Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := 
state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -328,17 +331,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // and proposed subnet validation period is subset of // primary network validation period // (note that keys[0] is a genesis validator) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(defaultValidateStartTime.Unix())+1, - uint64(defaultValidateEndTime.Unix()), - nodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -361,32 +368,41 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { dsStartTime := defaultValidateStartTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) - addDSTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stake amount - uint64(dsStartTime.Unix()), // start time - uint64(dsEndTime.Unix()), // end time - pendingDSValidatorID, // node ID - ids.GenerateTestShortID(), // reward address - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), + End: uint64(dsEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, 
+ &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + reward.PercentDenominator, ) require.NoError(err) + addDSTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) { // Case: Proposed validator isn't in pending or current validator sets - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix()), // start validating subnet before primary network - uint64(dsEndTime.Unix()), - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), // start validating subnet before primary network + End: uint64(dsEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -424,17 +440,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but starts validating subnet before primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix())-1, // start validating subnet before primary network - uint64(dsEndTime.Unix()), - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + 
Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()) - 1, // start validating subnet before primary network + End: uint64(dsEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -455,17 +475,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but stops validating subnet after primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix()), - uint64(dsEndTime.Unix())+1, // stop validating subnet after stopping validating primary network - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), + End: uint64(dsEndTime.Unix()) + 1, // stop validating subnet after stopping validating primary network + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -486,17 +510,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network and // period validating subnet is subset of time validating primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix()), // same start time as for 
primary network - uint64(dsEndTime.Unix()), // same end time as for primary network - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), // same start time as for primary network + End: uint64(dsEndTime.Unix()), // same end time as for primary network + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -519,17 +547,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.SetTimestamp(newTimestamp) { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(newTimestamp.Unix()), // start time - uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(newTimestamp.Unix()), + End: uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -552,17 +584,21 @@ func 
TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // Case: Proposed validator already validating the subnet // First, add validator as validator of subnet - subnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer = env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + uSubnetTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()), + End: uint64(defaultValidateEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), signer, uSubnetTx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -580,17 +616,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID - duplicateSubnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix())+1, // start time - uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateEndTime.Unix()), 
+ Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + duplicateSubnetTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -614,17 +654,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Too few signatures - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix())+1, // start time - uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -651,22 +695,26 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Control Signature from invalid key (keys[3] is not a control key) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix())+1, // start time - uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := 
env.factory.NewWallet(testSubnet1ControlKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) require.NoError(err) - copy(tx.Creds[1].(*secp256k1fx.Credential).Sigs[0][:], sig) + copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -687,17 +735,21 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix())+1, // start time - uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix())+1, // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) 
addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -741,48 +793,22 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: Validator's start time too early - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(chainTime.Unix()), - uint64(defaultValidateEndTime.Unix()), - nodeID, - ids.ShortEmpty, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(chainTime.Unix()), + End: uint64(defaultValidateEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) - - onCommitState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - onAbortState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - executor := ProposalTxExecutor{ - OnCommitState: onCommitState, - OnAbortState: onAbortState, - Backend: &env.backend, - Tx: tx, - } - err = tx.Unsigned.Visit(&executor) - require.ErrorIs(err, ErrTimestampNotBeforeStartTime) - } - - { - // Case: Validator's start time too far in the future - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Add(MaxFutureStartTime).Unix()+1), - uint64(defaultValidateStartTime.Add(MaxFutureStartTime).Add(defaultMinStakingDuration).Unix()+1), - nodeID, - ids.ShortEmpty, - reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, - ) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -798,25 +824,30 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = 
tx.Unsigned.Visit(&executor) - require.ErrorIs(err, ErrFutureStakeTime) + require.ErrorIs(err, ErrTimestampNotBeforeStartTime) } { nodeID := genesisNodeIDs[0] // Case: Validator already validating primary network - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix())+1, - uint64(defaultValidateEndTime.Unix()), - nodeID, - ids.ShortEmpty, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -837,18 +868,23 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: Validator in pending validator set of primary network startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stake amount - uint64(startTime.Unix()), // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, - ids.ShortEmpty, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{ids.GenerateTestShortID()}, + }, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -883,18 +919,23 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { { // Case: Validator doesn't have enough tokens to cover stake amount - tx, err := env.txBuilder.NewAddValidatorTx( // create the tx - env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix())+1, - uint64(defaultValidateEndTime.Unix()), - ids.GenerateTestNodeID(), - ids.ShortEmpty, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(defaultValidateStartTime.Unix()) + 1, + End: uint64(defaultValidateEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove all UTXOs owned by preFundedKeys[0] utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) diff --git a/vms/platformvm/txs/executor/reward_validator_test.go b/vms/platformvm/txs/executor/reward_validator_test.go index cbd7f7bdf4eb..88b7b0b8a368 100644 --- a/vms/platformvm/txs/executor/reward_validator_test.go +++ b/vms/platformvm/txs/executor/reward_validator_test.go @@ -4,6 +4,7 @@ package executor import ( + "context" "testing" "time" @@ -13,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" - 
"github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -22,6 +22,8 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) func newRewardValidatorTx(t testing.TB, txID ids.ID) (*txs.Tx, error) { @@ -239,33 +241,42 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stakeAmt - vdrStartTime, - vdrEndTime, - vdrNodeID, // node ID - vdrRewardAddress, // reward address + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: vdrStartTime, + End: vdrEndTime, + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{vdrRewardAddress}, + }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - delStartTime, - delEndTime, - vdrNodeID, - delRewardAddress, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // Change address - nil, + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: delStartTime, + End: delEndTime, + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{delRewardAddress}, + }, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( @@ -362,33 +373,42 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - vdrStartTime, - vdrEndTime, - vdrNodeID, - vdrRewardAddress, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: vdrStartTime, + End: vdrEndTime, + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{vdrRewardAddress}, + }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, /*=changeAddr*/ - nil, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - delStartTime, - delEndTime, - vdrNodeID, - delRewardAddress, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, /*=changeAddr*/ - nil, + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: delStartTime, + End: delEndTime, + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{delRewardAddress}, + }, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) @@ -580,33 +600,42 @@ func 
TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stakeAmt - vdrStartTime, - vdrEndTime, - vdrNodeID, // node ID - vdrRewardAddress, // reward address + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: vdrStartTime, + End: vdrEndTime, + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{vdrRewardAddress}, + }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - delStartTime, - delEndTime, - vdrNodeID, - delRewardAddress, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // Change address - nil, + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: delStartTime, + End: delEndTime, + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{delRewardAddress}, + }, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrRewardAmt := uint64(2000000) @@ -744,32 +773,42 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) vdrNodeID := ids.GenerateTestNodeID() - vdrTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stakeAmt - 
vdrStartTime, - vdrEndTime, - vdrNodeID, // node ID - vdrRewardAddress, // reward address + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + uVdrTx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: vdrStartTime, + End: vdrEndTime, + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{vdrRewardAddress}, + }, reward.PercentDenominator/4, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + vdrTx, err := walletsigner.SignUnsigned(context.Background(), signer, uVdrTx) + require.NoError(err) delStartTime := vdrStartTime delEndTime := vdrEndTime - delTx, err := env.txBuilder.NewAddDelegatorTx( - env.config.MinDelegatorStake, - delStartTime, - delEndTime, - vdrNodeID, - delRewardAddress, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: vdrNodeID, + Start: delStartTime, + End: delEndTime, + Wght: env.config.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{delRewardAddress}, + }, ) require.NoError(err) + delTx, err := walletsigner.SignUnsigned(context.Background(), signer, uDelTx) + require.NoError(err) addValTx := vdrTx.Unsigned.(*txs.AddValidatorTx) vdrStaker, err := state.NewCurrentStaker( diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 4dd72f6bf42c..0aac4ad50f64 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" safemath "github.com/ava-labs/avalanchego/utils/math" ) @@ -26,7 +27,6 @@ var ( 
ErrStakeTooShort = errors.New("staking period is too short") ErrStakeTooLong = errors.New("staking period is too long") ErrFlowCheckFailed = errors.New("flow check failed") - ErrFutureStakeTime = fmt.Errorf("staker is attempting to start staking more than %s ahead of the current chain time", MaxFutureStartTime) ErrNotValidator = errors.New("isn't a current or pending validator") ErrRemovePermissionlessValidator = errors.New("attempting to remove permissionless validator") ErrStakeOverflow = errors.New("validator stake exceeds limit") @@ -98,7 +98,7 @@ func verifyAddValidatorTx( error, ) { currentTimestamp := chainState.GetTimestamp() - if backend.Config.IsDurangoActivated(currentTimestamp) { + if backend.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) { return nil, ErrAddValidatorTxPostDurango } @@ -164,6 +164,9 @@ func verifyAddValidatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -171,15 +174,13 @@ func verifyAddValidatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkValidatorFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // verifyStakerStartsSoon is checked last to allow - // the verifier visitor to explicitly check for this error. 
- return outs, verifyStakerStartsSoon(false /*=isDurangoActive*/, currentTimestamp, startTime) + return outs, nil } // verifyAddSubnetValidatorTx carries out the validation for an @@ -197,7 +198,7 @@ func verifyAddSubnetValidatorTx( var ( currentTimestamp = chainState.GetTimestamp() - isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = backend.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -254,6 +255,9 @@ func verifyAddSubnetValidatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -261,15 +265,13 @@ func verifyAddSubnetValidatorTx( tx.Outs, baseTxCreds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.AddSubnetValidatorFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // verifyStakerStartsSoon is checked last to allow - // the verifier visitor to explicitly check for this error. - return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) + return nil } // Returns the representation of [tx.NodeID] validating [tx.Subnet]. 
@@ -293,7 +295,7 @@ func verifyRemoveSubnetValidatorTx( var ( currentTimestamp = chainState.GetTimestamp() - isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = backend.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return nil, false, err @@ -331,6 +333,9 @@ func verifyRemoveSubnetValidatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -338,7 +343,7 @@ func verifyRemoveSubnetValidatorTx( tx.Outs, baseTxCreds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.TxFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return nil, false, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -360,7 +365,7 @@ func verifyAddDelegatorTx( error, ) { currentTimestamp := chainState.GetTimestamp() - if backend.Config.IsDurangoActivated(currentTimestamp) { + if backend.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) { return nil, ErrAddDelegatorTxPostDurango } @@ -418,7 +423,7 @@ func verifyAddDelegatorTx( return nil, ErrStakeOverflow } - if backend.Config.IsApricotPhase3Activated(currentTimestamp) { + if backend.Config.UpgradeConfig.IsApricotPhase3Activated(currentTimestamp) { maximumWeight = min(maximumWeight, backend.Config.MaxValidatorStake) } @@ -446,6 +451,9 @@ func verifyAddDelegatorTx( } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -453,15 +461,13 @@ func verifyAddDelegatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkDelegatorFee, + backend.Ctx.AVAXAssetID: fee, }, ); 
err != nil { return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // verifyStakerStartsSoon is checked last to allow - // the verifier visitor to explicitly check for this error. - return outs, verifyStakerStartsSoon(false /*=isDurangoActive*/, currentTimestamp, startTime) + return outs, nil } // verifyAddPermissionlessValidatorTx carries out the validation for an @@ -479,7 +485,7 @@ func verifyAddPermissionlessValidatorTx( var ( currentTimestamp = chainState.GetTimestamp() - isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = backend.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -554,15 +560,10 @@ func verifyAddPermissionlessValidatorTx( ) } - var txFee uint64 if tx.Subnet != constants.PrimaryNetworkID { if err := verifySubnetValidatorPrimaryNetworkRequirements(isDurangoActive, chainState, tx.Validator); err != nil { return err } - - txFee = backend.Config.AddSubnetValidatorFee - } else { - txFee = backend.Config.AddPrimaryNetworkValidatorFee } outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.StakeOuts)) @@ -570,6 +571,9 @@ func verifyAddPermissionlessValidatorTx( copy(outs[len(tx.Outs):], tx.StakeOuts) // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -577,15 +581,13 @@ func verifyAddPermissionlessValidatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: txFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // verifyStakerStartsSoon is checked last to allow - // the verifier visitor to explicitly check for this error. 
- return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) + return nil } // verifyAddPermissionlessDelegatorTx carries out the validation for an @@ -603,7 +605,7 @@ func verifyAddPermissionlessDelegatorTx( var ( currentTimestamp = chainState.GetTimestamp() - isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = backend.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -701,7 +703,6 @@ func verifyAddPermissionlessDelegatorTx( copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.StakeOuts) - var txFee uint64 if tx.Subnet != constants.PrimaryNetworkID { // Invariant: Delegators must only be able to reference validator // transactions that implement [txs.ValidatorTx]. All @@ -712,13 +713,12 @@ func verifyAddPermissionlessDelegatorTx( if validator.Priority.IsPermissionedValidator() { return ErrDelegateToPermissionedValidator } - - txFee = backend.Config.AddSubnetDelegatorFee - } else { - txFee = backend.Config.AddPrimaryNetworkDelegatorFee } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -726,15 +726,13 @@ func verifyAddPermissionlessDelegatorTx( outs, sTx.Creds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: txFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // verifyStakerStartsSoon is checked last to allow - // the verifier visitor to explicitly check for this error. - return verifyStakerStartsSoon(isDurangoActive, currentTimestamp, startTime) + return nil } // Returns an error if the given tx is invalid. 
@@ -748,7 +746,7 @@ func verifyTransferSubnetOwnershipTx( sTx *txs.Tx, tx *txs.TransferSubnetOwnershipTx, ) error { - if !backend.Config.IsDurangoActivated(chainState.GetTimestamp()) { + if !backend.Config.UpgradeConfig.IsDurangoActivated(chainState.GetTimestamp()) { return ErrDurangoUpgradeNotActive } @@ -772,6 +770,10 @@ func verifyTransferSubnetOwnershipTx( } // Verify the flowcheck + currentTimestamp := chainState.GetTimestamp() + feeCalculator := fee.NewStaticCalculator(backend.Config.StaticFeeConfig, backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := backend.FlowChecker.VerifySpend( tx, chainState, @@ -779,7 +781,7 @@ func verifyTransferSubnetOwnershipTx( tx.Outs, baseTxCreds, map[ids.ID]uint64{ - backend.Ctx.AVAXAssetID: backend.Config.TxFee, + backend.Ctx.AVAXAssetID: fee, }, ); err != nil { return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) @@ -806,17 +808,3 @@ func verifyStakerStartTime(isDurangoActive bool, chainTime, stakerTime time.Time } return nil } - -func verifyStakerStartsSoon(isDurangoActive bool, chainTime, stakerStartTime time.Time) error { - if isDurangoActive { - return nil - } - - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := chainTime.Add(MaxFutureStartTime) - if stakerStartTime.After(maxStartTime) { - return ErrFutureStakeTime - } - return nil -} diff --git a/vms/platformvm/txs/executor/staker_tx_verification_helpers.go b/vms/platformvm/txs/executor/staker_tx_verification_helpers.go index 3a74cea28696..eb18c6609299 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_helpers.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_helpers.go @@ -94,40 +94,6 @@ func getDelegatorRules( }, nil } -// GetNextStakerChangeTime returns the next time a staker will be either added -// or removed to/from the current validator set. 
-func GetNextStakerChangeTime(state state.Chain) (time.Time, error) { - currentStakerIterator, err := state.GetCurrentStakerIterator() - if err != nil { - return time.Time{}, err - } - defer currentStakerIterator.Release() - - pendingStakerIterator, err := state.GetPendingStakerIterator() - if err != nil { - return time.Time{}, err - } - defer pendingStakerIterator.Release() - - hasCurrentStaker := currentStakerIterator.Next() - hasPendingStaker := pendingStakerIterator.Next() - switch { - case hasCurrentStaker && hasPendingStaker: - nextCurrentTime := currentStakerIterator.Value().NextTime - nextPendingTime := pendingStakerIterator.Value().NextTime - if nextCurrentTime.Before(nextPendingTime) { - return nextCurrentTime, nil - } - return nextPendingTime, nil - case hasCurrentStaker: - return currentStakerIterator.Value().NextTime, nil - case hasPendingStaker: - return pendingStakerIterator.Value().NextTime, nil - default: - return time.Time{}, database.ErrNotFound - } -} - // GetValidator returns information about the given validator, which may be a // current validator or pending validator. 
func GetValidator(state state.Chain, subnetID ids.ID, nodeID ids.NodeID) (*state.Staker, error) { diff --git a/vms/platformvm/txs/executor/staker_tx_verification_test.go b/vms/platformvm/txs/executor/staker_tx_verification_test.go index bc67a3b78189..bde3da64ad7a 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -109,10 +109,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { name: "fail syntactic verification", backendF: func(*gomock.Controller) *Backend { return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), } }, stateF: func(*gomock.Controller) state.Chain { @@ -130,10 +128,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { name: "not bootstrapped", backendF: func(*gomock.Controller) *Backend { return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: &utils.Atomic[bool]{}, } }, @@ -156,11 +152,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - CortinaTime: activeForkTime, - DurangoTime: mockable.MaxTime, - }, + Ctx: ctx, + Config: defaultTestConfig(t, cortina, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -183,10 +176,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -212,10 +203,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { 
bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -241,10 +230,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -271,10 +258,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -304,10 +289,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -337,10 +320,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -372,10 +353,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: 
defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -401,10 +380,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { bootstrapped := &utils.Atomic[bool]{} bootstrapped.Set(true) return &Backend{ - Ctx: ctx, - Config: &config.Config{ - DurangoTime: activeForkTime, // activate latest fork - }, + Ctx: ctx, + Config: defaultTestConfig(t, durango, activeForkTime), Bootstrapped: bootstrapped, } }, @@ -445,12 +422,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { gomock.Any(), ).Return(ErrFlowCheckFailed) + cfg := defaultTestConfig(t, durango, activeForkTime) + cfg.StaticFeeConfig.AddSubnetValidatorFee = 1 + return &Backend{ - FlowChecker: flowChecker, - Config: &config.Config{ - AddSubnetValidatorFee: 1, - DurangoTime: activeForkTime, // activate latest fork, - }, + FlowChecker: flowChecker, + Config: cfg, Ctx: ctx, Bootstrapped: bootstrapped, } @@ -475,58 +452,6 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { }, expectedErr: ErrFlowCheckFailed, }, - { - name: "starts too far in the future", - backendF: func(ctrl *gomock.Controller) *Backend { - bootstrapped := &utils.Atomic[bool]{} - bootstrapped.Set(true) - - flowChecker := utxo.NewMockVerifier(ctrl) - flowChecker.EXPECT().VerifySpend( - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - gomock.Any(), - ).Return(nil) - - return &Backend{ - FlowChecker: flowChecker, - Config: &config.Config{ - CortinaTime: activeForkTime, - DurangoTime: mockable.MaxTime, - AddSubnetValidatorFee: 1, - }, - Ctx: ctx, - Bootstrapped: bootstrapped, - } - }, - stateF: func(ctrl *gomock.Controller) state.Chain { - mockState := state.NewMockChain(ctrl) - mockState.EXPECT().GetTimestamp().Return(now).Times(2) // chain time is Cortina fork activation since now.After(activeForkTime) - mockState.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) - mockState.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, 
database.ErrNotFound) - mockState.EXPECT().GetPendingValidator(subnetID, verifiedTx.NodeID()).Return(nil, database.ErrNotFound) - primaryNetworkVdr := &state.Staker{ - StartTime: time.Unix(0, 0), - EndTime: mockable.MaxTime, - } - mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) - return mockState - }, - sTxF: func() *txs.Tx { - return &verifiedSignedTx - }, - txF: func() *txs.AddPermissionlessValidatorTx { - // Note this copies [verifiedTx] - tx := verifiedTx - tx.Validator.Start = uint64(now.Add(MaxFutureStartTime).Add(time.Second).Unix()) - tx.Validator.End = tx.Validator.Start + uint64(unsignedTransformTx.MinStakeDuration) - return &tx - }, - expectedErr: ErrFutureStakeTime, - }, { name: "success", backendF: func(ctrl *gomock.Controller) *Backend { @@ -543,12 +468,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { gomock.Any(), ).Return(nil) + cfg := defaultTestConfig(t, durango, activeForkTime) + cfg.StaticFeeConfig.AddSubnetValidatorFee = 1 + return &Backend{ - FlowChecker: flowChecker, - Config: &config.Config{ - AddSubnetValidatorFee: 1, - DurangoTime: activeForkTime, // activate latest fork, - }, + FlowChecker: flowChecker, + Config: cfg, Ctx: ctx, Bootstrapped: bootstrapped, } diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index aa3ea9a2aafe..2de7d3392ba8 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" ) var ( @@ -56,7 +57,7 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { var ( currentTimestamp = e.State.GetTimestamp() - isDurangoActive = 
e.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = e.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -68,7 +69,9 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { } // Verify the flowcheck - createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(currentTimestamp) + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -76,7 +79,7 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { tx.Outs, baseTxCreds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: createBlockchainTxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -107,14 +110,16 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { var ( currentTimestamp = e.State.GetTimestamp() - isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = e.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err } // Verify the flowcheck - createSubnetTxFee := e.Config.GetCreateSubnetTxFee(currentTimestamp) + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -122,7 +127,7 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { tx.Outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: createSubnetTxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -135,7 +140,7 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { // Produce the UTXOS avax.Produce(e.State, txID, tx.Outs) // Add the new subnet to the database - 
e.State.AddSubnet(e.Tx) + e.State.AddSubnet(txID) e.State.SetSubnetOwner(txID, tx.Owner) return nil } @@ -147,7 +152,7 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { var ( currentTimestamp = e.State.GetTimestamp() - isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = e.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -194,6 +199,10 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { copy(ins, tx.Ins) copy(ins[len(tx.Ins):], tx.ImportedInputs) + // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpendUTXOs( tx, utxos, @@ -201,7 +210,7 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { tx.Outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -233,7 +242,7 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { var ( currentTimestamp = e.State.GetTimestamp() - isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = e.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -250,6 +259,9 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { } // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -257,7 +269,7 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return 
fmt.Errorf("failed verifySpend: %w", err) @@ -418,7 +430,7 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error var ( currentTimestamp = e.State.GetTimestamp() - isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + isDurangoActive = e.Config.UpgradeConfig.IsDurangoActivated(currentTimestamp) ) if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { return err @@ -435,6 +447,10 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error return err } + // Verify the flowcheck + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + totalRewardAmount := tx.MaximumSupply - tx.InitialSupply if err := e.Backend.FlowChecker.VerifySpend( tx, @@ -446,7 +462,7 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error // entry in this map literal from being overwritten by the // second entry. 
map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TransformSubnetTxFee, + e.Ctx.AVAXAssetID: fee, tx.AssetID: totalRewardAmount, }, ); err != nil { @@ -541,7 +557,7 @@ func (e *StandardTxExecutor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwn } func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { - if !e.Backend.Config.IsDurangoActivated(e.State.GetTimestamp()) { + if !e.Backend.Config.UpgradeConfig.IsDurangoActivated(e.State.GetTimestamp()) { return ErrDurangoUpgradeNotActive } @@ -555,6 +571,10 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { } // Verify the flowcheck + currentTimestamp := e.State.GetTimestamp() + feeCalculator := fee.NewStaticCalculator(e.Backend.Config.StaticFeeConfig, e.Backend.Config.UpgradeConfig) + fee := feeCalculator.CalculateFee(tx, currentTimestamp) + if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -562,7 +582,7 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { tx.Outs, e.Tx.Creds, map[ids.ID]uint64{ - e.Ctx.AVAXAssetID: e.Config.TxFee, + e.Ctx.AVAXAssetID: fee, }, ); err != nil { return err @@ -585,7 +605,7 @@ func (e *StandardTxExecutor) putStaker(stakerTx txs.Staker) error { err error ) - if !e.Config.IsDurangoActivated(chainTime) { + if !e.Config.UpgradeConfig.IsDurangoActivated(chainTime) { // Pre-Durango, stakers set a future [StartTime] and are added to the // pending staker set. They are promoted to the current staker set once // the chain time reaches [StartTime]. 
diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 69ad018caa1b..1de00365723f 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -4,9 +4,10 @@ package executor import ( + "context" "errors" - "fmt" "math" + "math/rand" "testing" "time" @@ -21,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -31,8 +33,12 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) // This tests that the math performed during TransformSubnetTx execution can @@ -69,20 +75,25 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { } for _, test := range tests { // Case: Empty validator node ID after banff - env.config.BanffTime = test.banffTime - - tx, err := env.txBuilder.NewAddValidatorTx( // create the tx - env.config.MinValidatorStake, - uint64(startTime.Unix()), - uint64(defaultValidateEndTime.Unix()), - ids.EmptyNodeID, - ids.GenerateTestShortID(), + env.config.UpgradeConfig.BanffTime = test.banffTime + + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: ids.EmptyNodeID, + Start: 
uint64(startTime.Unix()), + End: uint64(defaultValidateEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -108,19 +119,26 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { // [addMinStakeValidator] adds a new validator to the primary network's // pending validator set with the minimum staking amount - addMinStakeValidator := func(target *environment) { - tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MinValidatorStake, // stake amount - uint64(newValidatorStartTime.Unix()), // start time - uint64(newValidatorEndTime.Unix()), // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + addMinStakeValidator := func(env *environment) { + require := require.New(t) + + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: newValidatorID, + Start: uint64(newValidatorStartTime.Unix()), + End: uint64(newValidatorEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + reward.PercentDenominator, ) - require.NoError(t, err) + require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -129,29 +147,36 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { newValidatorStartTime, 0, ) - require.NoError(t, 
err) + require.NoError(err) - target.state.PutCurrentValidator(staker) - target.state.AddTx(tx, status.Committed) - target.state.SetHeight(dummyHeight) - require.NoError(t, target.state.Commit()) + env.state.PutCurrentValidator(staker) + env.state.AddTx(tx, status.Committed) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) } // [addMaxStakeValidator] adds a new validator to the primary network's // pending validator set with the maximum staking amount - addMaxStakeValidator := func(target *environment) { - tx, err := target.txBuilder.NewAddValidatorTx( - target.config.MaxValidatorStake, // stake amount - uint64(newValidatorStartTime.Unix()), // start time - uint64(newValidatorEndTime.Unix()), // end time - newValidatorID, // node ID - rewardAddress, // Reward Address - reward.PercentDenominator, // Shared - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + addMaxStakeValidator := func(env *environment) { + require := require.New(t) + + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: newValidatorID, + Start: uint64(newValidatorStartTime.Unix()), + End: uint64(newValidatorEndTime.Unix()), + Wght: env.config.MaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + reward.PercentDenominator, ) - require.NoError(t, err) + require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -160,16 +185,16 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { newValidatorStartTime, 0, ) - require.NoError(t, err) + require.NoError(err) - target.state.PutCurrentValidator(staker) - target.state.AddTx(tx, status.Committed) - target.state.SetHeight(dummyHeight) - require.NoError(t, target.state.Commit()) + env.state.PutCurrentValidator(staker) + 
env.state.AddTx(tx, status.Committed) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) } - dummyH := newEnvironment(t, apricotPhase5) - currentTimestamp := dummyH.state.GetTimestamp() + env := newEnvironment(t, apricotPhase5) + currentTimestamp := env.state.GetTimestamp() type test struct { description string @@ -177,7 +202,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { startTime time.Time endTime time.Time nodeID ids.NodeID - rewardAddress ids.ShortID feeKeys []*secp256k1.PrivateKey setup func(*environment) AP3Time time.Time @@ -187,35 +211,21 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { tests := []test{ { description: "validator stops validating earlier than delegator", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: defaultValidateStartTime.Add(time.Second), endTime: defaultValidateEndTime.Add(time.Second), nodeID: nodeID, - rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrPeriodMismatch, }, - { - description: fmt.Sprintf("delegator should not be added more than (%s) in the future", MaxFutureStartTime), - stakeAmount: dummyH.config.MinDelegatorStake, - startTime: currentTimestamp.Add(MaxFutureStartTime + time.Second), - endTime: currentTimestamp.Add(MaxFutureStartTime + defaultMinStakingDuration + time.Second), - nodeID: nodeID, - rewardAddress: rewardAddress, - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, - setup: nil, - AP3Time: defaultGenesisTime, - expectedExecutionErr: ErrFutureStakeTime, - }, { description: "validator not in the current or pending validator sets", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: defaultValidateStartTime.Add(5 * time.Second), endTime: defaultValidateEndTime.Add(-5 * time.Second), nodeID: newValidatorID, - rewardAddress: rewardAddress, feeKeys: 
[]*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, @@ -223,11 +233,10 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, { description: "delegator starts before validator", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: newValidatorStartTime.Add(-1 * time.Second), // start validating subnet before primary network endTime: newValidatorEndTime, nodeID: newValidatorID, - rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, @@ -235,11 +244,10 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, { description: "delegator stops before validator", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: newValidatorStartTime, endTime: newValidatorEndTime.Add(time.Second), // stop validating subnet after stopping validating primary network nodeID: newValidatorID, - rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, @@ -247,11 +255,10 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, { description: "valid", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: newValidatorStartTime, // same start time as for primary network endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, - rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, @@ -259,47 +266,44 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, { description: "starts delegating at current timestamp", - stakeAmount: dummyH.config.MinDelegatorStake, // weight + stakeAmount: env.config.MinDelegatorStake, // weight startTime: currentTimestamp, // start time endTime: defaultValidateEndTime, // end 
time nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrTimestampNotBeforeStartTime, }, { - description: "tx fee paying key has no funds", - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: defaultValidateStartTime.Add(time.Second), // start time - endTime: defaultValidateEndTime, // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer - setup: func(target *environment) { // Remove all UTXOs owned by keys[1] - utxoIDs, err := target.state.UTXOIDs( + description: "tx fee paying key has no funds", + stakeAmount: env.config.MinDelegatorStake, // weight + startTime: defaultValidateStartTime.Add(time.Second), // start time + endTime: defaultValidateEndTime, // end time + nodeID: nodeID, // node ID + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer + setup: func(env *environment) { // Remove all UTXOs owned by keys[1] + utxoIDs, err := env.state.UTXOIDs( preFundedKeys[1].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) require.NoError(t, err) for _, utxoID := range utxoIDs { - target.state.DeleteUTXO(utxoID) + env.state.DeleteUTXO(utxoID) } - target.state.SetHeight(dummyHeight) - require.NoError(t, target.state.Commit()) + env.state.SetHeight(dummyHeight) + require.NoError(t, env.state.Commit()) }, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrFlowCheckFailed, }, { description: "over delegation before AP3", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: newValidatorStartTime, // same start time as for primary network endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, - rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: 
addMaxStakeValidator, AP3Time: defaultValidateEndTime, @@ -307,11 +311,10 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, { description: "over delegation after AP3", - stakeAmount: dummyH.config.MinDelegatorStake, + stakeAmount: env.config.MinDelegatorStake, startTime: newValidatorStartTime, // same start time as for primary network endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, - rewardAddress: rewardAddress, feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultGenesisTime, @@ -322,32 +325,37 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(t, apricotPhase5) - freshTH.config.ApricotPhase3Time = tt.AP3Time - - tx, err := freshTH.txBuilder.NewAddDelegatorTx( - tt.stakeAmount, - uint64(tt.startTime.Unix()), - uint64(tt.endTime.Unix()), - tt.nodeID, - tt.rewardAddress, - tt.feeKeys, - ids.ShortEmpty, - nil, + env := newEnvironment(t, apricotPhase5) + env.config.UpgradeConfig.ApricotPhase3Time = tt.AP3Time + + builder, signer := env.factory.NewWallet(tt.feeKeys...) 
+ utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: tt.nodeID, + Start: uint64(tt.startTime.Unix()), + End: uint64(tt.endTime.Unix()), + Wght: tt.stakeAmount, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) if tt.setup != nil { - tt.setup(freshTH) + tt.setup(env) } - onAcceptState, err := state.NewDiff(lastAcceptedID, freshTH) + onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - freshTH.config.BanffTime = onAcceptState.GetTimestamp() + env.config.UpgradeConfig.BanffTime = onAcceptState.GetTimestamp() executor := StandardTxExecutor{ - Backend: &freshTH.backend, + Backend: &env.backend, State: onAcceptState, Tx: tx, } @@ -370,17 +378,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // but stops validating subnet after stops validating primary network // (note that keys[0] is a genesis validator) startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(startTime.Unix()), - uint64(defaultValidateEndTime.Unix())+1, - nodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(defaultValidateEndTime.Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -399,17 +411,21 @@ func 
TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // and proposed subnet validation period is subset of // primary network validation period // (note that keys[0] is a genesis validator) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(defaultValidateStartTime.Unix()+1), - uint64(defaultValidateEndTime.Unix()), - nodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix() + 1), + End: uint64(defaultValidateEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -428,32 +444,41 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { dsStartTime := defaultGenesisTime.Add(10 * time.Second) dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) - addDSTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stake amount - uint64(dsStartTime.Unix()), // start time - uint64(dsEndTime.Unix()), // end time - pendingDSValidatorID, // node ID - ids.GenerateTestShortID(), // reward address - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), + End: uint64(dsEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{ids.GenerateTestShortID()}, + }, + reward.PercentDenominator, ) require.NoError(err) + addDSTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) { // Case: Proposed validator isn't in pending or current validator sets - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix()), // start validating subnet before primary network - uint64(dsEndTime.Unix()), - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), // start validating subnet before primary network + End: uint64(dsEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -487,17 +512,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but starts validating subnet before primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix())-1, // start validating subnet before primary network - uint64(dsEndTime.Unix()), - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: 
pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()) - 1, // start validating subnet before primary network + End: uint64(dsEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -514,17 +543,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network // but stops validating subnet after primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix()), - uint64(dsEndTime.Unix())+1, // stop validating subnet after stopping validating primary network - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), + End: uint64(dsEndTime.Unix()) + 1, // stop validating subnet after stopping validating primary network + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -541,17 +574,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Proposed validator is pending validator of primary network and // period validating subnet is subset of time validating primary network - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(dsStartTime.Unix()), // same start time as for primary network - 
uint64(dsEndTime.Unix()), // same end time as for primary network - pendingDSValidatorID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: pendingDSValidatorID, + Start: uint64(dsStartTime.Unix()), // same start time as for primary network + End: uint64(dsEndTime.Unix()), // same end time as for primary network + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -569,17 +606,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.SetTimestamp(newTimestamp) { - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(newTimestamp.Unix()), // start time - uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(newTimestamp.Unix()), + End: uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -598,17 +639,21 @@ func 
TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // Case: Proposed validator already validating the subnet // First, add validator as validator of subnet - subnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer = env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + uSubnetTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()), + End: uint64(defaultValidateEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), signer, uSubnetTx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -627,17 +672,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID startTime := defaultValidateStartTime.Add(time.Second) - duplicateSubnetTx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(startTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: 
uint64(defaultValidateEndTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -645,9 +694,9 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { executor := StandardTxExecutor{ Backend: &env.backend, State: onAcceptState, - Tx: duplicateSubnetTx, + Tx: tx, } - err = duplicateSubnetTx.Unsigned.Visit(&executor) + err = tx.Unsigned.Visit(&executor) require.ErrorIs(err, ErrDuplicateValidator) } @@ -658,17 +707,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Duplicate signatures startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(startTime.Unix()), // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Duplicate a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -692,17 +745,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Too few signatures startTime := defaultValidateStartTime.Add(time.Second) 
- tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(startTime.Unix()), // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -726,22 +783,26 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { { // Case: Control Signature from invalid key (keys[3] is not a control key) startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(startTime.Unix()), // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], preFundedKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + 
require.NoError(err) // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) require.NoError(err) - copy(tx.Creds[1].(*secp256k1fx.Credential).Sigs[0][:], sig) + copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -759,17 +820,21 @@ func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { // Case: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet startTime := defaultValidateStartTime.Add(time.Second) - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, // weight - uint64(startTime.Unix())+1, // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix())+1, // end time - nodeID, // node ID - testSubnet1.ID(), // subnet ID - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()) + 1, + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()) + 1, + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addSubnetValTx := subnetTx.Unsigned.(*txs.AddSubnetValidatorTx) staker, err = state.NewCurrentStaker( @@ -808,44 +873,22 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator's start time too early - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Unix())-1, - uint64(defaultValidateEndTime.Unix()), - nodeID, - ids.ShortEmpty, + builder, signer := 
env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(defaultValidateStartTime.Unix()) - 1, + End: uint64(defaultValidateEndTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) - - onAcceptState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - executor := StandardTxExecutor{ - Backend: &env.backend, - State: onAcceptState, - Tx: tx, - } - err = tx.Unsigned.Visit(&executor) - require.ErrorIs(err, ErrTimestampNotBeforeStartTime) - } - - { - // Case: Validator's start time too far in the future - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(defaultValidateStartTime.Add(MaxFutureStartTime).Unix()+1), - uint64(defaultValidateStartTime.Add(MaxFutureStartTime).Add(defaultMinStakingDuration).Unix()+1), - nodeID, - ids.ShortEmpty, - reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, - ) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) @@ -857,24 +900,29 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.ErrorIs(err, ErrFutureStakeTime) + require.ErrorIs(err, ErrTimestampNotBeforeStartTime) } { // Case: Validator in current validator set of primary network startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stake amount - uint64(startTime.Unix()), // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, - ids.ShortEmpty, - reward.PercentDenominator, // shares - 
[]*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) addValTx := tx.Unsigned.(*txs.AddValidatorTx) staker, err := state.NewCurrentStaker( @@ -903,18 +951,23 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator in pending validator set of primary network startTime := defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, // stake amount - uint64(startTime.Unix()), // start time - uint64(startTime.Add(defaultMinStakingDuration).Unix()), // end time - nodeID, - ids.ShortEmpty, - reward.PercentDenominator, // shares - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + reward.PercentDenominator, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) staker, err := state.NewPendingStaker( tx.ID(), @@ -940,18 +993,23 @@ func TestBanffStandardTxExecutorAddValidator(t *testing.T) { { // Case: Validator doesn't have enough tokens to cover stake amount startTime := 
defaultValidateStartTime.Add(1 * time.Second) - tx, err := env.txBuilder.NewAddValidatorTx( // create the tx - env.config.MinValidatorStake, - uint64(startTime.Unix()), - uint64(startTime.Add(defaultMinStakingDuration).Unix()), - nodeID, - ids.ShortEmpty, + builder, signer := env.factory.NewWallet(preFundedKeys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(defaultMinStakingDuration).Unix()), + Wght: env.config.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) // Remove all UTXOs owned by preFundedKeys[0] utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) @@ -992,18 +1050,23 @@ func TestDurangoDisabledTransactions(t *testing.T) { endTime = chainTime.Add(defaultMaxStakingDuration) ) - tx, err := env.txBuilder.NewAddValidatorTx( - defaultMinValidatorStake, - 0, // startTime - uint64(endTime.Unix()), - nodeID, - ids.ShortEmpty, // reward address, - reward.PercentDenominator, // shares - preFundedKeys, - ids.ShortEmpty, // change address - nil, // memo + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: 0, + End: uint64(endTime.Unix()), + Wght: defaultMinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + reward.PercentDenominator, ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) return tx }, @@ -1025,17 +1088,22 @@ func TestDurangoDisabledTransactions(t *testing.T) { } it.Release() - tx, err := env.txBuilder.NewAddDelegatorTx( - defaultMinValidatorStake, - 0, // startTime - uint64(primaryValidator.EndTime.Unix()), - primaryValidator.NodeID, - ids.ShortEmpty, // reward address, - preFundedKeys, - ids.ShortEmpty, // change address - nil, // memo + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: primaryValidator.NodeID, + Start: 0, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) return tx }, @@ -1090,17 +1158,22 @@ func TestDurangoMemoField(t *testing.T) { } it.Release() - tx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultMinValidatorStake, - 0, // startTime - uint64(primaryValidator.EndTime.Unix()), - primaryValidator.NodeID, - testSubnet1.TxID, - preFundedKeys, - ids.ShortEmpty, - memoField, + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + Start: 0, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + Subnet: testSubnet1.TxID, + }, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1110,17 +1183,18 @@ func TestDurangoMemoField(t *testing.T) { { name: "CreateChainTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewCreateChainTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewCreateChainTx( testSubnet1.TxID, - []byte{}, // genesisData - ids.GenerateTestID(), // vmID - []ids.ID{}, // fxIDs - "aaa", // chain name - preFundedKeys, - ids.ShortEmpty, - memoField, + []byte{}, + ids.GenerateTestID(), + []ids.ID{}, + "aaa", + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1131,14 +1205,17 @@ func TestDurangoMemoField(t *testing.T) { { name: "CreateSubnetTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{ids.GenerateTestShortID()}, - preFundedKeys, - ids.ShortEmpty, - memoField, + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1166,17 +1243,23 @@ func TestDurangoMemoField(t *testing.T) { map[ids.ID]uint64{ env.ctx.AVAXAssetID: sourceAmount, }, + rand.NewSource(0), ) env.msm.SharedMemory = sharedMemory - tx, err := env.txBuilder.NewImportTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewImportTx( sourceChain, - sourceKey.PublicKey().Address(), - preFundedKeys, - ids.ShortEmpty, // change address - memoField, + &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{sourceKey.PublicKey().Address()}, + }, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1187,15 +1270,25 @@ func TestDurangoMemoField(t *testing.T) { { name: "ExportTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewExportTx( - units.Avax, // amount - env.ctx.XChainID, // destination chain - ids.GenerateTestShortID(), // destination address - preFundedKeys, - ids.ShortEmpty, // change address - memoField, + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewExportTx( + env.ctx.XChainID, + []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: units.Avax, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + }}, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1220,17 +1313,21 @@ func TestDurangoMemoField(t *testing.T) { it.Release() endTime := primaryValidator.EndTime - subnetValTx, err := env.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - 0, - uint64(endTime.Unix()), - primaryValidator.NodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - nil, + builder, signer := env.factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + Start: 0, + End: uint64(endTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(t, err) + subnetValTx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1241,14 +1338,15 @@ func TestDurangoMemoField(t *testing.T) { Tx: subnetValTx, })) - tx, err := env.txBuilder.NewRemoveSubnetValidatorTx( + builder, signer = env.factory.NewWallet(preFundedKeys...) 
+ utx2, err := builder.NewRemoveSubnetValidatorTx( primaryValidator.NodeID, testSubnet1.ID(), - preFundedKeys, - ids.ShortEmpty, - memoField, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx2) + require.NoError(t, err) return tx, onAcceptState }, @@ -1256,7 +1354,8 @@ func TestDurangoMemoField(t *testing.T) { { name: "TransformSubnetTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewTransformSubnetTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewTransformSubnetTx( testSubnet1.TxID, // subnetID ids.GenerateTestID(), // assetID 10, // initial supply @@ -1271,11 +1370,11 @@ func TestDurangoMemoField(t *testing.T) { 10, // min delegator stake 1, // max validator weight factor 80, // uptime requirement - preFundedKeys, - ids.ShortEmpty, // change address - memoField, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1294,19 +1393,33 @@ func TestDurangoMemoField(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(t, err) - tx, err := env.txBuilder.NewAddPermissionlessValidatorTx( - env.config.MinValidatorStake, - 0, // start Time - uint64(endTime.Unix()), - nodeID, + builder, txSigner := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: 0, + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - ids.ShortEmpty, // reward address - reward.PercentDenominator, // shares - preFundedKeys, - ids.ShortEmpty, // change address - memoField, + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + reward.PercentDenominator, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1330,17 +1443,27 @@ func TestDurangoMemoField(t *testing.T) { } it.Release() - tx, err := env.txBuilder.NewAddPermissionlessDelegatorTx( - defaultMinValidatorStake, - 0, // start Time - uint64(primaryValidator.EndTime.Unix()), - primaryValidator.NodeID, - ids.ShortEmpty, // reward address - preFundedKeys, - ids.ShortEmpty, // change address - memoField, + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + Start: 0, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, + env.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1351,15 +1474,18 @@ func TestDurangoMemoField(t *testing.T) { { name: "TransferSubnetOwnershipTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewTransferSubnetOwnershipTx( + builder, signer := env.factory.NewWallet(preFundedKeys...) + utx, err := builder.NewTransferSubnetOwnershipTx( testSubnet1.TxID, - 1, - []ids.ShortID{ids.ShortEmpty}, - preFundedKeys, - ids.ShortEmpty, // change address - memoField, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1370,17 +1496,25 @@ func TestDurangoMemoField(t *testing.T) { { name: "BaseTx", setupTest: func(env *environment, memoField []byte) (*txs.Tx, state.Diff) { - tx, err := env.txBuilder.NewBaseTx( - 1, - secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ids.ShortEmpty}, + builder, signer := env.factory.NewWallet(preFundedKeys...) 
+ utx, err := builder.NewBaseTx( + []*avax.TransferableOutput{ + { + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + }, + }, }, - preFundedKeys, - ids.ShortEmpty, - memoField, + common.WithMemo(memoField), ) require.NoError(t, err) + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(t, err) onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) require.NoError(t, err) @@ -1529,7 +1663,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) // Set dependency expectations. - env.state.EXPECT().GetTimestamp().Return(env.latestForkTime).AnyTimes() + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil).Times(1) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) @@ -1542,11 +1676,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.state.EXPECT().AddUTXO(gomock.Any()).Times(len(env.unsignedTx.Outs)) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1569,11 +1699,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.state = state.NewMockDiff(ctrl) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: 
&utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1597,11 +1723,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.state.EXPECT().GetPendingValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1628,11 +1750,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(&staker, nil).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1657,11 +1775,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1685,11 +1799,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: 
defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1715,11 +1825,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(errTest) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1748,11 +1854,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { ).Return(errTest) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1905,11 +2007,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env.state = state.NewMockDiff(ctrl) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1932,11 +2030,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - }, + Config: defaultTestConfig(t, durango, env.latestForkTime), Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, 
@@ -1958,14 +2052,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) + + cfg := defaultTestConfig(t, durango, env.latestForkTime) + cfg.MaxStakeDuration = math.MaxInt64 + e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - MaxStakeDuration: math.MaxInt64, - }, + Config: cfg, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -1992,14 +2085,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env.flowChecker.EXPECT().VerifySpend( gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(ErrFlowCheckFailed) + + cfg := defaultTestConfig(t, durango, env.latestForkTime) + cfg.MaxStakeDuration = math.MaxInt64 + e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - MaxStakeDuration: math.MaxInt64, - }, + Config: cfg, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -2031,14 +2123,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env.state.EXPECT().SetCurrentSupply(env.unsignedTx.Subnet, env.unsignedTx.InitialSupply) env.state.EXPECT().DeleteUTXO(gomock.Any()).Times(len(env.unsignedTx.Ins)) env.state.EXPECT().AddUTXO(gomock.Any()).Times(len(env.unsignedTx.Outs)) + + cfg := defaultTestConfig(t, durango, env.latestForkTime) + cfg.MaxStakeDuration = math.MaxInt64 + e := &StandardTxExecutor{ Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.latestForkTime, - CortinaTime: env.latestForkTime, - DurangoTime: env.latestForkTime, - MaxStakeDuration: math.MaxInt64, - }, + Config: cfg, Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, @@ -2064,3 
+2155,40 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { }) } } + +func defaultTestConfig(t *testing.T, f fork, tm time.Time) *config.Config { + c := &config.Config{ + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: mockable.MaxTime, + ApricotPhase5Time: mockable.MaxTime, + BanffTime: mockable.MaxTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, + EUpgradeTime: mockable.MaxTime, + }, + } + + switch f { + case eUpgrade: + c.UpgradeConfig.EUpgradeTime = tm + fallthrough + case durango: + c.UpgradeConfig.DurangoTime = tm + fallthrough + case cortina: + c.UpgradeConfig.CortinaTime = tm + fallthrough + case banff: + c.UpgradeConfig.BanffTime = tm + fallthrough + case apricotPhase5: + c.UpgradeConfig.ApricotPhase5Time = tm + fallthrough + case apricotPhase3: + c.UpgradeConfig.ApricotPhase3Time = tm + default: + require.FailNow(t, "unhandled fork", f) + } + + return c +} diff --git a/vms/platformvm/txs/executor/state_changes.go b/vms/platformvm/txs/executor/state_changes.go index 36981b095e8c..3086358304a3 100644 --- a/vms/platformvm/txs/executor/state_changes.go +++ b/vms/platformvm/txs/executor/state_changes.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -58,30 +57,6 @@ func VerifyNewChainTime( return nil } -func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { - var ( - timestamp = clk.Time() - parentTime = state.GetTimestamp() - ) - if parentTime.After(timestamp) { - timestamp = parentTime - } - // [timestamp] = max(now, parentTime) - - nextStakerChangeTime, err := GetNextStakerChangeTime(state) - if err != nil { - return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) - } - 
- // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] - timeWasCapped := !timestamp.Before(nextStakerChangeTime) - if timeWasCapped { - timestamp = nextStakerChangeTime - } - // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - return timestamp, timeWasCapped, nil -} - // AdvanceTimeTo applies all state changes to [parentState] resulting from // advancing the chain time to [newChainTime]. // Returns true iff the validator set changed. diff --git a/vms/platformvm/txs/fee/calculator.go b/vms/platformvm/txs/fee/calculator.go new file mode 100644 index 000000000000..f349f282f7ca --- /dev/null +++ b/vms/platformvm/txs/fee/calculator.go @@ -0,0 +1,142 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fee + +import ( + "time" + + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" +) + +var _ txs.Visitor = (*calculator)(nil) + +func NewStaticCalculator(config StaticConfig, upgradeTimes upgrade.Config) *Calculator { + return &Calculator{ + config: config, + upgradeTimes: upgradeTimes, + } +} + +type Calculator struct { + config StaticConfig + upgradeTimes upgrade.Config +} + +// [CalculateFee] returns the minimal fee needed to accept [tx], at chain time [time] +func (c *Calculator) CalculateFee(tx txs.UnsignedTx, time time.Time) uint64 { + tmp := &calculator{ + upgrades: c.upgradeTimes, + staticCfg: c.config, + time: time, + } + + // this is guaranteed to never return an error + _ = tx.Visit(tmp) + return tmp.fee +} + +// calculator is intentionally unexported and used through Calculator to provide +// a more convenient API +type calculator struct { + // Pre E-fork inputs + upgrades upgrade.Config + staticCfg StaticConfig + time time.Time + + // outputs of visitor execution + fee uint64 +} + +func (c *calculator) AddValidatorTx(*txs.AddValidatorTx) 
error { + c.fee = c.staticCfg.AddPrimaryNetworkValidatorFee + return nil +} + +func (c *calculator) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { + c.fee = c.staticCfg.AddSubnetValidatorFee + return nil +} + +func (c *calculator) AddDelegatorTx(*txs.AddDelegatorTx) error { + c.fee = c.staticCfg.AddPrimaryNetworkDelegatorFee + return nil +} + +func (c *calculator) CreateChainTx(*txs.CreateChainTx) error { + if c.upgrades.IsApricotPhase3Activated(c.time) { + c.fee = c.staticCfg.CreateBlockchainTxFee + } else { + c.fee = c.staticCfg.CreateAssetTxFee + } + return nil +} + +func (c *calculator) CreateSubnetTx(*txs.CreateSubnetTx) error { + if c.upgrades.IsApricotPhase3Activated(c.time) { + c.fee = c.staticCfg.CreateSubnetTxFee + } else { + c.fee = c.staticCfg.CreateAssetTxFee + } + return nil +} + +func (c *calculator) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + c.fee = 0 // no fees + return nil +} + +func (c *calculator) RewardValidatorTx(*txs.RewardValidatorTx) error { + c.fee = 0 // no fees + return nil +} + +func (c *calculator) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) TransformSubnetTx(*txs.TransformSubnetTx) error { + c.fee = c.staticCfg.TransformSubnetTxFee + return nil +} + +func (c *calculator) TransferSubnetOwnershipTx(*txs.TransferSubnetOwnershipTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { + if tx.Subnet != constants.PrimaryNetworkID { + c.fee = c.staticCfg.AddSubnetValidatorFee + } else { + c.fee = c.staticCfg.AddPrimaryNetworkValidatorFee + } + return nil +} + +func (c *calculator) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { + if tx.Subnet != constants.PrimaryNetworkID { + c.fee = c.staticCfg.AddSubnetDelegatorFee + } else { + c.fee = c.staticCfg.AddPrimaryNetworkDelegatorFee + } + return nil +} + +func (c *calculator) 
BaseTx(*txs.BaseTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) ImportTx(*txs.ImportTx) error { + c.fee = c.staticCfg.TxFee + return nil +} + +func (c *calculator) ExportTx(*txs.ExportTx) error { + c.fee = c.staticCfg.TxFee + return nil +} diff --git a/vms/platformvm/txs/fee/calculator_test.go b/vms/platformvm/txs/fee/calculator_test.go new file mode 100644 index 000000000000..c25fec9073e8 --- /dev/null +++ b/vms/platformvm/txs/fee/calculator_test.go @@ -0,0 +1,251 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package fee + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" +) + +func TestTxFees(t *testing.T) { + feeTestsDefaultCfg := StaticConfig{ + TxFee: 1 * units.Avax, + CreateAssetTxFee: 2 * units.Avax, + CreateSubnetTxFee: 3 * units.Avax, + TransformSubnetTxFee: 4 * units.Avax, + CreateBlockchainTxFee: 5 * units.Avax, + AddPrimaryNetworkValidatorFee: 6 * units.Avax, + AddPrimaryNetworkDelegatorFee: 7 * units.Avax, + AddSubnetValidatorFee: 8 * units.Avax, + AddSubnetDelegatorFee: 9 * units.Avax, + } + + latestForkTime := time.Unix(1713945427, 0) + upgrades := upgrade.Config{ + EUpgradeTime: latestForkTime, + DurangoTime: latestForkTime.Add(-1 * time.Hour), + CortinaTime: latestForkTime.Add(-2 * time.Hour), + BanffTime: latestForkTime.Add(-3 * time.Hour), + ApricotPhase5Time: latestForkTime.Add(-4 * time.Hour), + ApricotPhase3Time: latestForkTime.Add(-5 * time.Hour), + } + + // chain times needed to have specific upgrades active + preEUpgradeTime := upgrades.EUpgradeTime.Add(-1 * time.Second) + preApricotPhase3Time := upgrades.ApricotPhase3Time.Add(-1 * time.Second) + + tests := []struct 
{ + name string + chainTime time.Time + unsignedTx func() txs.UnsignedTx + expected uint64 + }{ + { + name: "AddValidatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: addValidatorTx, + expected: feeTestsDefaultCfg.AddPrimaryNetworkValidatorFee, + }, + { + name: "AddSubnetValidatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: addSubnetValidatorTx, + expected: feeTestsDefaultCfg.AddSubnetValidatorFee, + }, + { + name: "AddDelegatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: addDelegatorTx, + expected: feeTestsDefaultCfg.AddPrimaryNetworkDelegatorFee, + }, + { + name: "CreateChainTx pre ApricotPhase3", + chainTime: preApricotPhase3Time, + unsignedTx: createChainTx, + expected: feeTestsDefaultCfg.CreateAssetTxFee, + }, + { + name: "CreateChainTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: createChainTx, + expected: feeTestsDefaultCfg.CreateBlockchainTxFee, + }, + { + name: "CreateSubnetTx pre ApricotPhase3", + chainTime: preApricotPhase3Time, + unsignedTx: createSubnetTx, + expected: feeTestsDefaultCfg.CreateAssetTxFee, + }, + { + name: "CreateSubnetTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: createSubnetTx, + expected: feeTestsDefaultCfg.CreateSubnetTxFee, + }, + { + name: "RemoveSubnetValidatorTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: removeSubnetValidatorTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "TransformSubnetTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: transformSubnetTx, + expected: feeTestsDefaultCfg.TransformSubnetTxFee, + }, + { + name: "TransferSubnetOwnershipTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: transferSubnetOwnershipTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "AddPermissionlessValidatorTx Primary Network pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return addPermissionlessValidatorTx(constants.PrimaryNetworkID) + }, 
+ expected: feeTestsDefaultCfg.AddPrimaryNetworkValidatorFee, + }, + { + name: "AddPermissionlessValidatorTx Subnet pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + subnetID := ids.GenerateTestID() + require.NotEqual(t, constants.PrimaryNetworkID, subnetID) + return addPermissionlessValidatorTx(subnetID) + }, + expected: feeTestsDefaultCfg.AddSubnetValidatorFee, + }, + { + name: "AddPermissionlessDelegatorTx Primary Network pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return addPermissionlessDelegatorTx(constants.PrimaryNetworkID) + }, + expected: feeTestsDefaultCfg.AddPrimaryNetworkDelegatorFee, + }, + { + name: "AddPermissionlessDelegatorTx pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + subnetID := ids.GenerateTestID() + require.NotEqual(t, constants.PrimaryNetworkID, subnetID) + return addPermissionlessDelegatorTx(subnetID) + }, + expected: feeTestsDefaultCfg.AddSubnetDelegatorFee, + }, + { + name: "BaseTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: baseTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "ImportTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: importTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "ExportTx pre EUpgrade", + chainTime: preEUpgradeTime, + unsignedTx: exportTx, + expected: feeTestsDefaultCfg.TxFee, + }, + { + name: "RewardValidatorTx pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return &txs.RewardValidatorTx{ + TxID: ids.GenerateTestID(), + } + }, + expected: 0, + }, + { + name: "AdvanceTimeTx pre EUpgrade", + chainTime: upgrades.EUpgradeTime.Add(-1 * time.Second), + unsignedTx: func() txs.UnsignedTx { + return &txs.AdvanceTimeTx{ + Time: uint64(time.Now().Unix()), + } + }, + expected: 0, + }, + } + + for _, tt := range tests { 
+ t.Run(tt.name, func(t *testing.T) { + uTx := tt.unsignedTx() + fc := NewStaticCalculator(feeTestsDefaultCfg, upgrades) + require.Equal(t, tt.expected, fc.CalculateFee(uTx, tt.chainTime)) + }) + } +} + +func addValidatorTx() txs.UnsignedTx { + return &txs.AddValidatorTx{} +} + +func addSubnetValidatorTx() txs.UnsignedTx { + return &txs.AddSubnetValidatorTx{} +} + +func addDelegatorTx() txs.UnsignedTx { + return &txs.AddDelegatorTx{} +} + +func createChainTx() txs.UnsignedTx { + return &txs.CreateChainTx{} +} + +func createSubnetTx() txs.UnsignedTx { + return &txs.CreateSubnetTx{} +} + +func removeSubnetValidatorTx() txs.UnsignedTx { + return &txs.RemoveSubnetValidatorTx{} +} + +func transformSubnetTx() txs.UnsignedTx { + return &txs.TransformSubnetTx{} +} + +func transferSubnetOwnershipTx() txs.UnsignedTx { + return &txs.TransferSubnetOwnershipTx{} +} + +func addPermissionlessValidatorTx(subnetID ids.ID) txs.UnsignedTx { + return &txs.AddPermissionlessValidatorTx{ + Subnet: subnetID, + } +} + +func addPermissionlessDelegatorTx(subnetID ids.ID) txs.UnsignedTx { + return &txs.AddPermissionlessDelegatorTx{ + Subnet: subnetID, + } +} + +func baseTx() txs.UnsignedTx { + return &txs.BaseTx{} +} + +func importTx() txs.UnsignedTx { + return &txs.ImportTx{} +} + +func exportTx() txs.UnsignedTx { + return &txs.ExportTx{} +} diff --git a/vms/platformvm/txs/fee/static_config.go b/vms/platformvm/txs/fee/static_config.go new file mode 100644 index 000000000000..e03fb701806a --- /dev/null +++ b/vms/platformvm/txs/fee/static_config.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package fee + +type StaticConfig struct { + // Fee that is burned by every non-state creating transaction + TxFee uint64 `json:"txFee"` + + // Fee that must be burned by every state creating transaction before AP3 + CreateAssetTxFee uint64 `json:"createAssetTxFee"` + + // Fee that must be burned by every subnet creating transaction after AP3 + CreateSubnetTxFee uint64 `json:"createSubnetTxFee"` + + // Fee that must be burned by every transform subnet transaction + TransformSubnetTxFee uint64 `json:"transformSubnetTxFee"` + + // Fee that must be burned by every blockchain creating transaction after AP3 + CreateBlockchainTxFee uint64 `json:"createBlockchainTxFee"` + + // Transaction fee for adding a primary network validator + AddPrimaryNetworkValidatorFee uint64 `json:"addPrimaryNetworkValidatorFee"` + + // Transaction fee for adding a primary network delegator + AddPrimaryNetworkDelegatorFee uint64 `json:"addPrimaryNetworkDelegatorFee"` + + // Transaction fee for adding a subnet validator + AddSubnetValidatorFee uint64 `json:"addSubnetValidatorFee"` + + // Transaction fee for adding a subnet delegator + AddSubnetDelegatorFee uint64 `json:"addSubnetDelegatorFee"` +} diff --git a/vms/platformvm/txs/mempool/mempool.go b/vms/platformvm/txs/mempool/mempool.go index 34ee9c283745..af93ac2c7129 100644 --- a/vms/platformvm/txs/mempool/mempool.go +++ b/vms/platformvm/txs/mempool/mempool.go @@ -5,55 +5,24 @@ package mempool import ( "errors" - "fmt" - "sync" "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" - "github.com/ava-labs/avalanchego/utils/setmap" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -const ( - // MaxTxSize is the maximum number of bytes a transaction can use to be - // 
allowed into the mempool. - MaxTxSize = 64 * units.KiB - - // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache - droppedTxIDsCacheSize = 64 - // maxMempoolSize is the maximum number of bytes allowed in the mempool - maxMempoolSize = 64 * units.MiB + txmempool "github.com/ava-labs/avalanchego/vms/txs/mempool" ) var ( _ Mempool = (*mempool)(nil) - ErrDuplicateTx = errors.New("duplicate tx") - ErrTxTooLarge = errors.New("tx too large") - ErrMempoolFull = errors.New("mempool is full") - ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") ErrCantIssueAdvanceTimeTx = errors.New("can not issue an advance time tx") ErrCantIssueRewardValidatorTx = errors.New("can not issue a reward validator tx") ) type Mempool interface { - Add(tx *txs.Tx) error - Get(txID ids.ID) (*txs.Tx, bool) - // Remove [txs] and any conflicts of [txs] from the mempool. - Remove(txs ...*txs.Tx) - - // Peek returns the oldest tx in the mempool. - Peek() (tx *txs.Tx, exists bool) - - // Iterate iterates over the txs until f returns false - Iterate(f func(tx *txs.Tx) bool) + txmempool.Mempool[*txs.Tx] // RequestBuildBlock notifies the consensus engine that a block should be // built. If [emptyBlockPermitted] is true, the notification will be sent @@ -61,30 +30,12 @@ type Mempool interface { // a notification will only be sent if there is at least one transaction in // the mempool. RequestBuildBlock(emptyBlockPermitted bool) - - // Note: dropped txs are added to droppedTxIDs but are not evicted from - // unissued decision/staker txs. This allows previously dropped txs to be - // possibly reissued. - MarkDropped(txID ids.ID, reason error) - GetDropReason(txID ids.ID) error - - // Len returns the number of txs in the mempool. 
- Len() int } -// Transactions from clients that have not yet been put into blocks and added to -// consensus type mempool struct { - lock sync.RWMutex - unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] - consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs - bytesAvailable int - droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> verification error + txmempool.Mempool[*txs.Tx] toEngine chan<- common.Message - - numTxs prometheus.Gauge - bytesAvailableMetric prometheus.Gauge } func New( @@ -92,36 +43,20 @@ func New( registerer prometheus.Registerer, toEngine chan<- common.Message, ) (Mempool, error) { - m := &mempool{ - unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), - consumedUTXOs: setmap.New[ids.ID, ids.ID](), - bytesAvailable: maxMempoolSize, - droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, - toEngine: toEngine, - numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "txs", - Help: "Number of decision/staker transactions in the mempool", - }), - bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "bytes_available", - Help: "Number of bytes of space currently available in the mempool", - }), + metrics, err := txmempool.NewMetrics(namespace, registerer) + if err != nil { + return nil, err } - m.bytesAvailableMetric.Set(maxMempoolSize) - - err := utils.Err( - registerer.Register(m.numTxs), - registerer.Register(m.bytesAvailableMetric), + pool := txmempool.New[*txs.Tx]( + metrics, ) - return m, err + return &mempool{ + Mempool: pool, + toEngine: toEngine, + }, nil } func (m *mempool) Add(tx *txs.Tx) error { - m.lock.Lock() - defer m.lock.Unlock() - switch tx.Unsigned.(type) { case *txs.AdvanceTimeTx: return ErrCantIssueAdvanceTimeTx @@ -130,117 +65,11 @@ func (m *mempool) Add(tx *txs.Tx) error { default: } - // Note: a previously dropped tx can be re-added - txID := tx.ID() - if _, ok := m.unissuedTxs.Get(txID); ok { - return fmt.Errorf("%w: 
%s", ErrDuplicateTx, txID) - } - - txSize := len(tx.Bytes()) - if txSize > MaxTxSize { - return fmt.Errorf("%w: %s size (%d) > max size (%d)", - ErrTxTooLarge, - txID, - txSize, - MaxTxSize, - ) - } - if txSize > m.bytesAvailable { - return fmt.Errorf("%w: %s size (%d) > available space (%d)", - ErrMempoolFull, - txID, - txSize, - m.bytesAvailable, - ) - } - - inputs := tx.Unsigned.InputIDs() - if m.consumedUTXOs.HasOverlap(inputs) { - return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) - } - - m.unissuedTxs.Put(txID, tx) - m.numTxs.Inc() - m.bytesAvailable -= txSize - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - - // Mark these UTXOs as consumed in the mempool - m.consumedUTXOs.Put(txID, inputs) - - // An explicitly added tx must not be marked as dropped. - m.droppedTxIDs.Evict(txID) - - return nil -} - -func (m *mempool) Get(txID ids.ID) (*txs.Tx, bool) { - return m.unissuedTxs.Get(txID) -} - -func (m *mempool) Remove(txs ...*txs.Tx) { - m.lock.Lock() - defer m.lock.Unlock() - - for _, tx := range txs { - txID := tx.ID() - // If the transaction is in the mempool, remove it. - if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { - m.unissuedTxs.Delete(txID) - m.bytesAvailable += len(tx.Bytes()) - continue - } - - // If the transaction isn't in the mempool, remove any conflicts it has. 
- inputs := tx.Unsigned.InputIDs() - for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { - tx, _ := m.unissuedTxs.Get(removed.Key) - m.unissuedTxs.Delete(removed.Key) - m.bytesAvailable += len(tx.Bytes()) - } - } - m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) - m.numTxs.Set(float64(m.unissuedTxs.Len())) -} - -func (m *mempool) Peek() (*txs.Tx, bool) { - _, tx, exists := m.unissuedTxs.Oldest() - return tx, exists -} - -func (m *mempool) Iterate(f func(tx *txs.Tx) bool) { - m.lock.RLock() - defer m.lock.RUnlock() - - itr := m.unissuedTxs.NewIterator() - for itr.Next() { - if !f(itr.Value()) { - return - } - } -} - -func (m *mempool) MarkDropped(txID ids.ID, reason error) { - if errors.Is(reason, ErrMempoolFull) { - return - } - - m.lock.RLock() - defer m.lock.RUnlock() - - if _, ok := m.unissuedTxs.Get(txID); ok { - return - } - - m.droppedTxIDs.Put(txID, reason) -} - -func (m *mempool) GetDropReason(txID ids.ID) error { - err, _ := m.droppedTxIDs.Get(txID) - return err + return m.Mempool.Add(tx) } func (m *mempool) RequestBuildBlock(emptyBlockPermitted bool) { - if !emptyBlockPermitted && m.unissuedTxs.Len() == 0 { + if !emptyBlockPermitted && m.Len() == 0 { return } @@ -249,10 +78,3 @@ func (m *mempool) RequestBuildBlock(emptyBlockPermitted bool) { default: } } - -func (m *mempool) Len() int { - m.lock.RLock() - defer m.lock.RUnlock() - - return m.unissuedTxs.Len() -} diff --git a/vms/platformvm/txs/mempool/mempool_test.go b/vms/platformvm/txs/mempool/mempool_test.go deleted file mode 100644 index 3fadca5f4888..000000000000 --- a/vms/platformvm/txs/mempool/mempool_test.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package mempool - -import ( - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -var preFundedKeys = secp256k1.TestKeys() - -// shows that valid tx is not added to mempool if this would exceed its maximum -// size -func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { - require := require.New(t) - - registerer := prometheus.NewRegistry() - mpool, err := New("mempool", registerer, nil) - require.NoError(err) - - decisionTxs, err := createTestDecisionTxs(1) - require.NoError(err) - tx := decisionTxs[0] - - // shortcut to simulated almost filled mempool - mpool.(*mempool).bytesAvailable = len(tx.Bytes()) - 1 - - err = mpool.Add(tx) - require.ErrorIs(err, ErrMempoolFull) - - // tx should not be marked as dropped if the mempool is full - txID := tx.ID() - mpool.MarkDropped(txID, err) - require.NoError(mpool.GetDropReason(txID)) - - // shortcut to simulated almost filled mempool - mpool.(*mempool).bytesAvailable = len(tx.Bytes()) - - err = mpool.Add(tx) - require.NoError(err, "should have added tx to mempool") -} - -func TestDecisionTxsInMempool(t *testing.T) { - require := require.New(t) - - registerer := prometheus.NewRegistry() - mpool, err := New("mempool", registerer, nil) - require.NoError(err) - - decisionTxs, err := createTestDecisionTxs(2) - require.NoError(err) - - for _, tx := range decisionTxs { - // tx not already there - _, ok := mpool.Get(tx.ID()) - require.False(ok) - - // we can insert - require.NoError(mpool.Add(tx)) - - // we can get it - got, ok := mpool.Get(tx.ID()) - require.True(ok) - require.Equal(tx, got) - - // 
once removed it cannot be there - mpool.Remove(tx) - - _, ok = mpool.Get(tx.ID()) - require.False(ok) - - // we can reinsert it again to grow the mempool - require.NoError(mpool.Add(tx)) - } -} - -func TestProposalTxsInMempool(t *testing.T) { - require := require.New(t) - - registerer := prometheus.NewRegistry() - mpool, err := New("mempool", registerer, nil) - require.NoError(err) - - // The proposal txs are ordered by decreasing start time. This means after - // each insertion, the last inserted transaction should be on the top of the - // heap. - proposalTxs, err := createTestProposalTxs(2) - require.NoError(err) - - for _, tx := range proposalTxs { - _, ok := mpool.Get(tx.ID()) - require.False(ok) - - // we can insert - require.NoError(mpool.Add(tx)) - - // we can get it - got, ok := mpool.Get(tx.ID()) - require.Equal(tx, got) - require.True(ok) - - // once removed it cannot be there - mpool.Remove(tx) - - _, ok = mpool.Get(tx.ID()) - require.False(ok) - - // we can reinsert it again to grow the mempool - require.NoError(mpool.Add(tx)) - } -} - -func createTestDecisionTxs(count int) ([]*txs.Tx, error) { - decisionTxs := make([]*txs.Tx, 0, count) - for i := uint32(0); i < uint32(count); i++ { - utx := &txs.CreateChainTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: 10, - BlockchainID: ids.Empty.Prefix(uint64(i)), - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{'t', 'x', 'I', 'D'}, - OutputIndex: i, - }, - Asset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 'r', 't'}}, - In: &secp256k1fx.TransferInput{ - Amt: uint64(5678), - Input: secp256k1fx.Input{SigIndices: []uint32{i}}, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: ids.ID{'a', 's', 's', 'e', 'r', 't'}}, - Out: &secp256k1fx.TransferOutput{ - Amt: uint64(1234), - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{preFundedKeys[0].PublicKey().Address()}, - }, - }, - }}, - }}, - SubnetID: ids.GenerateTestID(), - ChainName: 
"chainName", - VMID: ids.GenerateTestID(), - FxIDs: []ids.ID{ids.GenerateTestID()}, - GenesisData: []byte{'g', 'e', 'n', 'D', 'a', 't', 'a'}, - SubnetAuth: &secp256k1fx.Input{SigIndices: []uint32{1}}, - } - - tx, err := txs.NewSigned(utx, txs.Codec, nil) - if err != nil { - return nil, err - } - decisionTxs = append(decisionTxs, tx) - } - return decisionTxs, nil -} - -// Proposal txs are sorted by decreasing start time -func createTestProposalTxs(count int) ([]*txs.Tx, error) { - now := time.Now() - proposalTxs := make([]*txs.Tx, 0, count) - for i := 0; i < count; i++ { - tx, err := generateAddValidatorTx( - uint64(now.Add(time.Duration(count-i)*time.Second).Unix()), // startTime - 0, // endTime - ) - if err != nil { - return nil, err - } - proposalTxs = append(proposalTxs, tx) - } - return proposalTxs, nil -} - -func generateAddValidatorTx(startTime uint64, endTime uint64) (*txs.Tx, error) { - utx := &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{}, - Validator: txs.Validator{ - NodeID: ids.GenerateTestNodeID(), - Start: startTime, - End: endTime, - }, - StakeOuts: nil, - RewardsOwner: &secp256k1fx.OutputOwners{}, - DelegationShares: 100, - } - - return txs.NewSigned(utx, txs.Codec, nil) -} - -func TestPeekTxs(t *testing.T) { - require := require.New(t) - - registerer := prometheus.NewRegistry() - toEngine := make(chan common.Message, 100) - mempool, err := New("mempool", registerer, toEngine) - require.NoError(err) - - testDecisionTxs, err := createTestDecisionTxs(1) - require.NoError(err) - testProposalTxs, err := createTestProposalTxs(1) - require.NoError(err) - - tx, exists := mempool.Peek() - require.False(exists) - require.Nil(tx) - - require.NoError(mempool.Add(testDecisionTxs[0])) - require.NoError(mempool.Add(testProposalTxs[0])) - - tx, exists = mempool.Peek() - require.True(exists) - require.Equal(tx, testDecisionTxs[0]) - require.NotEqual(tx, testProposalTxs[0]) - - mempool.Remove(testDecisionTxs[0]) - - tx, exists = mempool.Peek() - require.True(exists) - 
require.NotEqual(tx, testDecisionTxs[0]) - require.Equal(tx, testProposalTxs[0]) - - mempool.Remove(testProposalTxs[0]) - - tx, exists = mempool.Peek() - require.False(exists) - require.Nil(tx) -} - -func TestRemoveConflicts(t *testing.T) { - require := require.New(t) - - registerer := prometheus.NewRegistry() - toEngine := make(chan common.Message, 100) - mempool, err := New("mempool", registerer, toEngine) - require.NoError(err) - - txs, err := createTestDecisionTxs(1) - require.NoError(err) - conflictTxs, err := createTestDecisionTxs(1) - require.NoError(err) - - require.NoError(mempool.Add(txs[0])) - - tx, exists := mempool.Peek() - require.True(exists) - require.Equal(tx, txs[0]) - - mempool.Remove(conflictTxs[0]) - - _, exists = mempool.Peek() - require.False(exists) -} - -func TestIterate(t *testing.T) { - require := require.New(t) - - registerer := prometheus.NewRegistry() - toEngine := make(chan common.Message, 100) - mempool, err := New("mempool", registerer, toEngine) - require.NoError(err) - - testDecisionTxs, err := createTestDecisionTxs(1) - require.NoError(err) - decisionTx := testDecisionTxs[0] - - testProposalTxs, err := createTestProposalTxs(1) - require.NoError(err) - proposalTx := testProposalTxs[0] - - require.NoError(mempool.Add(decisionTx)) - require.NoError(mempool.Add(proposalTx)) - - expectedSet := set.Of( - decisionTx.ID(), - proposalTx.ID(), - ) - - set := set.NewSet[ids.ID](2) - mempool.Iterate(func(tx *txs.Tx) bool { - set.Add(tx.ID()) - return true - }) - - require.Equal(expectedSet, set) -} diff --git a/vms/platformvm/txs/tx.go b/vms/platformvm/txs/tx.go index 9874f66e0468..8047369c4a2d 100644 --- a/vms/platformvm/txs/tx.go +++ b/vms/platformvm/txs/tx.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" 
"github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -92,6 +93,10 @@ func (tx *Tx) Bytes() []byte { return tx.bytes } +func (tx *Tx) Size() int { + return len(tx.bytes) +} + func (tx *Tx) ID() ids.ID { return tx.TxID } @@ -117,6 +122,11 @@ func (tx *Tx) UTXOs() []*avax.UTXO { return utxos } +// InputIDs returns the set of inputs this transaction consumes +func (tx *Tx) InputIDs() set.Set[ids.ID] { + return tx.Unsigned.InputIDs() +} + func (tx *Tx) SyntacticVerify(ctx *snow.Context) error { switch { case tx == nil: diff --git a/vms/platformvm/txs/txstest/backend.go b/vms/platformvm/txs/txstest/backend.go new file mode 100644 index 000000000000..3ef798c0b69d --- /dev/null +++ b/vms/platformvm/txs/txstest/backend.go @@ -0,0 +1,81 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txstest + +import ( + "context" + "math" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + "github.com/ava-labs/avalanchego/wallet/chain/p/signer" +) + +var ( + _ builder.Backend = (*Backend)(nil) + _ signer.Backend = (*Backend)(nil) +) + +func newBackend( + addrs set.Set[ids.ShortID], + state state.State, + sharedMemory atomic.SharedMemory, +) *Backend { + return &Backend{ + addrs: addrs, + state: state, + sharedMemory: sharedMemory, + } +} + +type Backend struct { + addrs set.Set[ids.ShortID] + state state.State + sharedMemory atomic.SharedMemory +} + +func (b *Backend) UTXOs(_ context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) { + if 
sourceChainID == constants.PlatformChainID { + return avax.GetAllUTXOs(b.state, b.addrs) + } + + utxos, _, _, err := avax.GetAtomicUTXOs( + b.sharedMemory, + txs.Codec, + sourceChainID, + b.addrs, + ids.ShortEmpty, + ids.Empty, + math.MaxInt, + ) + return utxos, err +} + +func (b *Backend) GetUTXO(_ context.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) { + if chainID == constants.PlatformChainID { + return b.state.GetUTXO(utxoID) + } + + utxoBytes, err := b.sharedMemory.Get(chainID, [][]byte{utxoID[:]}) + if err != nil { + return nil, err + } + + utxo := avax.UTXO{} + if _, err := txs.Codec.Unmarshal(utxoBytes[0], &utxo); err != nil { + return nil, err + } + return &utxo, nil +} + +func (b *Backend) GetSubnetOwner(_ context.Context, subnetID ids.ID) (fx.Owner, error) { + return b.state.GetSubnetOwner(subnetID) +} diff --git a/vms/platformvm/txs/txstest/builder.go b/vms/platformvm/txs/txstest/builder.go new file mode 100644 index 000000000000..532720be981f --- /dev/null +++ b/vms/platformvm/txs/txstest/builder.go @@ -0,0 +1,43 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txstest + +import ( + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + "github.com/ava-labs/avalanchego/wallet/chain/p/signer" +) + +func NewWalletFactory( + ctx *snow.Context, + cfg *config.Config, + state state.State, +) *WalletFactory { + return &WalletFactory{ + ctx: ctx, + cfg: cfg, + state: state, + } +} + +type WalletFactory struct { + ctx *snow.Context + cfg *config.Config + state state.State +} + +func (w *WalletFactory) NewWallet(keys ...*secp256k1.PrivateKey) (builder.Builder, signer.Signer) { + var ( + kc = secp256k1fx.NewKeychain(keys...) + addrs = kc.Addresses() + backend = newBackend(addrs, w.state, w.ctx.SharedMemory) + context = newContext(w.ctx, w.cfg, w.state.GetTimestamp()) + ) + + return builder.New(addrs, context, backend), signer.New(kc, backend) +} diff --git a/vms/platformvm/txs/txstest/context.go b/vms/platformvm/txs/txstest/context.go new file mode 100644 index 000000000000..ec2252a632e1 --- /dev/null +++ b/vms/platformvm/txs/txstest/context.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txstest + +import ( + "time" + + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" +) + +func newContext( + ctx *snow.Context, + cfg *config.Config, + timestamp time.Time, +) *builder.Context { + var ( + feeCalc = fee.NewStaticCalculator(cfg.StaticFeeConfig, cfg.UpgradeConfig) + createSubnetFee = feeCalc.CalculateFee(&txs.CreateSubnetTx{}, timestamp) + createChainFee = feeCalc.CalculateFee(&txs.CreateChainTx{}, timestamp) + ) + + return &builder.Context{ + NetworkID: ctx.NetworkID, + AVAXAssetID: ctx.AVAXAssetID, + BaseTxFee: cfg.StaticFeeConfig.TxFee, + CreateSubnetTxFee: createSubnetFee, + TransformSubnetTxFee: cfg.StaticFeeConfig.TransformSubnetTxFee, + CreateBlockchainTxFee: createChainFee, + AddPrimaryNetworkValidatorFee: cfg.StaticFeeConfig.AddPrimaryNetworkValidatorFee, + AddPrimaryNetworkDelegatorFee: cfg.StaticFeeConfig.AddPrimaryNetworkDelegatorFee, + AddSubnetValidatorFee: cfg.StaticFeeConfig.AddSubnetValidatorFee, + AddSubnetDelegatorFee: cfg.StaticFeeConfig.AddSubnetDelegatorFee, + } +} diff --git a/vms/platformvm/upgrade/config.go b/vms/platformvm/upgrade/config.go new file mode 100644 index 000000000000..1d92736a2ee3 --- /dev/null +++ b/vms/platformvm/upgrade/config.go @@ -0,0 +1,50 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package upgrade + +import "time" + +type Config struct { + // Time of the AP3 network upgrade + ApricotPhase3Time time.Time + + // Time of the AP5 network upgrade + ApricotPhase5Time time.Time + + // Time of the Banff network upgrade + BanffTime time.Time + + // Time of the Cortina network upgrade + CortinaTime time.Time + + // Time of the Durango network upgrade + DurangoTime time.Time + + // Time of the E network upgrade + EUpgradeTime time.Time +} + +func (c *Config) IsApricotPhase3Activated(timestamp time.Time) bool { + return !timestamp.Before(c.ApricotPhase3Time) +} + +func (c *Config) IsApricotPhase5Activated(timestamp time.Time) bool { + return !timestamp.Before(c.ApricotPhase5Time) +} + +func (c *Config) IsBanffActivated(timestamp time.Time) bool { + return !timestamp.Before(c.BanffTime) +} + +func (c *Config) IsCortinaActivated(timestamp time.Time) bool { + return !timestamp.Before(c.CortinaTime) +} + +func (c *Config) IsDurangoActivated(timestamp time.Time) bool { + return !timestamp.Before(c.DurangoTime) +} + +func (c *Config) IsEActivated(timestamp time.Time) bool { + return !timestamp.Before(c.EUpgradeTime) +} diff --git a/vms/platformvm/utxo/handler.go b/vms/platformvm/utxo/handler.go deleted file mode 100644 index 6368d97c11c8..000000000000 --- a/vms/platformvm/utxo/handler.go +++ /dev/null @@ -1,671 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package utxo - -import ( - "errors" - "fmt" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" - "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -var ( - _ Handler = (*handler)(nil) - - ErrInsufficientFunds = errors.New("insufficient funds") - ErrInsufficientUnlockedFunds = errors.New("insufficient unlocked funds") - ErrInsufficientLockedFunds = errors.New("insufficient locked funds") - errWrongNumberCredentials = errors.New("wrong number of credentials") - errWrongNumberUTXOs = errors.New("wrong number of UTXOs") - errAssetIDMismatch = errors.New("input asset ID does not match UTXO asset ID") - errLocktimeMismatch = errors.New("input locktime does not match UTXO locktime") - errCantSign = errors.New("can't sign") - errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") -) - -// TODO: Stake and Authorize should be replaced by similar methods in the -// P-chain wallet -type Spender interface { - // Spend the provided amount while deducting the provided fee. 
- // Arguments: - // - [keys] are the owners of the funds - // - [amount] is the amount of funds that are trying to be staked - // - [fee] is the amount of AVAX that should be burned - // - [changeAddr] is the address that change, if there is any, is sent to - // Returns: - // - [inputs] the inputs that should be consumed to fund the outputs - // - [returnedOutputs] the outputs that should be immediately returned to - // the UTXO set - // - [stakedOutputs] the outputs that should be locked for the duration of - // the staking period - // - [signers] the proof of ownership of the funds being moved - Spend( - utxoReader avax.UTXOReader, - keys []*secp256k1.PrivateKey, - amount uint64, - fee uint64, - changeAddr ids.ShortID, - ) ( - []*avax.TransferableInput, // inputs - []*avax.TransferableOutput, // returnedOutputs - []*avax.TransferableOutput, // stakedOutputs - [][]*secp256k1.PrivateKey, // signers - error, - ) - - // Authorize an operation on behalf of the named subnet with the provided - // keys. - Authorize( - state state.Chain, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, - ) ( - verify.Verifiable, // Input that names owners - []*secp256k1.PrivateKey, // Keys that prove ownership - error, - ) -} - -type Verifier interface { - // Verify that [tx] is semantically valid. - // [ins] and [outs] are the inputs and outputs of [tx]. - // [creds] are the credentials of [tx], which allow [ins] to be spent. - // [unlockedProduced] is the map of assets that were produced and their - // amounts. - // The [ins] must have at least [unlockedProduced] than the [outs]. - // - // Precondition: [tx] has already been syntactically verified. - // - // Note: [unlockedProduced] is modified by this method. - VerifySpend( - tx txs.UnsignedTx, - utxoDB avax.UTXOGetter, - ins []*avax.TransferableInput, - outs []*avax.TransferableOutput, - creds []verify.Verifiable, - unlockedProduced map[ids.ID]uint64, - ) error - - // Verify that [tx] is semantically valid. 
- // [utxos[i]] is the UTXO being consumed by [ins[i]]. - // [ins] and [outs] are the inputs and outputs of [tx]. - // [creds] are the credentials of [tx], which allow [ins] to be spent. - // [unlockedProduced] is the map of assets that were produced and their - // amounts. - // The [ins] must have at least [unlockedProduced] more than the [outs]. - // - // Precondition: [tx] has already been syntactically verified. - // - // Note: [unlockedProduced] is modified by this method. - VerifySpendUTXOs( - tx txs.UnsignedTx, - utxos []*avax.UTXO, - ins []*avax.TransferableInput, - outs []*avax.TransferableOutput, - creds []verify.Verifiable, - unlockedProduced map[ids.ID]uint64, - ) error -} - -type Handler interface { - Spender - Verifier -} - -func NewHandler( - ctx *snow.Context, - clk *mockable.Clock, - fx fx.Fx, -) Handler { - return &handler{ - ctx: ctx, - clk: clk, - fx: fx, - } -} - -type handler struct { - ctx *snow.Context - clk *mockable.Clock - fx fx.Fx -} - -func (h *handler) Spend( - utxoReader avax.UTXOReader, - keys []*secp256k1.PrivateKey, - amount uint64, - fee uint64, - changeAddr ids.ShortID, -) ( - []*avax.TransferableInput, // inputs - []*avax.TransferableOutput, // returnedOutputs - []*avax.TransferableOutput, // stakedOutputs - [][]*secp256k1.PrivateKey, // signers - error, -) { - addrs := set.NewSet[ids.ShortID](len(keys)) // The addresses controlled by [keys] - for _, key := range keys { - addrs.Add(key.PublicKey().Address()) - } - utxos, err := avax.GetAllUTXOs(utxoReader, addrs) // The UTXOs controlled by [keys] - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("couldn't get UTXOs: %w", err) - } - - kc := secp256k1fx.NewKeychain(keys...) 
// Keychain consumes UTXOs and creates new ones - - // Minimum time this transaction will be issued at - now := uint64(h.clk.Time().Unix()) - - ins := []*avax.TransferableInput{} - returnedOuts := []*avax.TransferableOutput{} - stakedOuts := []*avax.TransferableOutput{} - signers := [][]*secp256k1.PrivateKey{} - - // Amount of AVAX that has been staked - amountStaked := uint64(0) - - // Consume locked UTXOs - for _, utxo := range utxos { - // If we have consumed more AVAX than we are trying to stake, then we - // have no need to consume more locked AVAX - if amountStaked >= amount { - break - } - - if assetID := utxo.AssetID(); assetID != h.ctx.AVAXAssetID { - continue // We only care about staking AVAX, so ignore other assets - } - - out, ok := utxo.Out.(*stakeable.LockOut) - if !ok { - // This output isn't locked, so it will be handled during the next - // iteration of the UTXO set - continue - } - if out.Locktime <= now { - // This output is no longer locked, so it will be handled during the - // next iteration of the UTXO set - continue - } - - inner, ok := out.TransferableOut.(*secp256k1fx.TransferOutput) - if !ok { - // We only know how to clone secp256k1 outputs for now - continue - } - - inIntf, inSigners, err := kc.Spend(out.TransferableOut, now) - if err != nil { - // We couldn't spend the output, so move on to the next one - continue - } - in, ok := inIntf.(avax.TransferableIn) - if !ok { // should never happen - h.ctx.Log.Warn("wrong input type", - zap.String("expectedType", "avax.TransferableIn"), - zap.String("actualType", fmt.Sprintf("%T", inIntf)), - ) - continue - } - - // The remaining value is initially the full value of the input - remainingValue := in.Amount() - - // Stake any value that should be staked - amountToStake := min( - amount-amountStaked, // Amount we still need to stake - remainingValue, // Amount available to stake - ) - amountStaked += amountToStake - remainingValue -= amountToStake - - // Add the input to the consumed inputs - 
ins = append(ins, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: avax.Asset{ID: h.ctx.AVAXAssetID}, - In: &stakeable.LockIn{ - Locktime: out.Locktime, - TransferableIn: in, - }, - }) - - // Add the output to the staked outputs - stakedOuts = append(stakedOuts, &avax.TransferableOutput{ - Asset: avax.Asset{ID: h.ctx.AVAXAssetID}, - Out: &stakeable.LockOut{ - Locktime: out.Locktime, - TransferableOut: &secp256k1fx.TransferOutput{ - Amt: amountToStake, - OutputOwners: inner.OutputOwners, - }, - }, - }) - - if remainingValue > 0 { - // This input provided more value than was needed to be locked. - // Some of it must be returned - returnedOuts = append(returnedOuts, &avax.TransferableOutput{ - Asset: avax.Asset{ID: h.ctx.AVAXAssetID}, - Out: &stakeable.LockOut{ - Locktime: out.Locktime, - TransferableOut: &secp256k1fx.TransferOutput{ - Amt: remainingValue, - OutputOwners: inner.OutputOwners, - }, - }, - }) - } - - // Add the signers needed for this input to the set of signers - signers = append(signers, inSigners) - } - - // Amount of AVAX that has been burned - amountBurned := uint64(0) - - for _, utxo := range utxos { - // If we have consumed more AVAX than we are trying to stake, - // and we have burned more AVAX than we need to, - // then we have no need to consume more AVAX - if amountBurned >= fee && amountStaked >= amount { - break - } - - if assetID := utxo.AssetID(); assetID != h.ctx.AVAXAssetID { - continue // We only care about burning AVAX, so ignore other assets - } - - out := utxo.Out - inner, ok := out.(*stakeable.LockOut) - if ok { - if inner.Locktime > now { - // This output is currently locked, so this output can't be - // burned. Additionally, it may have already been consumed - // above. 
Regardless, we skip to the next UTXO - continue - } - out = inner.TransferableOut - } - - inIntf, inSigners, err := kc.Spend(out, now) - if err != nil { - // We couldn't spend this UTXO, so we skip to the next one - continue - } - in, ok := inIntf.(avax.TransferableIn) - if !ok { - // Because we only use the secp Fx right now, this should never - // happen - continue - } - - // The remaining value is initially the full value of the input - remainingValue := in.Amount() - - // Burn any value that should be burned - amountToBurn := min( - fee-amountBurned, // Amount we still need to burn - remainingValue, // Amount available to burn - ) - amountBurned += amountToBurn - remainingValue -= amountToBurn - - // Stake any value that should be staked - amountToStake := min( - amount-amountStaked, // Amount we still need to stake - remainingValue, // Amount available to stake - ) - amountStaked += amountToStake - remainingValue -= amountToStake - - // Add the input to the consumed inputs - ins = append(ins, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: avax.Asset{ID: h.ctx.AVAXAssetID}, - In: in, - }) - - if amountToStake > 0 { - // Some of this input was put for staking - stakedOuts = append(stakedOuts, &avax.TransferableOutput{ - Asset: avax.Asset{ID: h.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amountToStake, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{changeAddr}, - }, - }, - }) - } - - if remainingValue > 0 { - // This input had extra value, so some of it must be returned - returnedOuts = append(returnedOuts, &avax.TransferableOutput{ - Asset: avax.Asset{ID: h.ctx.AVAXAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: remainingValue, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{changeAddr}, - }, - }, - }) - } - - // Add the signers needed for this input to the set of signers - signers = append(signers, inSigners) - } - - if amountBurned < 
fee || amountStaked < amount { - return nil, nil, nil, nil, fmt.Errorf( - "%w (unlocked, locked) (%d, %d) but need (%d, %d)", - ErrInsufficientFunds, amountBurned, amountStaked, fee, amount, - ) - } - - avax.SortTransferableInputsWithSigners(ins, signers) // sort inputs and keys - avax.SortTransferableOutputs(returnedOuts, txs.Codec) // sort outputs - avax.SortTransferableOutputs(stakedOuts, txs.Codec) // sort outputs - - return ins, returnedOuts, stakedOuts, signers, nil -} - -func (h *handler) Authorize( - state state.Chain, - subnetID ids.ID, - keys []*secp256k1.PrivateKey, -) ( - verify.Verifiable, // Input that names owners - []*secp256k1.PrivateKey, // Keys that prove ownership - error, -) { - subnetOwner, err := state.GetSubnetOwner(subnetID) - if err != nil { - return nil, nil, fmt.Errorf( - "failed to fetch subnet owner for %s: %w", - subnetID, - err, - ) - } - - // Make sure the owners of the subnet match the provided keys - owner, ok := subnetOwner.(*secp256k1fx.OutputOwners) - if !ok { - return nil, nil, fmt.Errorf("expected *secp256k1fx.OutputOwners but got %T", subnetOwner) - } - - // Add the keys to a keychain - kc := secp256k1fx.NewKeychain(keys...) 
- - // Make sure that the operation is valid after a minimum time - now := uint64(h.clk.Time().Unix()) - - // Attempt to prove ownership of the subnet - indices, signers, matches := kc.Match(owner, now) - if !matches { - return nil, nil, errCantSign - } - - return &secp256k1fx.Input{SigIndices: indices}, signers, nil -} - -func (h *handler) VerifySpend( - tx txs.UnsignedTx, - utxoDB avax.UTXOGetter, - ins []*avax.TransferableInput, - outs []*avax.TransferableOutput, - creds []verify.Verifiable, - unlockedProduced map[ids.ID]uint64, -) error { - utxos := make([]*avax.UTXO, len(ins)) - for index, input := range ins { - utxo, err := utxoDB.GetUTXO(input.InputID()) - if err != nil { - return fmt.Errorf( - "failed to read consumed UTXO %s due to: %w", - &input.UTXOID, - err, - ) - } - utxos[index] = utxo - } - - return h.VerifySpendUTXOs(tx, utxos, ins, outs, creds, unlockedProduced) -} - -func (h *handler) VerifySpendUTXOs( - tx txs.UnsignedTx, - utxos []*avax.UTXO, - ins []*avax.TransferableInput, - outs []*avax.TransferableOutput, - creds []verify.Verifiable, - unlockedProduced map[ids.ID]uint64, -) error { - if len(ins) != len(creds) { - return fmt.Errorf( - "%w: %d inputs != %d credentials", - errWrongNumberCredentials, - len(ins), - len(creds), - ) - } - if len(ins) != len(utxos) { - return fmt.Errorf( - "%w: %d inputs != %d utxos", - errWrongNumberUTXOs, - len(ins), - len(utxos), - ) - } - for _, cred := range creds { // Verify credentials are well-formed. 
- if err := cred.Verify(); err != nil { - return err - } - } - - // Time this transaction is being verified - now := uint64(h.clk.Time().Unix()) - - // Track the amount of unlocked transfers - // assetID -> amount - unlockedConsumed := make(map[ids.ID]uint64) - - // Track the amount of locked transfers and their owners - // assetID -> locktime -> ownerID -> amount - lockedProduced := make(map[ids.ID]map[uint64]map[ids.ID]uint64) - lockedConsumed := make(map[ids.ID]map[uint64]map[ids.ID]uint64) - - for index, input := range ins { - utxo := utxos[index] // The UTXO consumed by [input] - - realAssetID := utxo.AssetID() - claimedAssetID := input.AssetID() - if realAssetID != claimedAssetID { - return fmt.Errorf( - "%w: %s != %s", - errAssetIDMismatch, - claimedAssetID, - realAssetID, - ) - } - - out := utxo.Out - locktime := uint64(0) - // Set [locktime] to this UTXO's locktime, if applicable - if inner, ok := out.(*stakeable.LockOut); ok { - out = inner.TransferableOut - locktime = inner.Locktime - } - - in := input.In - // The UTXO says it's locked until [locktime], but this input, which - // consumes it, is not locked even though [locktime] hasn't passed. This - // is invalid. 
- if inner, ok := in.(*stakeable.LockIn); now < locktime && !ok { - return errLockedFundsNotMarkedAsLocked - } else if ok { - if inner.Locktime != locktime { - // This input is locked, but its locktime is wrong - return fmt.Errorf( - "%w: %d != %d", - errLocktimeMismatch, - inner.Locktime, - locktime, - ) - } - in = inner.TransferableIn - } - - // Verify that this tx's credentials allow [in] to be spent - if err := h.fx.VerifyTransfer(tx, in, creds[index], out); err != nil { - return fmt.Errorf("failed to verify transfer: %w", err) - } - - amount := in.Amount() - - if now >= locktime { - newUnlockedConsumed, err := math.Add64(unlockedConsumed[realAssetID], amount) - if err != nil { - return err - } - unlockedConsumed[realAssetID] = newUnlockedConsumed - continue - } - - owned, ok := out.(fx.Owned) - if !ok { - return fmt.Errorf("expected fx.Owned but got %T", out) - } - owner := owned.Owners() - ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) - if err != nil { - return fmt.Errorf("couldn't marshal owner: %w", err) - } - lockedConsumedAsset, ok := lockedConsumed[realAssetID] - if !ok { - lockedConsumedAsset = make(map[uint64]map[ids.ID]uint64) - lockedConsumed[realAssetID] = lockedConsumedAsset - } - ownerID := hashing.ComputeHash256Array(ownerBytes) - owners, ok := lockedConsumedAsset[locktime] - if !ok { - owners = make(map[ids.ID]uint64) - lockedConsumedAsset[locktime] = owners - } - newAmount, err := math.Add64(owners[ownerID], amount) - if err != nil { - return err - } - owners[ownerID] = newAmount - } - - for _, out := range outs { - assetID := out.AssetID() - - output := out.Output() - locktime := uint64(0) - // Set [locktime] to this output's locktime, if applicable - if inner, ok := output.(*stakeable.LockOut); ok { - output = inner.TransferableOut - locktime = inner.Locktime - } - - amount := output.Amount() - - if locktime == 0 { - newUnlockedProduced, err := math.Add64(unlockedProduced[assetID], amount) - if err != nil { - return err - } - 
unlockedProduced[assetID] = newUnlockedProduced - continue - } - - owned, ok := output.(fx.Owned) - if !ok { - return fmt.Errorf("expected fx.Owned but got %T", out) - } - owner := owned.Owners() - ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) - if err != nil { - return fmt.Errorf("couldn't marshal owner: %w", err) - } - lockedProducedAsset, ok := lockedProduced[assetID] - if !ok { - lockedProducedAsset = make(map[uint64]map[ids.ID]uint64) - lockedProduced[assetID] = lockedProducedAsset - } - ownerID := hashing.ComputeHash256Array(ownerBytes) - owners, ok := lockedProducedAsset[locktime] - if !ok { - owners = make(map[ids.ID]uint64) - lockedProducedAsset[locktime] = owners - } - newAmount, err := math.Add64(owners[ownerID], amount) - if err != nil { - return err - } - owners[ownerID] = newAmount - } - - // Make sure that for each assetID and locktime, tokens produced <= tokens consumed - for assetID, producedAssetAmounts := range lockedProduced { - lockedConsumedAsset := lockedConsumed[assetID] - for locktime, producedAmounts := range producedAssetAmounts { - consumedAmounts := lockedConsumedAsset[locktime] - for ownerID, producedAmount := range producedAmounts { - consumedAmount := consumedAmounts[ownerID] - - if producedAmount > consumedAmount { - increase := producedAmount - consumedAmount - unlockedConsumedAsset := unlockedConsumed[assetID] - if increase > unlockedConsumedAsset { - return fmt.Errorf( - "%w: %s needs %d more %s for locktime %d", - ErrInsufficientLockedFunds, - ownerID, - increase-unlockedConsumedAsset, - assetID, - locktime, - ) - } - unlockedConsumed[assetID] = unlockedConsumedAsset - increase - } - } - } - } - - for assetID, unlockedProducedAsset := range unlockedProduced { - unlockedConsumedAsset := unlockedConsumed[assetID] - // More unlocked tokens produced than consumed. Invalid. 
- if unlockedProducedAsset > unlockedConsumedAsset { - return fmt.Errorf( - "%w: needs %d more %s", - ErrInsufficientUnlockedFunds, - unlockedProducedAsset-unlockedConsumedAsset, - assetID, - ) - } - } - return nil -} diff --git a/vms/platformvm/utxo/verifier.go b/vms/platformvm/utxo/verifier.go new file mode 100644 index 000000000000..4adde447bad5 --- /dev/null +++ b/vms/platformvm/utxo/verifier.go @@ -0,0 +1,333 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utxo + +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +var ( + _ Verifier = (*verifier)(nil) + + ErrInsufficientFunds = errors.New("insufficient funds") + ErrInsufficientUnlockedFunds = errors.New("insufficient unlocked funds") + ErrInsufficientLockedFunds = errors.New("insufficient locked funds") + errWrongNumberCredentials = errors.New("wrong number of credentials") + errWrongNumberUTXOs = errors.New("wrong number of UTXOs") + errAssetIDMismatch = errors.New("input asset ID does not match UTXO asset ID") + errLocktimeMismatch = errors.New("input locktime does not match UTXO locktime") + errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") +) + +type Verifier interface { + // Verify that [tx] is semantically valid. + // [ins] and [outs] are the inputs and outputs of [tx]. + // [creds] are the credentials of [tx], which allow [ins] to be spent. 
+ // [unlockedProduced] is the map of assets that were produced and their + // amounts. + // The [ins] must have at least [unlockedProduced] more than the [outs]. + // + // Precondition: [tx] has already been syntactically verified. + // + // Note: [unlockedProduced] is modified by this method. + VerifySpend( + tx txs.UnsignedTx, + utxoDB avax.UTXOGetter, + ins []*avax.TransferableInput, + outs []*avax.TransferableOutput, + creds []verify.Verifiable, + unlockedProduced map[ids.ID]uint64, + ) error + + // Verify that [tx] is semantically valid. + // [utxos[i]] is the UTXO being consumed by [ins[i]]. + // [ins] and [outs] are the inputs and outputs of [tx]. + // [creds] are the credentials of [tx], which allow [ins] to be spent. + // [unlockedProduced] is the map of assets that were produced and their + // amounts. + // The [ins] must have at least [unlockedProduced] more than the [outs]. + // + // Precondition: [tx] has already been syntactically verified. + // + // Note: [unlockedProduced] is modified by this method. 
+ VerifySpendUTXOs( + tx txs.UnsignedTx, + utxos []*avax.UTXO, + ins []*avax.TransferableInput, + outs []*avax.TransferableOutput, + creds []verify.Verifiable, + unlockedProduced map[ids.ID]uint64, + ) error +} + +func NewVerifier( + ctx *snow.Context, + clk *mockable.Clock, + fx fx.Fx, +) Verifier { + return &verifier{ + ctx: ctx, + clk: clk, + fx: fx, + } +} + +type verifier struct { + ctx *snow.Context + clk *mockable.Clock + fx fx.Fx +} + +func (h *verifier) VerifySpend( + tx txs.UnsignedTx, + utxoDB avax.UTXOGetter, + ins []*avax.TransferableInput, + outs []*avax.TransferableOutput, + creds []verify.Verifiable, + unlockedProduced map[ids.ID]uint64, +) error { + utxos := make([]*avax.UTXO, len(ins)) + for index, input := range ins { + utxo, err := utxoDB.GetUTXO(input.InputID()) + if err != nil { + return fmt.Errorf( + "failed to read consumed UTXO %s due to: %w", + &input.UTXOID, + err, + ) + } + utxos[index] = utxo + } + + return h.VerifySpendUTXOs(tx, utxos, ins, outs, creds, unlockedProduced) +} + +func (h *verifier) VerifySpendUTXOs( + tx txs.UnsignedTx, + utxos []*avax.UTXO, + ins []*avax.TransferableInput, + outs []*avax.TransferableOutput, + creds []verify.Verifiable, + unlockedProduced map[ids.ID]uint64, +) error { + if len(ins) != len(creds) { + return fmt.Errorf( + "%w: %d inputs != %d credentials", + errWrongNumberCredentials, + len(ins), + len(creds), + ) + } + if len(ins) != len(utxos) { + return fmt.Errorf( + "%w: %d inputs != %d utxos", + errWrongNumberUTXOs, + len(ins), + len(utxos), + ) + } + for _, cred := range creds { // Verify credentials are well-formed. 
+ if err := cred.Verify(); err != nil { + return err + } + } + + // Time this transaction is being verified + now := uint64(h.clk.Time().Unix()) + + // Track the amount of unlocked transfers + // assetID -> amount + unlockedConsumed := make(map[ids.ID]uint64) + + // Track the amount of locked transfers and their owners + // assetID -> locktime -> ownerID -> amount + lockedProduced := make(map[ids.ID]map[uint64]map[ids.ID]uint64) + lockedConsumed := make(map[ids.ID]map[uint64]map[ids.ID]uint64) + + for index, input := range ins { + utxo := utxos[index] // The UTXO consumed by [input] + + realAssetID := utxo.AssetID() + claimedAssetID := input.AssetID() + if realAssetID != claimedAssetID { + return fmt.Errorf( + "%w: %s != %s", + errAssetIDMismatch, + claimedAssetID, + realAssetID, + ) + } + + out := utxo.Out + locktime := uint64(0) + // Set [locktime] to this UTXO's locktime, if applicable + if inner, ok := out.(*stakeable.LockOut); ok { + out = inner.TransferableOut + locktime = inner.Locktime + } + + in := input.In + // The UTXO says it's locked until [locktime], but this input, which + // consumes it, is not locked even though [locktime] hasn't passed. This + // is invalid. 
+ if inner, ok := in.(*stakeable.LockIn); now < locktime && !ok { + return errLockedFundsNotMarkedAsLocked + } else if ok { + if inner.Locktime != locktime { + // This input is locked, but its locktime is wrong + return fmt.Errorf( + "%w: %d != %d", + errLocktimeMismatch, + inner.Locktime, + locktime, + ) + } + in = inner.TransferableIn + } + + // Verify that this tx's credentials allow [in] to be spent + if err := h.fx.VerifyTransfer(tx, in, creds[index], out); err != nil { + return fmt.Errorf("failed to verify transfer: %w", err) + } + + amount := in.Amount() + + if now >= locktime { + newUnlockedConsumed, err := math.Add64(unlockedConsumed[realAssetID], amount) + if err != nil { + return err + } + unlockedConsumed[realAssetID] = newUnlockedConsumed + continue + } + + owned, ok := out.(fx.Owned) + if !ok { + return fmt.Errorf("expected fx.Owned but got %T", out) + } + owner := owned.Owners() + ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) + if err != nil { + return fmt.Errorf("couldn't marshal owner: %w", err) + } + lockedConsumedAsset, ok := lockedConsumed[realAssetID] + if !ok { + lockedConsumedAsset = make(map[uint64]map[ids.ID]uint64) + lockedConsumed[realAssetID] = lockedConsumedAsset + } + ownerID := hashing.ComputeHash256Array(ownerBytes) + owners, ok := lockedConsumedAsset[locktime] + if !ok { + owners = make(map[ids.ID]uint64) + lockedConsumedAsset[locktime] = owners + } + newAmount, err := math.Add64(owners[ownerID], amount) + if err != nil { + return err + } + owners[ownerID] = newAmount + } + + for _, out := range outs { + assetID := out.AssetID() + + output := out.Output() + locktime := uint64(0) + // Set [locktime] to this output's locktime, if applicable + if inner, ok := output.(*stakeable.LockOut); ok { + output = inner.TransferableOut + locktime = inner.Locktime + } + + amount := output.Amount() + + if locktime == 0 { + newUnlockedProduced, err := math.Add64(unlockedProduced[assetID], amount) + if err != nil { + return err + } + 
unlockedProduced[assetID] = newUnlockedProduced + continue + } + + owned, ok := output.(fx.Owned) + if !ok { + return fmt.Errorf("expected fx.Owned but got %T", out) + } + owner := owned.Owners() + ownerBytes, err := txs.Codec.Marshal(txs.CodecVersion, owner) + if err != nil { + return fmt.Errorf("couldn't marshal owner: %w", err) + } + lockedProducedAsset, ok := lockedProduced[assetID] + if !ok { + lockedProducedAsset = make(map[uint64]map[ids.ID]uint64) + lockedProduced[assetID] = lockedProducedAsset + } + ownerID := hashing.ComputeHash256Array(ownerBytes) + owners, ok := lockedProducedAsset[locktime] + if !ok { + owners = make(map[ids.ID]uint64) + lockedProducedAsset[locktime] = owners + } + newAmount, err := math.Add64(owners[ownerID], amount) + if err != nil { + return err + } + owners[ownerID] = newAmount + } + + // Make sure that for each assetID and locktime, tokens produced <= tokens consumed + for assetID, producedAssetAmounts := range lockedProduced { + lockedConsumedAsset := lockedConsumed[assetID] + for locktime, producedAmounts := range producedAssetAmounts { + consumedAmounts := lockedConsumedAsset[locktime] + for ownerID, producedAmount := range producedAmounts { + consumedAmount := consumedAmounts[ownerID] + + if producedAmount > consumedAmount { + increase := producedAmount - consumedAmount + unlockedConsumedAsset := unlockedConsumed[assetID] + if increase > unlockedConsumedAsset { + return fmt.Errorf( + "%w: %s needs %d more %s for locktime %d", + ErrInsufficientLockedFunds, + ownerID, + increase-unlockedConsumedAsset, + assetID, + locktime, + ) + } + unlockedConsumed[assetID] = unlockedConsumedAsset - increase + } + } + } + } + + for assetID, unlockedProducedAsset := range unlockedProduced { + unlockedConsumedAsset := unlockedConsumed[assetID] + // More unlocked tokens produced than consumed. Invalid. 
+ if unlockedProducedAsset > unlockedConsumedAsset { + return fmt.Errorf( + "%w: needs %d more %s", + ErrInsufficientUnlockedFunds, + unlockedProducedAsset-unlockedConsumedAsset, + assetID, + ) + } + } + return nil +} diff --git a/vms/platformvm/utxo/handler_test.go b/vms/platformvm/utxo/verifier_test.go similarity index 99% rename from vms/platformvm/utxo/handler_test.go rename to vms/platformvm/utxo/verifier_test.go index d0224ed4666a..24bb95e024d0 100644 --- a/vms/platformvm/utxo/handler_test.go +++ b/vms/platformvm/utxo/verifier_test.go @@ -41,7 +41,7 @@ func TestVerifySpendUTXOs(t *testing.T) { ctx := snowtest.Context(t, snowtest.PChainID) - h := &handler{ + h := &verifier{ ctx: ctx, clk: &mockable.Clock{}, fx: fx, diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index cdac03ca53db..9ed3dc7cbd4d 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -30,7 +30,6 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -43,9 +42,15 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletsigner 
"github.com/ava-labs/avalanchego/wallet/chain/p/signer" + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) const ( @@ -253,21 +258,31 @@ func takeValidatorsSnapshotAtCurrentHeight(vm *VM, validatorsSetByHeightAndSubne } func addSubnetValidator(vm *VM, data *validatorInputData, subnetID ids.ID) (*state.Staker, error) { - addr := keys[0].PublicKey().Address() - signedTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - vm.Config.MinValidatorStake, - uint64(data.startTime.Unix()), - uint64(data.endTime.Unix()), - data.nodeID, - subnetID, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - addr, - nil, + factory := txstest.NewWalletFactory(vm.ctx, &vm.Config, vm.state) + builder, signer := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: data.nodeID, + Start: uint64(data.startTime.Unix()), + End: uint64(data.endTime.Unix()), + Wght: vm.Config.MinValidatorStake, + }, + Subnet: subnetID, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) if err != nil { - return nil, fmt.Errorf("could not create AddSubnetValidatorTx: %w", err) + return nil, fmt.Errorf("could not build AddSubnetValidatorTx: %w", err) + } + tx, err := walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, fmt.Errorf("could not sign AddSubnetValidatorTx: %w", err) } - return internalAddValidator(vm, signedTx) + return internalAddValidator(vm, tx) } func addPrimaryValidatorWithBLSKey(vm *VM, data *validatorInputData) (*state.Staker, error) { @@ -278,27 +293,47 @@ func addPrimaryValidatorWithBLSKey(vm *VM, data *validatorInputData) (*state.Sta return nil, fmt.Errorf("failed to generate BLS key: %w", err) } - signedTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.Config.MinValidatorStake, - uint64(data.startTime.Unix()), - uint64(data.endTime.Unix()), - 
data.nodeID, + factory := txstest.NewWalletFactory(vm.ctx, &vm.Config, vm.state) + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: data.nodeID, + Start: uint64(data.startTime.Unix()), + End: uint64(data.endTime.Unix()), + Wght: vm.Config.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - addr, + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - addr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) if err != nil { - return nil, fmt.Errorf("could not create AddPermissionlessValidatorTx: %w", err) + return nil, fmt.Errorf("could not build AddPermissionlessValidatorTx: %w", err) + } + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + if err != nil { + return nil, fmt.Errorf("could not sign AddPermissionlessValidatorTx: %w", err) } - return internalAddValidator(vm, signedTx) + return internalAddValidator(vm, tx) } func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { vm.ctx.Lock.Unlock() - err := vm.issueTx(context.Background(), signedTx) + err := vm.issueTxFromRPC(signedTx) vm.ctx.Lock.Lock() if err != nil { @@ -615,20 +650,25 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { UptimeLockedCalculator: uptime.NewLockedCalculator(), SybilProtectionEnabled: true, Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - TransformSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: 
defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - ApricotPhase3Time: forkTime, - ApricotPhase5Time: forkTime, - BanffTime: forkTime, - CortinaTime: forkTime, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: forkTime, + ApricotPhase5Time: forkTime, + BanffTime: forkTime, + CortinaTime: forkTime, + EUpgradeTime: mockable.MaxTime, + }, }} vm.clock.Set(forkTime.Add(time.Second)) @@ -646,7 +686,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { defer ctx.Lock.Unlock() appSender := &common.SenderTest{} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func(context.Context, []byte) error { + appSender.SendAppGossipF = func(context.Context, common.SendConfig, []byte) error { return nil } @@ -678,18 +718,27 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { // Create a subnet and store it in testSubnet1 // Note: following Banff activation, block acceptance will move // chain time ahead - testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( - 1, // threshold - []ids.ShortID{keys[0].PublicKey().Address()}, - []*secp256k1.PrivateKey{keys[len(keys)-1]}, // pays tx fee - keys[0].PublicKey().Address(), // change addr - nil, + factory := txstest.NewWalletFactory(vm.ctx, &vm.Config, vm.state) + builder, signer := factory.NewWallet(keys[len(keys)-1]) + utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + 
walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) if err != nil { return nil, ids.Empty, err } + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + if err != nil { + return nil, ids.Empty, err + } vm.ctx.Lock.Unlock() - err = vm.issueTx(context.Background(), testSubnet1) + err = vm.issueTxFromRPC(testSubnet1) vm.ctx.Lock.Lock() if err != nil { return nil, ids.Empty, err diff --git a/vms/platformvm/validators/manager.go b/vms/platformvm/validators/manager.go index 2c8b025a128b..781d119e226b 100644 --- a/vms/platformvm/validators/manager.go +++ b/vms/platformvm/validators/manager.go @@ -5,11 +5,11 @@ package validators import ( "context" + "errors" "fmt" "time" "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" @@ -30,7 +30,11 @@ const ( recentlyAcceptedWindowTTL = 2 * time.Minute ) -var _ validators.State = (*manager)(nil) +var ( + _ validators.State = (*manager)(nil) + + errUnfinalizedHeight = errors.New("failed to fetch validator set at unfinalized height") +) // Manager adds the ability to introduce newly accepted blocks IDs to the State // interface. 
@@ -247,7 +251,12 @@ func (m *manager) makePrimaryNetworkValidatorSet( return nil, 0, err } if currentHeight < targetHeight { - return nil, 0, database.ErrNotFound + return nil, 0, fmt.Errorf("%w with SubnetID = %s: current P-chain height (%d) < requested P-Chain height (%d)", + errUnfinalizedHeight, + constants.PrimaryNetworkID, + currentHeight, + targetHeight, + ) } // Rebuild primary network validators at [targetHeight] @@ -295,7 +304,12 @@ func (m *manager) makeSubnetValidatorSet( return nil, 0, err } if currentHeight < targetHeight { - return nil, 0, database.ErrNotFound + return nil, 0, fmt.Errorf("%w with SubnetID = %s: current P-chain height (%d) < requested P-Chain height (%d)", + errUnfinalizedHeight, + subnetID, + currentHeight, + targetHeight, + ) } // Rebuild subnet validators at [targetHeight] diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 912f3619e3e0..8215a54475a4 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -49,7 +49,6 @@ func BenchmarkGetValidatorSet(b *testing.B) { b.TempDir(), nil, logging.NoLog{}, - "", prometheus.NewRegistry(), ) require.NoError(err) @@ -104,7 +103,7 @@ func BenchmarkGetValidatorSet(b *testing.B) { execConfig, err := config.GetExecutionConfig(nil) require.NoError(err) - metrics, err := metrics.New("", prometheus.NewRegistry()) + metrics, err := metrics.New(prometheus.NewRegistry()) require.NoError(err) s, err := state.New( diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 8c4801e06ea0..efbfe0fa5453 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -12,9 +12,9 @@ import ( "time" "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/codec" 
"github.com/ava-labs/avalanchego/codec/linearcodec" @@ -35,20 +35,20 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/txs/mempool" snowmanblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block" blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" + platformvmmetrics "github.com/ava-labs/avalanchego/vms/platformvm/metrics" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + pmempool "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" pvalidators "github.com/ava-labs/avalanchego/vms/platformvm/validators" ) @@ -62,11 +62,10 @@ var ( type VM struct { config.Config blockbuilder.Builder - network.Network + *network.Network validators.State - metrics metrics.Metrics - atomicUtxosManager avax.AtomicUTXOManager + metrics platformvmmetrics.Metrics // Used to get time. Useful for faking time during tests. 
clock mockable.Clock @@ -85,16 +84,12 @@ type VM struct { // Bootstrapped remembers if this chain has finished bootstrapping or not bootstrapped utils.Atomic[bool] - txBuilder txbuilder.Builder - manager blockexecutor.Manager + manager blockexecutor.Manager // Cancelled on shutdown onShutdownCtx context.Context // Call [onShutdownCtxCancel] to cancel [onShutdownCtx] during Shutdown() onShutdownCtxCancel context.CancelFunc - - // TODO: Remove after v1.11.x is activated - pruned utils.Atomic[bool] } // Initialize this blockchain. @@ -118,13 +113,13 @@ func (vm *VM) Initialize( } chainCtx.Log.Info("using VM execution config", zap.Reflect("config", execConfig)) - registerer := prometheus.NewRegistry() - if err := chainCtx.Metrics.Register(registerer); err != nil { + registerer, err := metrics.MakeAndRegister(chainCtx.Metrics, "") + if err != nil { return err } // Initialize metrics as soon as possible - vm.metrics, err = metrics.New("", registerer) + vm.metrics, err = platformvmmetrics.New(registerer) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } @@ -133,7 +128,7 @@ func (vm *VM) Initialize( vm.db = db // Note: this codec is never used to serialize anything - vm.codecRegistry = linearcodec.NewDefault(time.Time{}) + vm.codecRegistry = linearcodec.NewDefault() vm.fx = &secp256k1fx.Fx{} if err := vm.fx.Initialize(vm); err != nil { return err @@ -157,33 +152,22 @@ func (vm *VM) Initialize( validatorManager := pvalidators.NewManager(chainCtx.Log, vm.Config, vm.state, vm.metrics, &vm.clock) vm.State = validatorManager - vm.atomicUtxosManager = avax.NewAtomicUTXOManager(chainCtx.SharedMemory, txs.Codec) - utxoHandler := utxo.NewHandler(vm.ctx, &vm.clock, vm.fx) + utxoVerifier := utxo.NewVerifier(vm.ctx, &vm.clock, vm.fx) vm.uptimeManager = uptime.NewManager(vm.state, &vm.clock) vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) - vm.txBuilder = txbuilder.New( - vm.ctx, - &vm.Config, - &vm.clock, - vm.fx, 
- vm.state, - vm.atomicUtxosManager, - utxoHandler, - ) - txExecutorBackend := &txexecutor.Backend{ Config: &vm.Config, Ctx: vm.ctx, Clk: &vm.clock, Fx: vm.fx, - FlowChecker: utxoHandler, + FlowChecker: utxoVerifier, Uptimes: vm.uptimeManager, Rewards: rewards, Bootstrapped: &vm.bootstrapped, } - mempool, err := mempool.New("mempool", registerer, toEngine) + mempool, err := pmempool.New("mempool", registerer, toEngine) if err != nil { return fmt.Errorf("failed to create mempool: %w", err) } @@ -219,7 +203,8 @@ func (vm *VM) Initialize( vm.onShutdownCtx, vm.onShutdownCtxCancel = context.WithCancel(context.Background()) // TODO: Wait for this goroutine to exit during Shutdown once the platformvm // has better control of the context lock. - go vm.Network.Gossip(vm.onShutdownCtx) + go vm.Network.PushGossip(vm.onShutdownCtx) + go vm.Network.PullGossip(vm.onShutdownCtx) vm.Builder = blockbuilder.New( mempool, @@ -247,28 +232,13 @@ func (vm *VM) Initialize( // [periodicallyPruneMempool] grabs the context lock. 
go vm.periodicallyPruneMempool(execConfig.MempoolPruneFrequency) - shouldPrune, err := vm.state.ShouldPrune() - if err != nil { - return fmt.Errorf( - "failed to check if the database should be pruned: %w", - err, - ) - } - if !shouldPrune { - chainCtx.Log.Info("state already pruned and indexed") - vm.pruned.Set(true) - return nil - } - go func() { - err := vm.state.PruneAndIndex(&vm.ctx.Lock, vm.ctx.Log) + err := vm.state.ReindexBlocks(&vm.ctx.Lock, vm.ctx.Log) if err != nil { - vm.ctx.Log.Error("state pruning and height indexing failed", + vm.ctx.Log.Warn("reindexing blocks failed", zap.Error(err), ) } - - vm.pruned.Set(true) }() return nil @@ -332,12 +302,12 @@ func (vm *VM) initBlockchains() error { } } } else { - subnets, err := vm.state.GetSubnets() + subnetIDs, err := vm.state.GetSubnetIDs() if err != nil { return err } - for _, subnet := range subnets { - if err := vm.createSubnet(subnet.ID()); err != nil { + for _, subnetID := range subnetIDs { + if err := vm.createSubnet(subnetID); err != nil { return err } } @@ -384,7 +354,7 @@ func (vm *VM) onNormalOperationsStarted() error { } vl := validators.NewLogger(vm.ctx.Log, constants.PrimaryNetworkID, vm.ctx.NodeID) - vm.Validators.RegisterCallbackListener(constants.PrimaryNetworkID, vl) + vm.Validators.RegisterSetCallbackListener(constants.PrimaryNetworkID, vl) for subnetID := range vm.TrackedSubnets { vdrIDs := vm.Validators.GetValidatorIDs(subnetID) @@ -393,7 +363,7 @@ func (vm *VM) onNormalOperationsStarted() error { } vl := validators.NewLogger(vm.ctx.Log, subnetID, vm.ctx.NodeID) - vm.Validators.RegisterCallbackListener(subnetID, vl) + vm.Validators.RegisterSetCallbackListener(subnetID, vl) } if err := vm.state.Commit(); err != nil { @@ -502,19 +472,25 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { }, err } -func (vm *VM) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { - return vm.uptimeManager.Connect(nodeID, constants.PrimaryNetworkID) 
+func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + if err := vm.uptimeManager.Connect(nodeID, constants.PrimaryNetworkID); err != nil { + return err + } + return vm.Network.Connected(ctx, nodeID, version) } func (vm *VM) ConnectedSubnet(_ context.Context, nodeID ids.NodeID, subnetID ids.ID) error { return vm.uptimeManager.Connect(nodeID, subnetID) } -func (vm *VM) Disconnected(_ context.Context, nodeID ids.NodeID) error { +func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { if err := vm.uptimeManager.Disconnect(nodeID); err != nil { return err } - return vm.state.Commit() + if err := vm.state.Commit(); err != nil { + return err + } + return vm.Network.Disconnected(ctx, nodeID) } func (vm *VM) CodecRegistry() codec.Registry { @@ -529,20 +505,12 @@ func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } -func (vm *VM) VerifyHeightIndex(_ context.Context) error { - if vm.pruned.Get() { - return nil - } - - return snowmanblock.ErrIndexIncomplete -} - func (vm *VM) GetBlockIDAtHeight(_ context.Context, height uint64) (ids.ID, error) { return vm.state.GetBlockIDAtHeight(height) } -func (vm *VM) issueTx(ctx context.Context, tx *txs.Tx) error { - err := vm.Network.IssueTx(ctx, tx) +func (vm *VM) issueTxFromRPC(tx *txs.Tx) error { + err := vm.Network.IssueTxFromRPC(tx) if err != nil && !errors.Is(err, mempool.ErrDuplicateTx) { vm.ctx.Log.Debug("failed to add tx to mempool", zap.Stringer("txID", tx.ID()), diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index e612340546fe..629be17fe1ba 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -44,14 +44,18 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" + 
"github.com/ava-labs/avalanchego/vms/platformvm/upgrade" "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -62,22 +66,31 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { changeAddr := keys[0].PublicKey().Address() // create valid tx - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - changeAddr, + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -98,21 +111,30 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { firstDelegatorEndTime := firstDelegatorStartTime.Add(vm.MinStakeDuration) // create valid tx - addFirstDelegatorTx, 
err := vm.txBuilder.NewAddDelegatorTx( - 4*vm.MinValidatorStake, // maximum amount of stake this delegator can provide - uint64(firstDelegatorStartTime.Unix()), - uint64(firstDelegatorEndTime.Unix()), - nodeID, - changeAddr, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uDelTx1, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(firstDelegatorStartTime.Unix()), + End: uint64(firstDelegatorEndTime.Unix()), + Wght: 4 * vm.MinValidatorStake, // maximum amount of stake this delegator can provide + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addFirstDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx1) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + require.NoError(vm.issueTxFromRPC(addFirstDelegatorTx)) vm.ctx.Lock.Lock() addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -135,21 +157,30 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { vm.clock.Set(secondDelegatorStartTime.Add(-10 * executor.SyncBound)) // create valid tx - addSecondDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - vm.MinDelegatorStake, - uint64(secondDelegatorStartTime.Unix()), - uint64(secondDelegatorEndTime.Unix()), - nodeID, - changeAddr, - []*secp256k1.PrivateKey{keys[0], keys[1], keys[3]}, - changeAddr, - nil, + builder, txSigner = factory.NewWallet(keys[0], keys[1], keys[3]) + uDelTx2, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(secondDelegatorStartTime.Unix()), + End: uint64(secondDelegatorEndTime.Unix()), + Wght: vm.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 
1, + Addrs: []ids.ShortID{changeAddr}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addSecondDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx2) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addSecondDelegatorTx)) + require.NoError(vm.issueTxFromRPC(addSecondDelegatorTx)) vm.ctx.Lock.Lock() addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -162,21 +193,30 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { thirdDelegatorEndTime := thirdDelegatorStartTime.Add(vm.MinStakeDuration) // create valid tx - addThirdDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - vm.MinDelegatorStake, - uint64(thirdDelegatorStartTime.Unix()), - uint64(thirdDelegatorEndTime.Unix()), - nodeID, - changeAddr, - []*secp256k1.PrivateKey{keys[0], keys[1], keys[4]}, - changeAddr, - nil, + builder, txSigner = factory.NewWallet(keys[0], keys[1], keys[4]) + uDelTx3, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(thirdDelegatorStartTime.Unix()), + End: uint64(thirdDelegatorEndTime.Unix()), + Wght: vm.MinDelegatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addThirdDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx3) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - err = vm.issueTx(context.Background(), addThirdDelegatorTx) + err = vm.issueTxFromRPC(addThirdDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) vm.ctx.Lock.Lock() } @@ -220,8 +260,8 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := 
require.New(t) - vm, _, _ := defaultVM(t, apricotPhase3) - vm.ApricotPhase3Time = test.ap3Time + vm, factory, _, _ := defaultVM(t, apricotPhase3) + vm.UpgradeConfig.ApricotPhase3Time = test.ap3Time vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -234,22 +274,31 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { changeAddr := keys[0].PublicKey().Address() // create valid tx - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( - validatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - id, + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: validatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // issue the add validator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -260,21 +309,29 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addFirstDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - delegator1Stake, - uint64(delegator1StartTime.Unix()), - uint64(delegator1EndTime.Unix()), - nodeID, - keys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uDelTx1, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: 
uint64(delegator1StartTime.Unix()), + End: uint64(delegator1EndTime.Unix()), + Wght: delegator1Stake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addFirstDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx1) + require.NoError(err) // issue the first add delegator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + require.NoError(vm.issueTxFromRPC(addFirstDelegatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the first add delegator tx @@ -285,21 +342,29 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addSecondDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - delegator2Stake, - uint64(delegator2StartTime.Unix()), - uint64(delegator2EndTime.Unix()), - nodeID, - keys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uDelTx2, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(delegator2StartTime.Unix()), + End: uint64(delegator2EndTime.Unix()), + Wght: delegator2Stake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addSecondDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx2) + require.NoError(err) // issue the second add delegator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addSecondDelegatorTx)) + require.NoError(vm.issueTxFromRPC(addSecondDelegatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the second add 
delegator tx @@ -310,21 +375,29 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addThirdDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - delegator3Stake, - uint64(delegator3StartTime.Unix()), - uint64(delegator3EndTime.Unix()), - nodeID, - keys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uDelTx3, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(delegator3StartTime.Unix()), + End: uint64(delegator3EndTime.Unix()), + Wght: delegator3Stake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addThirdDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx3) + require.NoError(err) // issue the third add delegator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addThirdDelegatorTx)) + require.NoError(vm.issueTxFromRPC(addThirdDelegatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the third add delegator tx @@ -335,21 +408,29 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addFourthDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - delegator4Stake, - uint64(delegator4StartTime.Unix()), - uint64(delegator4EndTime.Unix()), - nodeID, - keys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uDelTx4, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(delegator4StartTime.Unix()), + End: uint64(delegator4EndTime.Unix()), + Wght: delegator4Stake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addFourthDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx4) + require.NoError(err) // issue the fourth add delegator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addFourthDelegatorTx)) + require.NoError(vm.issueTxFromRPC(addFourthDelegatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the fourth add delegator tx @@ -377,9 +458,12 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: mockable.MaxTime, - DurangoTime: mockable.MaxTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: mockable.MaxTime, + DurangoTime: mockable.MaxTime, + EUpgradeTime: mockable.MaxTime, + }, }} ctx := snowtest.Context(t, snowtest.PChainID) @@ -416,32 +500,55 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { addr0 := key0.PublicKey().Address() addr1 := key1.PublicKey().Address() - addSubnetTx0, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{addr0}, - []*secp256k1.PrivateKey{key0}, - addr0, - nil, + factory := txstest.NewWalletFactory( + vm.ctx, + &vm.Config, + vm.state, ) + + builder, txSigner := factory.NewWallet(key0) + utx0, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr0}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr0}, + }), + ) + require.NoError(err) + addSubnetTx0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx0) require.NoError(err) - addSubnetTx1, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{addr1}, - []*secp256k1.PrivateKey{key1}, - addr1, - 
nil, + builder, txSigner = factory.NewWallet(key1) + utx1, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr1}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr1}, + }), ) require.NoError(err) + addSubnetTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx1) + require.NoError(err) - addSubnetTx2, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{addr1}, - []*secp256k1.PrivateKey{key1}, - addr0, - nil, + utx2, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr1}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr0}, + }), ) require.NoError(err) + addSubnetTx2, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx2) + require.NoError(err) preferredID := vm.manager.Preferred() preferred, err := vm.manager.GetBlock(preferredID) @@ -495,7 +602,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM(t, cortina) + vm, factory, baseDB, mutableSharedMemory := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -504,18 +611,23 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { newValidatorEndTime := newValidatorStartTime.Add(defaultMinStakingDuration) // Create the tx to add a new validator - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(newValidatorStartTime.Unix()), - uint64(newValidatorEndTime.Unix()), - nodeID, - ids.GenerateTestShortID(), + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(newValidatorStartTime.Unix()), + End: uint64(newValidatorEndTime.Unix()), + Wght: 
vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Create the standard block to add the new validator preferredID := vm.manager.Preferred() @@ -553,7 +665,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { ID: vm.ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, OutputOwners: secp256k1fx.OutputOwners{}, }, } @@ -570,7 +682,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { UTXOID: utxo.UTXOID, Asset: utxo.Asset, In: &secp256k1fx.TransferInput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, }, }, }, @@ -701,7 +813,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM(t, cortina) + vm, factory, baseDB, mutableSharedMemory := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -713,18 +825,23 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() // Create the tx to add the first new validator - addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( - vm.MaxValidatorStake, - uint64(newValidatorStartTime0.Unix()), - uint64(newValidatorEndTime0.Unix()), - nodeID0, - ids.GenerateTestShortID(), + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID0, + Start: uint64(newValidatorStartTime0.Unix()), + End: uint64(newValidatorEndTime0.Unix()), + Wght: vm.MaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addValidatorTx0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Create the standard block to add the first new validator preferredID := vm.manager.Preferred() @@ -797,7 +914,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { ID: vm.ctx.AVAXAssetID, }, Out: &secp256k1fx.TransferOutput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, OutputOwners: secp256k1fx.OutputOwners{}, }, } @@ -814,7 +931,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { UTXOID: utxo.UTXOID, Asset: utxo.Asset, In: &secp256k1fx.TransferInput{ - Amt: vm.TxFee, + Amt: vm.StaticFeeConfig.TxFee, }, }, }, @@ -886,18 +1003,23 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { nodeID1 := ids.GenerateTestNodeID() // Create the tx to add the second new validator - addValidatorTx1, err := vm.txBuilder.NewAddValidatorTx( - vm.MaxValidatorStake, - uint64(newValidatorStartTime1.Unix()), - uint64(newValidatorEndTime1.Unix()), - nodeID1, - ids.GenerateTestShortID(), + builder, txSigner = factory.NewWallet(keys[1]) + utx1, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID1, + Start: uint64(newValidatorStartTime1.Unix()), + End: uint64(newValidatorEndTime1.Unix()), + Wght: vm.MaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addValidatorTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx1) + require.NoError(err) // Create the standard block to add the second new validator preferredChainTime = importBlk.Timestamp() @@ -1015,7 +1137,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { 
func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1042,18 +1164,23 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { extraNodeID := ids.GenerateTestNodeID() // Create the tx to add the first new validator - addValidatorTx0, err := vm.txBuilder.NewAddValidatorTx( - vm.MaxValidatorStake, - uint64(newValidatorStartTime0.Unix()), - uint64(newValidatorEndTime0.Unix()), - extraNodeID, - ids.GenerateTestShortID(), + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: extraNodeID, + Start: uint64(newValidatorStartTime0.Unix()), + End: uint64(newValidatorEndTime0.Unix()), + Wght: vm.MaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - ids.GenerateTestShortID(), - nil, ) require.NoError(err) + addValidatorTx0, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // Create the standard block to add the first new validator preferredID := vm.manager.Preferred() @@ -1152,7 +1279,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2EndTime := delegator2StartTime.Add(3 * defaultMinStakingDuration) delegator2Stake := defaultMaxValidatorStake - validatorStake - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1164,22 +1291,31 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { changeAddr := keys[0].PublicKey().Address() // create valid tx - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( - validatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - id, + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + 
utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: validatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // issue the add validator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1190,21 +1326,29 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addFirstDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - delegator1Stake, - uint64(delegator1StartTime.Unix()), - uint64(delegator1EndTime.Unix()), - nodeID, - keys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uDelTx, err := builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(delegator1StartTime.Unix()), + End: uint64(delegator1EndTime.Unix()), + Wght: delegator1Stake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addFirstDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx) + require.NoError(err) // issue the first add delegator tx vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addFirstDelegatorTx)) + 
require.NoError(vm.issueTxFromRPC(addFirstDelegatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the first add delegator tx @@ -1215,22 +1359,30 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx - addSecondDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( - delegator2Stake, - uint64(delegator2StartTime.Unix()), - uint64(delegator2EndTime.Unix()), - nodeID, - keys[0].PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uDelTx, err = builder.NewAddDelegatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(delegator2StartTime.Unix()), + End: uint64(delegator2EndTime.Unix()), + Wght: delegator2Stake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addSecondDelegatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uDelTx) + require.NoError(err) // attempting to issue the second add delegator tx should fail because the // total stake weight would go over the limit. 
vm.ctx.Lock.Unlock() - err = vm.issueTx(context.Background(), addSecondDelegatorTx) + err = vm.issueTxFromRPC(addSecondDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) vm.ctx.Lock.Lock() } @@ -1241,7 +1393,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1252,21 +1404,30 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( - defaultMaxValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - id, + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultMaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1276,17 +1437,22 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(addValidatorBlock.Accept(context.Background())) 
require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{changeAddr}, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uSubnetTx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uSubnetTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + require.NoError(vm.issueTxFromRPC(createSubnetTx)) vm.ctx.Lock.Lock() // trigger block creation for the subnet tx @@ -1296,20 +1462,27 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - defaultMaxValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - createSubnetTx.ID(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uSubnetValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultMaxValidatorStake, + }, + Subnet: createSubnetTx.ID(), + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addSubnetValidatorTx)) 
+ require.NoError(vm.issueTxFromRPC(addSubnetValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1327,21 +1500,24 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(err) require.Empty(emptyValidatorSet) - removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( + uRemoveSubnetValTx, err := builder.NewRemoveSubnetValidatorTx( nodeID, createSubnetTx.ID(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + removeSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uRemoveSubnetValTx) + require.NoError(err) // Set the clock so that the validator will be moved from the pending // validator set into the current validator set. vm.clock.Set(validatorStartTime) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), removeSubnetValidatorTx)) + require.NoError(vm.issueTxFromRPC(removeSubnetValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1366,7 +1542,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t validatorStartTime := latestForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1377,21 +1553,30 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t nodeID := ids.GenerateTestNodeID() changeAddr := keys[0].PublicKey().Address() - addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( - defaultMaxValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - id, + builder, txSigner := factory.NewWallet(keys[0], keys[1]) + utx, err := 
builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultMaxValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1401,17 +1586,22 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(addValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{changeAddr}, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uCreateSubnetTx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + require.NoError(vm.issueTxFromRPC(createSubnetTx)) vm.ctx.Lock.Lock() // trigger block creation for the subnet tx @@ -1421,20 +1611,27 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(createSubnetBlock.Accept(context.Background())) 
require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - defaultMaxValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - createSubnetTx.ID(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultMaxValidatorStake, + }, + Subnet: createSubnetTx.ID(), + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + addSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addSubnetValidatorTx)) + require.NoError(vm.issueTxFromRPC(addSubnetValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1444,21 +1641,24 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t require.NoError(addSubnetValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( + uRemoveSubnetValTx, err := builder.NewRemoveSubnetValidatorTx( nodeID, createSubnetTx.ID(), - []*secp256k1.PrivateKey{keys[0], keys[1]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + removeSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uRemoveSubnetValTx) + require.NoError(err) // Set the clock so that the validator will be moved from the pending // validator set into the current 
validator set. vm.clock.Set(validatorStartTime) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), removeSubnetValidatorTx)) + require.NoError(vm.issueTxFromRPC(removeSubnetValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -1474,7 +1674,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1504,23 +1704,39 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) // build primary network validator with BLS key - primaryTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.MinValidatorStake, - uint64(primaryStartTime.Unix()), - uint64(primaryEndTime.Unix()), - nodeID, + builder, txSigner := factory.NewWallet(keys...) + uPrimaryTx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(primaryStartTime.Unix()), + End: uint64(primaryEndTime.Unix()), + Wght: vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk1), - addr, // reward address + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - keys, - addr, // change address - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) - uPrimaryTx := primaryTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + primaryTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryTx)) + 
require.NoError(vm.issueTxFromRPC(primaryTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1537,20 +1753,28 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) // insert the subnet validator - subnetTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetStartTime.Unix()), // Start time - uint64(subnetEndTime.Unix()), // end time - nodeID, // Node ID - subnetID, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - addr, - nil, + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(subnetStartTime.Unix()), + End: uint64(subnetEndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), subnetTx)) + require.NoError(vm.issueTxFromRPC(subnetTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1610,23 +1834,39 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require.NoError(err) require.NotEqual(sk1, sk2) - primaryRestartTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.MinValidatorStake, - uint64(primaryReStartTime.Unix()), - uint64(primaryReEndTime.Unix()), - nodeID, + builder, txSigner = factory.NewWallet(keys...) 
+ uPrimaryRestartTx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(primaryReStartTime.Unix()), + End: uint64(primaryReEndTime.Unix()), + Wght: vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk2), - addr, // reward address + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - keys, - addr, // change address - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) - uPrimaryRestartTx := primaryRestartTx.Unsigned.(*txs.AddPermissionlessValidatorTx) + primaryRestartTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryRestartTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + require.NoError(vm.issueTxFromRPC(primaryRestartTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1699,7 +1939,7 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1719,21 +1959,31 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // Add a primary network validator with no BLS key nodeID := ids.GenerateTestNodeID() addr := keys[0].PublicKey().Address() - primaryTx1, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(primaryStartTime1.Unix()), - uint64(primaryEndTime1.Unix()), - nodeID, - addr, + + builder, txSigner := factory.NewWallet(keys[0]) + uAddValTx1, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(primaryStartTime1.Unix()), + 
End: uint64(primaryEndTime1.Unix()), + Wght: vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - addr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + primaryTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx1) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryTx1)) + require.NoError(vm.issueTxFromRPC(primaryTx1)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1780,22 +2030,39 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { sk2, err := bls.NewSecretKey() require.NoError(err) - primaryRestartTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.MinValidatorStake, - uint64(primaryStartTime2.Unix()), - uint64(primaryEndTime2.Unix()), - nodeID, + builder, txSigner = factory.NewWallet(keys...) 
+ uPrimaryRestartTx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(primaryStartTime2.Unix()), + End: uint64(primaryEndTime2.Unix()), + Wght: vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk2), - addr, // reward address + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - keys, - addr, // change address - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + primaryRestartTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryRestartTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + require.NoError(vm.issueTxFromRPC(primaryRestartTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1828,7 +2095,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1852,21 +2119,31 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // Add a primary network validator with no BLS key nodeID := ids.GenerateTestNodeID() addr := keys[0].PublicKey().Address() - primaryTx1, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(primaryStartTime1.Unix()), - uint64(primaryEndTime1.Unix()), - nodeID, - addr, + + builder, txSigner := factory.NewWallet(keys[0]) + uPrimaryTx1, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(primaryStartTime1.Unix()), + End: uint64(primaryEndTime1.Unix()), + Wght: vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + 
Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - addr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + primaryTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryTx1) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryTx1)) + require.NoError(vm.issueTxFromRPC(primaryTx1)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1883,20 +2160,28 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require.NoError(err) // insert the subnet validator - subnetTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetStartTime.Unix()), // Start time - uint64(subnetEndTime.Unix()), // end time - nodeID, // Node ID - subnetID, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - addr, - nil, + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(subnetStartTime.Unix()), + End: uint64(subnetEndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), subnetTx)) + require.NoError(vm.issueTxFromRPC(subnetTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -1955,22 +2240,39 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { sk2, err := bls.NewSecretKey() require.NoError(err) - primaryRestartTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.MinValidatorStake, - 
uint64(primaryStartTime2.Unix()), - uint64(primaryEndTime2.Unix()), - nodeID, + builder, txSigner = factory.NewWallet(keys...) + uPrimaryRestartTx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(primaryStartTime2.Unix()), + End: uint64(primaryEndTime2.Unix()), + Wght: vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk2), - addr, // reward address + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - keys, - addr, // change address - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + primaryRestartTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryRestartTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryRestartTx)) + require.NoError(vm.issueTxFromRPC(primaryRestartTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -2012,7 +2314,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2034,21 +2336,31 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { // Add a primary network validator with no BLS key nodeID := ids.GenerateTestNodeID() addr := keys[0].PublicKey().Address() - primaryTx1, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(primaryStartTime1.Unix()), - uint64(primaryEndTime1.Unix()), - nodeID, - addr, + + builder, txSigner := factory.NewWallet(keys[0]) + uPrimaryTx1, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: 
nodeID, + Start: uint64(primaryStartTime1.Unix()), + End: uint64(primaryEndTime1.Unix()), + Wght: vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - addr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + primaryTx1, err := walletsigner.SignUnsigned(context.Background(), txSigner, uPrimaryTx1) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), primaryTx1)) + require.NoError(vm.issueTxFromRPC(primaryTx1)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -2062,20 +2374,28 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { require.NoError(err) // insert the subnet validator - subnetTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - 1, // Weight - uint64(subnetStartTime.Unix()), // Start time - uint64(subnetEndTime.Unix()), // end time - nodeID, // Node ID - subnetID, - []*secp256k1.PrivateKey{keys[0], keys[1]}, - addr, - nil, + builder, txSigner = factory.NewWallet(keys[0], keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(subnetStartTime.Unix()), + End: uint64(subnetEndTime.Unix()), + Wght: 1, + }, + Subnet: subnetID, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }), ) require.NoError(err) + subnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), subnetTx)) + require.NoError(vm.issueTxFromRPC(subnetTx)) vm.ctx.Lock.Lock() require.NoError(buildAndAcceptStandardBlock(vm)) @@ -2132,7 +2452,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { 
func TestValidatorSetRaceCondition(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, _, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2235,7 +2555,7 @@ func checkValidatorBlsKeyIsSet( return errors.New("unexpected BLS key") case expectedBlsKey != nil && val.PublicKey == nil: return errors.New("missing BLS key") - case !bytes.Equal(bls.SerializePublicKey(expectedBlsKey), bls.SerializePublicKey(val.PublicKey)): + case !bytes.Equal(bls.PublicKeyToUncompressedBytes(expectedBlsKey), bls.PublicKeyToUncompressedBytes(val.PublicKey)): return errors.New("incorrect BLS key") default: return nil diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 30de31201c20..9153ee6548bf 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -6,7 +6,6 @@ package platformvm import ( "bytes" "context" - "fmt" "testing" "time" @@ -20,12 +19,11 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" - "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/bootstrap" "github.com/ava-labs/avalanchego/snow/networking/benchlist" @@ -59,16 +57,22 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/fee" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/txstest" + "github.com/ava-labs/avalanchego/vms/platformvm/upgrade" 
"github.com/ava-labs/avalanchego/vms/secp256k1fx" + p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" smcon "github.com/ava-labs/avalanchego/snow/consensus/snowman" smeng "github.com/ava-labs/avalanchego/snow/engine/snowman" snowgetter "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" blockbuilder "github.com/ava-labs/avalanchego/vms/platformvm/block/builder" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" + walletbuilder "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) const ( @@ -77,6 +81,7 @@ const ( banff cortina durango + eUpgrade latestFork = durango @@ -201,7 +206,7 @@ func defaultGenesis(t *testing.T, avaxAssetID ids.ID) (*api.BuildGenesisArgs, [] return &buildGenesisArgs, genesisBytes } -func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemory) { +func defaultVM(t *testing.T, f fork) (*VM, *txstest.WalletFactory, database.Database, *mutableSharedMemory) { require := require.New(t) var ( apricotPhase3Time = mockable.MaxTime @@ -209,12 +214,16 @@ func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemo banffTime = mockable.MaxTime cortinaTime = mockable.MaxTime durangoTime = mockable.MaxTime + eUpgradeTime = mockable.MaxTime ) // always reset latestForkTime (a package level variable) // to ensure test independence latestForkTime = defaultGenesisTime.Add(time.Second) switch f { + case eUpgrade: + eUpgradeTime = latestForkTime + fallthrough case durango: durangoTime = latestForkTime fallthrough @@ -230,7 +239,7 @@ func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemo case 
apricotPhase3: apricotPhase3Time = latestForkTime default: - require.NoError(fmt.Errorf("unhandled fork %d", f)) + require.FailNow("unhandled fork", f) } vm := &VM{Config: config.Config{ @@ -238,21 +247,26 @@ func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemo UptimeLockedCalculator: uptime.NewLockedCalculator(), SybilProtectionEnabled: true, Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - TransformSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - ApricotPhase3Time: apricotPhase3Time, - ApricotPhase5Time: apricotPhase5Time, - BanffTime: banffTime, - CortinaTime: cortinaTime, - DurangoTime: durangoTime, + StaticFeeConfig: fee.StaticConfig{ + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + }, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + UpgradeConfig: upgrade.Config{ + ApricotPhase3Time: apricotPhase3Time, + ApricotPhase5Time: apricotPhase5Time, + BanffTime: banffTime, + CortinaTime: cortinaTime, + DurangoTime: durangoTime, + EUpgradeTime: eUpgradeTime, + }, }} db := memdb.New() @@ -274,7 +288,7 @@ func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemo _, genesisBytes := defaultGenesis(t, ctx.AVAXAssetID) appSender := &common.SenderTest{} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func(context.Context, []byte) 
error { + appSender.SendAppGossipF = func(context.Context, common.SendConfig, []byte) error { return nil } appSender.SendAppErrorF = func(context.Context, ids.NodeID, uint32, int32, string) error { @@ -299,21 +313,36 @@ func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemo require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + factory := txstest.NewWalletFactory( + ctx, + &vm.Config, + vm.state, + ) + // Create a subnet and store it in testSubnet1 // Note: following Banff activation, block acceptance will move // chain time ahead - var err error - testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( - 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet - // control keys are keys[0], keys[1], keys[2] - []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, - []*secp256k1.PrivateKey{keys[0]}, // pays tx fee - keys[0].PublicKey().Address(), // change addr - nil, + builder, signer := factory.NewWallet(keys[0]) + utx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ + keys[0].PublicKey().Address(), + keys[1].PublicKey().Address(), + keys[2].PublicKey().Address(), + }, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + testSubnet1, err = walletsigner.SignUnsigned(context.Background(), signer, utx) + require.NoError(err) + vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), testSubnet1)) + require.NoError(vm.issueTxFromRPC(testSubnet1)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -328,13 +357,13 @@ func defaultVM(t *testing.T, f fork) (*VM, database.Database, *mutableSharedMemo require.NoError(vm.Shutdown(context.Background())) }) - return vm, db, msm + return vm, factory, db, msm } // Ensure genesis 
state is parsed from bytes and stored correctly func TestGenesis(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, _, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -367,7 +396,7 @@ func TestGenesis(t *testing.T) { require.NoError(err) require.Equal(utxo.Address, addr) - require.Equal(uint64(utxo.Amount)-vm.CreateSubnetTxFee, out.Amount()) + require.Equal(uint64(utxo.Amount)-vm.StaticFeeConfig.CreateSubnetTxFee, out.Amount()) } } @@ -387,7 +416,7 @@ func TestGenesis(t *testing.T) { // accept proposal to add validator to primary network func TestAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -402,23 +431,36 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) // create valid tx - tx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.MinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - rewardAddress, + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), tx)) + 
require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) @@ -439,7 +481,7 @@ func TestAddValidatorCommit(t *testing.T) { // verify invalid attempt to add validator to primary network func TestInvalidAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -448,18 +490,23 @@ func TestInvalidAddValidatorCommit(t *testing.T) { endTime := startTime.Add(defaultMinStakingDuration) // create invalid tx - tx, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - ids.GenerateTestShortID(), + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) preferredID := vm.manager.Preferred() preferred, err := vm.manager.GetBlock(preferredID) @@ -490,7 +537,7 @@ func TestInvalidAddValidatorCommit(t *testing.T) { // Reject attempt to add validator to primary network func TestAddValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, cortina) + vm, factory, _, _ := defaultVM(t, cortina) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -502,22 +549,27 @@ func TestAddValidatorReject(t *testing.T) { ) // create valid tx - tx, err := vm.txBuilder.NewAddValidatorTx( - vm.MinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - rewardAddress, + builder, txSigner := 
factory.NewWallet(keys[0]) + utx, err := builder.NewAddValidatorTx( + &txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: vm.MinValidatorStake, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardAddress}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), tx)) + require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) @@ -536,7 +588,7 @@ func TestAddValidatorReject(t *testing.T) { // Reject proposal to add validator to primary network func TestAddValidatorInvalidNotReissued(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -550,23 +602,36 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { require.NoError(err) // create valid tx - tx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - vm.MinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - repeatNodeID, + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: repeatNodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: vm.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - ids.GenerateTestShortID(), + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, reward.PercentDenominator, - 
[]*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - err = vm.issueTx(context.Background(), tx) + err = vm.issueTxFromRPC(tx) vm.ctx.Lock.Lock() require.ErrorIs(err, txexecutor.ErrDuplicateValidator) } @@ -574,7 +639,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { // Accept proposal to add validator to subnet func TestAddSubnetValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -587,21 +652,25 @@ func TestAddSubnetValidatorAccept(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := vm.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, + builder, txSigner := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), tx)) + require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) @@ -622,7 +691,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { // Reject 
proposal to add validator to subnet func TestAddSubnetValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -635,21 +704,25 @@ func TestAddSubnetValidatorReject(t *testing.T) { // create valid tx // note that [startTime, endTime] is a subset of time that keys[0] // validates primary network ([defaultValidateStartTime, defaultValidateEndTime]) - tx, err := vm.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - testSubnet1.ID(), - []*secp256k1.PrivateKey{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, - ids.ShortEmpty, // change addr - nil, + builder, txSigner := factory.NewWallet(testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]) + utx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: defaultWeight, + }, + Subnet: testSubnet1.ID(), + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) // trigger block creation vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), tx)) + require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) @@ -669,7 +742,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { // Test case where primary network validator rewarded func TestRewardValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, _, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -737,7 +810,7 @@ func TestRewardValidatorAccept(t *testing.T) { // Test case where primary network validator not rewarded func TestRewardValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) 
+ vm, _, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -807,7 +880,7 @@ func TestRewardValidatorReject(t *testing.T) { // Ensure BuildBlock errors when there is no block to build func TestUnneededBuildBlock(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, _, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -818,24 +891,24 @@ func TestUnneededBuildBlock(t *testing.T) { // test acceptance of proposal to create a new chain func TestCreateChain(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - tx, err := vm.txBuilder.NewCreateChainTx( + builder, txSigner := factory.NewWallet(testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]) + utx, err := builder.NewCreateChainTx( testSubnet1.ID(), nil, ids.ID{'t', 'e', 's', 't', 'v', 'm'}, nil, "name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, // change addr - nil, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), tx)) + require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) @@ -868,25 +941,31 @@ func TestCreateChain(t *testing.T) { // 3) Advance timestamp to validator's end time (removing validator from current) func TestCreateSubnet(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - nodeID := genesisNodeIDs[0] - createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( - 1, // threshold - []ids.ShortID{ // control keys - keys[0].PublicKey().Address(), - keys[1].PublicKey().Address(), + builder, txSigner := 
factory.NewWallet(keys[0]) + uCreateSubnetTx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].PublicKey().Address(), + keys[1].PublicKey().Address(), + }, }, - []*secp256k1.PrivateKey{keys[0]}, // payer - keys[0].PublicKey().Address(), // change addr - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) + subnetID := createSubnetTx.ID() vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + require.NoError(vm.issueTxFromRPC(createSubnetTx)) vm.ctx.Lock.Lock() // should contain the CreateSubnetTx @@ -897,40 +976,36 @@ func TestCreateSubnet(t *testing.T) { require.NoError(blk.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - _, txStatus, err := vm.state.GetTx(createSubnetTx.ID()) + _, txStatus, err := vm.state.GetTx(subnetID) require.NoError(err) require.Equal(status.Committed, txStatus) - subnets, err := vm.state.GetSubnets() + subnetIDs, err := vm.state.GetSubnetIDs() require.NoError(err) - - found := false - for _, subnet := range subnets { - if subnet.ID() == createSubnetTx.ID() { - found = true - break - } - } - require.True(found) + require.Contains(subnetIDs, subnetID) // Now that we've created a new subnet, add a validator to that subnet + nodeID := genesisNodeIDs[0] startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) // [startTime, endTime] is subset of time keys[0] validates default subnet so tx is valid - addValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - defaultWeight, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - nodeID, - createSubnetTx.ID(), - 
[]*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, + uAddValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: defaultWeight, + }, + Subnet: subnetID, + }, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() blk, err = vm.Builder.BuildBlock(context.Background()) // should add validator to the new subnet @@ -945,10 +1020,10 @@ func TestCreateSubnet(t *testing.T) { require.NoError(err) require.Equal(status.Committed, txStatus) - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetPendingValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) - _, err = vm.state.GetCurrentValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.NoError(err) // fast forward clock to time validator should stop validating @@ -958,17 +1033,17 @@ func TestCreateSubnet(t *testing.T) { require.NoError(blk.Verify(context.Background())) require.NoError(blk.Accept(context.Background())) // remove validator from current validator set - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetPendingValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) - _, err = vm.state.GetCurrentValidator(createSubnetTx.ID(), nodeID) + _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) } // test asset import func TestAtomicImport(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM(t, latestFork) + vm, factory, baseDB, mutableSharedMemory := defaultVM(t, latestFork) 
vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -984,14 +1059,15 @@ func TestAtomicImport(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - _, err := vm.txBuilder.NewImportTx( + builder, _ := factory.NewWallet(keys[0]) + _, err := builder.NewImportTx( vm.ctx.XChainID, - recipientKey.PublicKey().Address(), - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, + }, ) - require.ErrorIs(err, txbuilder.ErrNoFunds) + require.ErrorIs(err, walletbuilder.ErrInsufficientFunds) // Provide the avm UTXO @@ -1024,17 +1100,20 @@ func TestAtomicImport(t *testing.T) { }, })) - tx, err := vm.txBuilder.NewImportTx( + builder, txSigner := factory.NewWallet(recipientKey) + utx, err := builder.NewImportTx( vm.ctx.XChainID, - recipientKey.PublicKey().Address(), - []*secp256k1.PrivateKey{recipientKey}, - ids.ShortEmpty, // change addr - nil, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, + }, ) require.NoError(err) + tx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), tx)) + require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() blk, err := vm.Builder.BuildBlock(context.Background()) @@ -1056,7 +1135,7 @@ func TestAtomicImport(t *testing.T) { // test optimistic asset import func TestOptimisticAtomicImport(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, apricotPhase3) + vm, _, _, _ := defaultVM(t, apricotPhase3) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1123,9 +1202,12 @@ func TestRestartFullyAccepted(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - 
CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} firstCtx := snowtest.Context(t, snowtest.PChainID) @@ -1210,9 +1292,12 @@ func TestRestartFullyAccepted(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} secondCtx := snowtest.Context(t, snowtest.PChainID) @@ -1250,8 +1335,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { baseDB := memdb.New() vmDB := prefixdb.New(chains.VMDBPrefix, baseDB) bootstrappingDB := prefixdb.New(chains.ChainBootstrappingDBPrefix, baseDB) - blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) - require.NoError(err) vm := &VM{Config: config.Config{ Chains: chains.TestManager, @@ -1260,9 +1343,12 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} initialClkTime := latestForkTime.Add(time.Second) @@ -1346,7 +1432,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { TimeoutCoefficient: 1.25, }, benchlist, - "", + prometheus.NewRegistry(), prometheus.NewRegistry(), ) require.NoError(err) @@ -1357,7 +1443,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { chainRouter := &router.ChainRouter{} metrics := 
prometheus.NewRegistry() - mc, err := message.NewCreator(logging.NoLog{}, metrics, "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second) + mc, err := message.NewCreator(logging.NoLog{}, metrics, constants.DefaultNetworkCompressionType, 10*time.Second) require.NoError(err) require.NoError(chainRouter.Initialize( @@ -1370,7 +1456,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { set.Set[ids.ID]{}, nil, router.HealthConfig{}, - "", prometheus.NewRegistry(), )) @@ -1378,20 +1463,15 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { externalSender.Default(true) // Passes messages from the consensus engine to the network - gossipConfig := subnets.GossipConfig{ - AcceptedFrontierPeerSize: 1, - OnAcceptPeerSize: 1, - AppGossipValidatorSize: 1, - AppGossipNonValidatorSize: 1, - } sender, err := sender.New( consensusCtx, mc, externalSender, chainRouter, timeoutManager, - p2p.EngineType_ENGINE_TYPE_SNOWMAN, - subnets.New(consensusCtx.NodeID, subnets.Config{GossipConfig: gossipConfig}), + p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(consensusCtx.NodeID, subnets.Config{}), + prometheus.NewRegistry(), ) require.NoError(err) @@ -1410,7 +1490,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { totalWeight, err := beacons.TotalWeight(ctx.SubnetID) require.NoError(err) startup := tracker.NewStartup(peers, (totalWeight+1)/2) - beacons.RegisterCallbackListener(ctx.SubnetID, startup) + beacons.RegisterSetCallbackListener(ctx.SubnetID, startup) // The engine handles consensus snowGetHandler, err := snowgetter.New( @@ -1423,16 +1503,26 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { ) require.NoError(err) + peerTracker, err := p2p.NewPeerTracker( + ctx.Log, + "peer_tracker", + consensusCtx.Registerer, + set.Of(ctx.NodeID), + nil, + ) + require.NoError(err) + bootstrapConfig := bootstrap.Config{ AllGetsServer: snowGetHandler, Ctx: consensusCtx, Beacons: beacons, SampleK: beacons.Count(ctx.SubnetID), StartupTracker: startup, + PeerTracker: 
peerTracker, Sender: sender, BootstrapTracker: bootstrapTracker, AncestorsMaxContainersReceived: 2000, - Blocked: blocked, + DB: bootstrappingDB, VM: vm, } @@ -1455,6 +1545,8 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { vm, subnets.New(ctx.NodeID, subnets.Config{}), tracker.NewPeers(), + peerTracker, + prometheus.NewRegistry(), ) require.NoError(err) @@ -1468,8 +1560,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { K: 1, AlphaPreference: 1, AlphaConfidence: 1, - BetaVirtuous: 20, - BetaRogue: 20, + Beta: 20, ConcurrentRepolls: 1, OptimalProcessing: 1, MaxOutstandingItems: 1, @@ -1500,7 +1591,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { }) consensusCtx.State.Set(snow.EngineState{ - Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + Type: p2ppb.EngineType_ENGINE_TYPE_SNOWMAN, State: snow.NormalOp, }) @@ -1512,7 +1603,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { ctx.Lock.Lock() var reqID uint32 - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + externalSender.SendF = func(msg message.OutboundMessage, config common.SendConfig, _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) require.NoError(err) require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) @@ -1521,41 +1612,42 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.True(ok) reqID = requestID - return nodeIDs + return config.NodeIDs } + peerTracker.Connected(peerID, version.CurrentApp) require.NoError(bootstrapper.Connected(context.Background(), peerID, version.CurrentApp)) - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + externalSender.SendF = func(msg message.OutboundMessage, config common.SendConfig, _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) 
require.NoError(err) require.Equal(message.GetAcceptedOp, inMsgIntf.Op()) - inMsg := inMsgIntf.Message().(*p2p.GetAccepted) + inMsg := inMsgIntf.Message().(*p2ppb.GetAccepted) reqID = inMsg.RequestId - return nodeIDs + return config.NodeIDs } require.NoError(bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, advanceTimeBlkID)) - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + externalSender.SendF = func(msg message.OutboundMessage, config common.SendConfig, _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) require.NoError(err) require.Equal(message.GetAncestorsOp, inMsgIntf.Op()) - inMsg := inMsgIntf.Message().(*p2p.GetAncestors) + inMsg := inMsgIntf.Message().(*p2ppb.GetAncestors) reqID = inMsg.RequestId containerID, err := ids.ToID(inMsg.ContainerId) require.NoError(err) require.Equal(advanceTimeBlkID, containerID) - return nodeIDs + return config.NodeIDs } frontier := set.Of(advanceTimeBlkID) require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + externalSender.SendF = func(msg message.OutboundMessage, config common.SendConfig, _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) require.NoError(err) require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) @@ -1564,19 +1656,19 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.True(ok) reqID = requestID - return nodeIDs + return config.NodeIDs } require.NoError(bootstrapper.Ancestors(context.Background(), peerID, reqID, [][]byte{advanceTimeBlkBytes})) - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + 
externalSender.SendF = func(msg message.OutboundMessage, config common.SendConfig, _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) require.NoError(err) require.Equal(message.GetAcceptedOp, inMsgIntf.Op()) - inMsg := inMsgIntf.Message().(*p2p.GetAccepted) + inMsg := inMsgIntf.Message().(*p2ppb.GetAccepted) reqID = inMsg.RequestId - return nodeIDs + return config.NodeIDs } require.NoError(bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, advanceTimeBlkID)) @@ -1601,9 +1693,12 @@ func TestUnverifiedParent(t *testing.T) { MinStakeDuration: defaultMinStakingDuration, MaxStakeDuration: defaultMaxStakingDuration, RewardConfig: defaultRewardConfig, - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} initialClkTime := latestForkTime.Add(time.Second) @@ -1702,7 +1797,7 @@ func TestUnverifiedParent(t *testing.T) { } func TestMaxStakeAmount(t *testing.T) { - vm, _, _ := defaultVM(t, latestFork) + vm, _, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -1761,9 +1856,12 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { RewardConfig: defaultRewardConfig, Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} firstCtx := snowtest.Context(t, snowtest.PChainID) @@ -1809,9 +1907,12 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { UptimePercentage: secondUptimePercentage / 100., Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - 
BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} secondCtx := snowtest.Context(t, snowtest.PChainID) @@ -1908,9 +2009,12 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { RewardConfig: defaultRewardConfig, Validators: validators.NewManager(), UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: latestForkTime, - CortinaTime: latestForkTime, - DurangoTime: latestForkTime, + UpgradeConfig: upgrade.Config{ + BanffTime: latestForkTime, + CortinaTime: latestForkTime, + DurangoTime: latestForkTime, + EUpgradeTime: mockable.MaxTime, + }, }} ctx := snowtest.Context(t, snowtest.PChainID) @@ -2006,7 +2110,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { validatorStartTime := latestForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2018,22 +2122,39 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - addValidatorTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - defaultMaxValidatorStake, - uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, + builder, txSigner := factory.NewWallet(keys[0]) + uAddValTx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultMaxValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - id, + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, 
+ }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, + }, reward.PercentDenominator, - []*secp256k1.PrivateKey{keys[0]}, - keys[0].Address(), - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() // trigger block creation for the validator tx @@ -2043,17 +2164,22 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(addValidatorBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{id}, - []*secp256k1.PrivateKey{keys[0]}, - keys[0].Address(), - nil, + uCreateSubnetTx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{id}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + require.NoError(vm.issueTxFromRPC(createSubnetTx)) vm.ctx.Lock.Lock() // trigger block creation for the subnet tx @@ -2063,26 +2189,38 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(createSubnetBlock.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( - defaultMaxValidatorStake, - 
uint64(validatorStartTime.Unix()), - uint64(validatorEndTime.Unix()), - nodeID, - createSubnetTx.ID(), - []*secp256k1.PrivateKey{key, keys[1]}, - keys[1].Address(), - nil, + builder, txSigner = factory.NewWallet(key, keys[1]) + uAddSubnetValTx, err := builder.NewAddSubnetValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(validatorStartTime.Unix()), + End: uint64(validatorEndTime.Unix()), + Wght: defaultMaxValidatorStake, + }, + Subnet: createSubnetTx.ID(), + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[1].PublicKey().Address()}, + }), ) require.NoError(err) + addSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddSubnetValTx) + require.NoError(err) - removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( + builder, txSigner = factory.NewWallet(key, keys[2]) + uRemoveSubnetValTx, err := builder.NewRemoveSubnetValidatorTx( nodeID, createSubnetTx.ID(), - []*secp256k1.PrivateKey{key, keys[2]}, - keys[2].Address(), - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[2].PublicKey().Address()}, + }), ) require.NoError(err) + removeSubnetValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uRemoveSubnetValTx) + require.NoError(err) statelessBlock, err := block.NewBanffStandardBlock( vm.state.GetTimestamp(), @@ -2108,23 +2246,29 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { func TestTransferSubnetOwnershipTx(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - // Create a subnet - createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( - 1, - []ids.ShortID{keys[0].PublicKey().Address()}, - []*secp256k1.PrivateKey{keys[0]}, - keys[0].Address(), - nil, + builder, txSigner := 
factory.NewWallet(keys[0]) + uCreateSubnetTx, err := builder.NewCreateSubnetTx( + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }), ) require.NoError(err) + createSubnetTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uCreateSubnetTx) + require.NoError(err) + subnetID := createSubnetTx.ID() vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), createSubnetTx)) + require.NoError(vm.issueTxFromRPC(createSubnetTx)) vm.ctx.Lock.Lock() createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2146,20 +2290,24 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { keys[0].PublicKey().Address(), }, } + ctx, err := walletbuilder.NewSnowContext(vm.ctx.NetworkID, vm.ctx.AVAXAssetID) + require.NoError(err) + expectedOwner.InitCtx(ctx) require.Equal(expectedOwner, subnetOwner) - transferSubnetOwnershipTx, err := vm.txBuilder.NewTransferSubnetOwnershipTx( + uTransferSubnetOwnershipTx, err := builder.NewTransferSubnetOwnershipTx( subnetID, - 1, - []ids.ShortID{keys[1].PublicKey().Address()}, - []*secp256k1.PrivateKey{keys[0]}, - ids.ShortEmpty, // change addr - nil, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[1].PublicKey().Address()}, + }, ) require.NoError(err) + transferSubnetOwnershipTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uTransferSubnetOwnershipTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), transferSubnetOwnershipTx)) + require.NoError(vm.issueTxFromRPC(transferSubnetOwnershipTx)) vm.ctx.Lock.Lock() transferSubnetOwnershipBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2181,31 +2329,43 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { keys[1].PublicKey().Address(), }, } 
+ expectedOwner.InitCtx(ctx) require.Equal(expectedOwner, subnetOwner) } func TestBaseTx(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() sendAmt := uint64(100000) changeAddr := ids.ShortEmpty - baseTx, err := vm.txBuilder.NewBaseTx( - sendAmt, - secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - keys[1].Address(), + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewBaseTx( + []*avax.TransferableOutput{ + { + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: sendAmt, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].Address(), + }, + }, + }, }, }, - []*secp256k1.PrivateKey{keys[0]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + baseTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) totalInputAmt := uint64(0) key0InputAmt := uint64(0) @@ -2241,11 +2401,11 @@ func TestBaseTx(t *testing.T) { } require.Equal(totalOutputAmt, key0OutputAmt+key1OutputAmt+changeAddrOutputAmt) - require.Equal(vm.TxFee, totalInputAmt-totalOutputAmt) + require.Equal(vm.StaticFeeConfig.TxFee, totalInputAmt-totalOutputAmt) require.Equal(sendAmt, key1OutputAmt) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), baseTx)) + require.NoError(vm.issueTxFromRPC(baseTx)) vm.ctx.Lock.Lock() baseTxBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -2261,7 +2421,7 @@ func TestBaseTx(t *testing.T) { func TestPruneMempool(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM(t, latestFork) + vm, factory, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() @@ -2269,22 +2429,33 @@ func TestPruneMempool(t *testing.T) { 
sendAmt := uint64(100000) changeAddr := ids.ShortEmpty - baseTx, err := vm.txBuilder.NewBaseTx( - sendAmt, - secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - keys[1].Address(), + builder, txSigner := factory.NewWallet(keys[0]) + utx, err := builder.NewBaseTx( + []*avax.TransferableOutput{ + { + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: sendAmt, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[1].Address(), + }, + }, + }, }, }, - []*secp256k1.PrivateKey{keys[0]}, - changeAddr, - nil, + walletcommon.WithChangeOwner(&secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }), ) require.NoError(err) + baseTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, utx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), baseTx)) + require.NoError(vm.issueTxFromRPC(baseTx)) vm.ctx.Lock.Lock() // [baseTx] should be in the mempool. 
@@ -2301,22 +2472,35 @@ func TestPruneMempool(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) - addValidatorTx, err := vm.txBuilder.NewAddPermissionlessValidatorTx( - defaultMinValidatorStake, - uint64(startTime.Unix()), - uint64(endTime.Unix()), - ids.GenerateTestNodeID(), + builder, txSigner = factory.NewWallet(keys[1]) + uAddValTx, err := builder.NewAddPermissionlessValidatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: ids.GenerateTestNodeID(), + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: defaultMinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + }, signer.NewProofOfPossession(sk), - keys[2].Address(), + vm.ctx.AVAXAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[2].Address()}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[2].Address()}, + }, 20000, - []*secp256k1.PrivateKey{keys[1]}, - ids.ShortEmpty, - nil, ) require.NoError(err) + addValidatorTx, err := walletsigner.SignUnsigned(context.Background(), txSigner, uAddValTx) + require.NoError(err) vm.ctx.Lock.Unlock() - require.NoError(vm.issueTx(context.Background(), addValidatorTx)) + require.NoError(vm.issueTxFromRPC(addValidatorTx)) vm.ctx.Lock.Lock() // Advance clock to [endTime], making [addValidatorTx] invalid. 
diff --git a/vms/platformvm/warp/codec.go b/vms/platformvm/warp/codec.go index 6ef6e526bdc1..8d2193827346 100644 --- a/vms/platformvm/warp/codec.go +++ b/vms/platformvm/warp/codec.go @@ -5,7 +5,6 @@ package warp import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -18,7 +17,7 @@ var Codec codec.Manager func init() { Codec = codec.NewManager(math.MaxInt) - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() err := utils.Err( lc.RegisterType(&BitSetSignature{}), diff --git a/vms/platformvm/warp/payload/codec.go b/vms/platformvm/warp/payload/codec.go index d188029abfed..b89db089d454 100644 --- a/vms/platformvm/warp/payload/codec.go +++ b/vms/platformvm/warp/payload/codec.go @@ -4,8 +4,6 @@ package payload import ( - "time" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/utils" @@ -22,7 +20,7 @@ var Codec codec.Manager func init() { Codec = codec.NewManager(MaxMessageSize) - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() err := utils.Err( lc.RegisterType(&Hash{}), diff --git a/vms/platformvm/warp/signature_test.go b/vms/platformvm/warp/signature_test.go index 56ab16c4709c..eedec396fcdc 100644 --- a/vms/platformvm/warp/signature_test.go +++ b/vms/platformvm/warp/signature_test.go @@ -55,7 +55,7 @@ func newTestValidator() *testValidator { sk: sk, vdr: &Validator{ PublicKey: pk, - PublicKeyBytes: bls.SerializePublicKey(pk), + PublicKeyBytes: bls.PublicKeyToUncompressedBytes(pk), Weight: 3, NodeIDs: []ids.NodeID{nodeID}, }, diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index 2ada068adc76..0d33a9f12f26 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -55,12 +55,20 @@ func GetCanonicalValidatorSet( // Get the validator set at the given height. 
vdrSet, err := pChainState.GetValidatorSet(ctx, pChainHeight, subnetID) if err != nil { - return nil, 0, fmt.Errorf("failed to fetch validator set (P-Chain Height: %d, SubnetID: %s): %w", pChainHeight, subnetID, err) + return nil, 0, err } + // Convert the validator set into the canonical ordering. + return FlattenValidatorSet(vdrSet) +} + +// FlattenValidatorSet converts the provided [vdrSet] into a canonical ordering. +// Also returns the total weight of the validator set. +func FlattenValidatorSet(vdrSet map[ids.NodeID]*validators.GetValidatorOutput) ([]*Validator, uint64, error) { var ( vdrs = make(map[string]*Validator, len(vdrSet)) totalWeight uint64 + err error ) for _, vdr := range vdrSet { totalWeight, err = math.Add64(totalWeight, vdr.Weight) @@ -72,7 +80,7 @@ func GetCanonicalValidatorSet( continue } - pkBytes := bls.SerializePublicKey(vdr.PublicKey) + pkBytes := bls.PublicKeyToUncompressedBytes(vdr.PublicKey) uniqueVdr, ok := vdrs[string(pkBytes)] if !ok { uniqueVdr = &Validator{ diff --git a/vms/platformvm/warp/validator_test.go b/vms/platformvm/warp/validator_test.go index af680eddd98e..7d14ed252616 100644 --- a/vms/platformvm/warp/validator_test.go +++ b/vms/platformvm/warp/validator_test.go @@ -148,8 +148,8 @@ func TestGetCanonicalValidatorSet(t *testing.T) { require.Len(vdrs, len(tt.expectedVdrs)) for i, expectedVdr := range tt.expectedVdrs { gotVdr := vdrs[i] - expectedPKBytes := bls.PublicKeyToBytes(expectedVdr.PublicKey) - gotPKBytes := bls.PublicKeyToBytes(gotVdr.PublicKey) + expectedPKBytes := bls.PublicKeyToCompressedBytes(expectedVdr.PublicKey) + gotPKBytes := bls.PublicKeyToCompressedBytes(gotVdr.PublicKey) require.Equal(expectedPKBytes, gotPKBytes) require.Equal(expectedVdr.PublicKeyBytes, gotVdr.PublicKeyBytes) require.Equal(expectedVdr.Weight, gotVdr.Weight) @@ -165,7 +165,7 @@ func TestFilterValidators(t *testing.T) { pk0 := bls.PublicFromSecretKey(sk0) vdr0 := &Validator{ PublicKey: pk0, - PublicKeyBytes: bls.SerializePublicKey(pk0), + 
PublicKeyBytes: bls.PublicKeyToUncompressedBytes(pk0), Weight: 1, } @@ -174,7 +174,7 @@ func TestFilterValidators(t *testing.T) { pk1 := bls.PublicFromSecretKey(sk1) vdr1 := &Validator{ PublicKey: pk1, - PublicKeyBytes: bls.SerializePublicKey(pk1), + PublicKeyBytes: bls.PublicKeyToUncompressedBytes(pk1), Weight: 2, } diff --git a/vms/propertyfx/fx_test.go b/vms/propertyfx/fx_test.go index 0cd995ba5282..10a96e6e5b0b 100644 --- a/vms/propertyfx/fx_test.go +++ b/vms/propertyfx/fx_test.go @@ -39,7 +39,7 @@ var ( func TestFxInitialize(t *testing.T) { vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} @@ -56,7 +56,7 @@ func TestFxVerifyMintOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -98,7 +98,7 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -132,7 +132,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -163,7 +163,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -195,7 +195,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { require := require.New(t) vm 
:= secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -226,7 +226,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -264,7 +264,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -301,7 +301,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -335,7 +335,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -364,7 +364,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -399,7 +399,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 
25, 17, 3, time.UTC) @@ -431,7 +431,7 @@ func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := secp256k1fx.TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) diff --git a/vms/proposervm/batched_vm.go b/vms/proposervm/batched_vm.go index 0bf514827193..853c858e6bba 100644 --- a/vms/proposervm/batched_vm.go +++ b/vms/proposervm/batched_vm.go @@ -101,7 +101,7 @@ func (vm *VM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.B ) for ; blocksIndex < len(blks); blocksIndex++ { blkBytes := blks[blocksIndex] - statelessBlock, err := statelessblock.Parse(blkBytes, vm.DurangoTime) + statelessBlock, err := statelessblock.Parse(blkBytes, vm.ctx.ChainID) if err != nil { break } diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 3564b0b85b8f..be134823c894 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -16,8 +17,8 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/snowtest" @@ -32,7 +33,7 @@ func TestCoreVMNotRemote(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - _, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + _, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { 
require.NoError(proVM.Shutdown(context.Background())) }() @@ -61,22 +62,13 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + coreVM, proRemoteVM := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() // Build some prefork blocks.... - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -86,24 +78,15 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreBlk1.ID(): + switch blkID { + case coreBlk1.ID(): return coreBlk1, nil default: return nil, errUnknownBlock } } - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: coreBlk1.Timestamp(), - } + coreBlk2 := snowmantest.BuildChild(coreBlk1) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -113,24 +96,15 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == 
coreBlk2.ID(): + switch blkID { + case coreBlk2.ID(): return coreBlk2, nil default: return nil, errUnknownBlock } } - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - TimestampV: coreBlk2.Timestamp(), - } + coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -211,21 +185,13 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + coreVM, proRemoteVM := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() // Build some post-Fork blocks.... - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -237,15 +203,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) require.NoError(waitForProposerWindow(proRemoteVM, builtBlk1, 0)) - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - } + coreBlk2 := snowmantest.BuildChild(coreBlk1) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -257,15 +215,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { 
require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) require.NoError(waitForProposerWindow(proRemoteVM, builtBlk2, 0)) - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -300,8 +250,8 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil case bytes.Equal(b, coreBlk2.Bytes()): @@ -371,23 +321,15 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { ) // enable ProBlks in next future - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime, durangoTime) + coreVM, proRemoteVM := initTestRemoteProposerVM(t, forkTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() // Build some prefork blocks.... 
proRemoteVM.Set(preForkTime) - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: preForkTime, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) + coreBlk1.TimestampV = preForkTime coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -406,16 +348,8 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { } } - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: postForkTime, - } + coreBlk2 := snowmantest.BuildChild(coreBlk1) + coreBlk2.TimestampV = postForkTime coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -436,15 +370,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // .. 
and some post-fork proRemoteVM.Set(postForkTime) - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -457,15 +383,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) require.NoError(waitForProposerWindow(proRemoteVM, builtBlk3, builtBlk3.(*postForkBlock).PChainHeight())) - coreBlk4 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{4}, - ParentV: coreBlk3.ID(), - HeightV: coreBlk3.Height() + 1, - } + coreBlk4 := snowmantest.BuildChild(coreBlk3) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil } @@ -573,21 +491,13 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + coreVM, proRemoteVM := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() // Build some prefork blocks.... 
- coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -597,23 +507,15 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreBlk1.ID(): + switch blkID { + case coreBlk1.ID(): return coreBlk1, nil default: return nil, errUnknownBlock } } - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - } + coreBlk2 := snowmantest.BuildChild(coreBlk1) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -631,15 +533,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { } } - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -695,21 +589,13 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, activationTime, durangoTime) + coreVM, proRemoteVM := initTestRemoteProposerVM(t, activationTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() // Build 
some post-Fork blocks.... - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -721,15 +607,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) require.NoError(waitForProposerWindow(proRemoteVM, builtBlk1, 0)) - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - } + coreBlk2 := snowmantest.BuildChild(coreBlk1) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -741,15 +619,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) require.NoError(waitForProposerWindow(proRemoteVM, builtBlk2, builtBlk2.(*postForkBlock).PChainHeight())) - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -812,23 +682,15 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { ) // enable ProBlks in next future - coreVM, proRemoteVM, coreGenBlk := initTestRemoteProposerVM(t, forkTime, durangoTime) + coreVM, proRemoteVM := initTestRemoteProposerVM(t, forkTime, durangoTime) defer func() { require.NoError(proRemoteVM.Shutdown(context.Background())) }() // Build some prefork 
blocks.... proRemoteVM.Set(preForkTime) - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: preForkTime, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) + coreBlk1.TimestampV = preForkTime coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -847,16 +709,8 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { } } - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk1.ID(), - HeightV: coreBlk1.Height() + 1, - TimestampV: postForkTime, - } + coreBlk2 := snowmantest.BuildChild(coreBlk1) + coreBlk2.TimestampV = postForkTime coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -877,15 +731,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { // .. 
and some post-fork proRemoteVM.Set(postForkTime) - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk2.ID(), - HeightV: coreBlk2.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -898,15 +744,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) require.NoError(waitForProposerWindow(proRemoteVM, builtBlk3, builtBlk3.(*postForkBlock).PChainHeight())) - coreBlk4 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{4}, - ParentV: coreBlk3.ID(), - HeightV: coreBlk3.Height() + 1, - } + coreBlk4 := snowmantest.BuildChild(coreBlk3) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil } @@ -977,20 +815,9 @@ func initTestRemoteProposerVM( ) ( TestRemoteProposerVM, *VM, - *snowman.TestBlock, ) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: []byte{0}, - } - initialState := []byte("genesis state") coreVM := TestRemoteProposerVM{ TestVM: &block.TestVM{}, @@ -1013,27 +840,24 @@ func initTestRemoteProposerVM( return nil } coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreGenBlk.ID(): - return coreGenBlk, nil + switch blkID { + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } coreVM.ParseBlockF = func(_ 
context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } - coreVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } proVM := New( coreVM, @@ -1045,6 +869,7 @@ func initTestRemoteProposerVM( NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1052,7 +877,7 @@ func initTestRemoteProposerVM( T: t, } valState.GetMinimumHeightF = func(context.Context) (uint64, error) { - return coreGenBlk.Height(), nil + return snowmantest.GenesisHeight, nil } valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return defaultPChainHeight, nil @@ -1104,6 +929,6 @@ func initTestRemoteProposerVM( coreVM.InitializeF = nil require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) - return coreVM, proVM, coreGenBlk + require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + return coreVM, proVM } diff --git a/vms/proposervm/block.go b/vms/proposervm/block.go index c43e8c5e5d8b..d320a80543bc 100644 --- a/vms/proposervm/block.go +++ b/vms/proposervm/block.go @@ -36,6 +36,7 @@ var ( errTimeTooAdvanced = errors.New("time is too far advanced") errProposerWindowNotStarted = errors.New("proposer window hasn't started") errUnexpectedProposer = errors.New("unexpected proposer for current window") + errProposerMismatch = errors.New("proposer mismatch") errProposersNotActivated = errors.New("proposers haven't been activated yet") errPChainHeightTooLow = errors.New("block P-chain height is too low") ) @@ -152,9 +153,9 @@ func (p *postForkCommonComponents) Verify( return err } - // Verify the signature of the node - if err := 
child.SignedBlock.Verify(shouldHaveProposer, p.vm.ctx.ChainID); err != nil { - return err + hasProposer := child.SignedBlock.Proposer() != ids.EmptyNodeID + if shouldHaveProposer != hasProposer { + return fmt.Errorf("%w: shouldHaveProposer (%v) != hasProposer (%v)", errProposerMismatch, shouldHaveProposer, hasProposer) } p.vm.ctx.Log.Debug("verified post-fork block", @@ -275,6 +276,7 @@ func (p *postForkCommonComponents) buildChild( zap.Stringer("blkID", child.ID()), zap.Stringer("innerBlkID", innerBlock.ID()), zap.Uint64("height", child.Height()), + zap.Uint64("pChainHeight", pChainHeight), zap.Time("parentTimestamp", parentTimestamp), zap.Time("blockTimestamp", newTimestamp), ) @@ -367,7 +369,10 @@ func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( currentSlot = proposer.TimeToSlot(parentTimestamp, blkTimestamp) proposerID = blk.Proposer() ) + // populate the slot for the block. + blk.slot = ¤tSlot + // find the expected proposer expectedProposerID, err := p.vm.Windower.ExpectedProposer( ctx, blkHeight, @@ -450,6 +455,11 @@ func (p *postForkCommonComponents) shouldBuildSignedBlockPostDurango( ) return false, err } + + // report the build slot to the metrics. + p.vm.proposerBuildSlotGauge.Set(float64(proposer.TimeToSlot(parentTimestamp, nextStartTime))) + + // set the scheduler to let us know when the next block need to be built. 
p.vm.Scheduler.SetBuildBlockTime(nextStartTime) // In case the inner VM only issued one pendingTxs message, we should diff --git a/vms/proposervm/block/block.go b/vms/proposervm/block/block.go index 0f5b374391f7..68da910e1dbd 100644 --- a/vms/proposervm/block/block.go +++ b/vms/proposervm/block/block.go @@ -17,9 +17,8 @@ import ( var ( _ SignedBlock = (*statelessBlock)(nil) - errUnexpectedProposer = errors.New("expected no proposer but one was provided") - errMissingProposer = errors.New("expected proposer but none was provided") - errInvalidCertificate = errors.New("invalid certificate") + errUnexpectedSignature = errors.New("signature provided when none was expected") + errInvalidCertificate = errors.New("invalid certificate") ) type Block interface { @@ -28,7 +27,8 @@ type Block interface { Block() []byte Bytes() []byte - initialize(bytes []byte, durangoTime time.Time) error + initialize(bytes []byte) error + verify(chainID ids.ID) error } type SignedBlock interface { @@ -36,9 +36,10 @@ type SignedBlock interface { PChainHeight() uint64 Timestamp() time.Time - Proposer() ids.NodeID - Verify(shouldHaveProposer bool, chainID ids.ID) error + // Proposer returns the ID of the node that proposed this block. If no node + // signed this block, [ids.EmptyNodeID] will be returned. + Proposer() ids.NodeID } type statelessUnsignedBlock struct { @@ -76,7 +77,7 @@ func (b *statelessBlock) Bytes() []byte { return b.bytes } -func (b *statelessBlock) initialize(bytes []byte, durangoTime time.Time) error { +func (b *statelessBlock) initialize(bytes []byte) error { b.bytes = bytes // The serialized form of the block is the unsignedBytes followed by the @@ -91,13 +92,8 @@ func (b *statelessBlock) initialize(bytes []byte, durangoTime time.Time) error { return nil } - // TODO: Remove durangoTime after v1.11.x has activated. 
var err error - if b.timestamp.Before(durangoTime) { - b.cert, err = staking.ParseCertificate(b.StatelessBlock.Certificate) - } else { - b.cert, err = staking.ParseCertificatePermissive(b.StatelessBlock.Certificate) - } + b.cert, err = staking.ParseCertificate(b.StatelessBlock.Certificate) if err != nil { return fmt.Errorf("%w: %w", errInvalidCertificate, err) } @@ -106,26 +102,12 @@ func (b *statelessBlock) initialize(bytes []byte, durangoTime time.Time) error { return nil } -func (b *statelessBlock) PChainHeight() uint64 { - return b.StatelessBlock.PChainHeight -} - -func (b *statelessBlock) Timestamp() time.Time { - return b.timestamp -} - -func (b *statelessBlock) Proposer() ids.NodeID { - return b.proposer -} - -func (b *statelessBlock) Verify(shouldHaveProposer bool, chainID ids.ID) error { - if !shouldHaveProposer { - if len(b.Signature) > 0 || len(b.StatelessBlock.Certificate) > 0 { - return errUnexpectedProposer +func (b *statelessBlock) verify(chainID ids.ID) error { + if len(b.StatelessBlock.Certificate) == 0 { + if len(b.Signature) > 0 { + return errUnexpectedSignature } return nil - } else if b.cert == nil { - return errMissingProposer } header, err := BuildHeader(chainID, b.StatelessBlock.ParentID, b.id) @@ -140,3 +122,15 @@ func (b *statelessBlock) Verify(shouldHaveProposer bool, chainID ids.ID) error { b.Signature, ) } + +func (b *statelessBlock) PChainHeight() uint64 { + return b.StatelessBlock.PChainHeight +} + +func (b *statelessBlock) Timestamp() time.Time { + return b.timestamp +} + +func (b *statelessBlock) Proposer() ids.NodeID { + return b.proposer +} diff --git a/vms/proposervm/block/block_test.go b/vms/proposervm/block/block_test.go index 8a8a57ae3b9d..2b8918eb902c 100644 --- a/vms/proposervm/block/block_test.go +++ b/vms/proposervm/block/block_test.go @@ -14,37 +14,22 @@ import ( "github.com/ava-labs/avalanchego/utils/units" ) -func equal(require *require.Assertions, chainID ids.ID, want, have SignedBlock) { +func equal(require 
*require.Assertions, want, have Block) { require.Equal(want.ID(), have.ID()) require.Equal(want.ParentID(), have.ParentID()) - require.Equal(want.PChainHeight(), have.PChainHeight()) - require.Equal(want.Timestamp(), have.Timestamp()) require.Equal(want.Block(), have.Block()) - require.Equal(want.Proposer(), have.Proposer()) require.Equal(want.Bytes(), have.Bytes()) - require.Equal(want.Verify(false, chainID), have.Verify(false, chainID)) - require.Equal(want.Verify(true, chainID), have.Verify(true, chainID)) -} - -func TestVerifyNoCertWithSignature(t *testing.T) { - parentID := ids.ID{1} - timestamp := time.Unix(123, 0) - pChainHeight := uint64(2) - innerBlockBytes := []byte{3} - - require := require.New(t) - - builtBlockIntf, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) - require.NoError(err) - - builtBlock := builtBlockIntf.(*statelessBlock) - builtBlock.Signature = []byte{0} - err = builtBlock.Verify(false, ids.Empty) - require.ErrorIs(err, errUnexpectedProposer) + signedWant, wantIsSigned := want.(SignedBlock) + signedHave, haveIsSigned := have.(SignedBlock) + require.Equal(wantIsSigned, haveIsSigned) + if !wantIsSigned { + return + } - err = builtBlock.Verify(true, ids.Empty) - require.ErrorIs(err, errMissingProposer) + require.Equal(signedWant.PChainHeight(), signedHave.PChainHeight()) + require.Equal(signedWant.Timestamp(), signedHave.Timestamp()) + require.Equal(signedWant.Proposer(), signedHave.Proposer()) } func TestBlockSizeLimit(t *testing.T) { diff --git a/vms/proposervm/block/build.go b/vms/proposervm/block/build.go index b13255c91dd1..228ab97604da 100644 --- a/vms/proposervm/block/build.go +++ b/vms/proposervm/block/build.go @@ -36,9 +36,7 @@ func BuildUnsigned( return nil, err } - // Invariant: The durango timestamp isn't used here because the certificate - // is empty. 
- return block, block.initialize(bytes, time.Time{}) + return block, block.initialize(bytes) } func Build( @@ -125,6 +123,5 @@ func BuildOption( return nil, err } - // Invariant: The durango timestamp isn't used. - return block, block.initialize(bytes, time.Time{}) + return block, block.initialize(bytes) } diff --git a/vms/proposervm/block/build_test.go b/vms/proposervm/block/build_test.go index 8388e8a434f8..2ed9510c696c 100644 --- a/vms/proposervm/block/build_test.go +++ b/vms/proposervm/block/build_test.go @@ -26,8 +26,10 @@ func TestBuild(t *testing.T) { tlsCert, err := staking.NewTLSCert() require.NoError(err) - cert := staking.CertificateFromX509(tlsCert.Leaf) + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) key := tlsCert.PrivateKey.(crypto.Signer) + nodeID := ids.NodeIDFromCert(cert) builtBlock, err := Build( parentID, @@ -44,11 +46,7 @@ func TestBuild(t *testing.T) { require.Equal(pChainHeight, builtBlock.PChainHeight()) require.Equal(timestamp, builtBlock.Timestamp()) require.Equal(innerBlockBytes, builtBlock.Block()) - - require.NoError(builtBlock.Verify(true, chainID)) - - err = builtBlock.Verify(false, chainID) - require.ErrorIs(err, errUnexpectedProposer) + require.Equal(nodeID, builtBlock.Proposer()) } func TestBuildUnsigned(t *testing.T) { @@ -67,11 +65,6 @@ func TestBuildUnsigned(t *testing.T) { require.Equal(timestamp, builtBlock.Timestamp()) require.Equal(innerBlockBytes, builtBlock.Block()) require.Equal(ids.EmptyNodeID, builtBlock.Proposer()) - - require.NoError(builtBlock.Verify(false, ids.Empty)) - - err = builtBlock.Verify(true, ids.Empty) - require.ErrorIs(err, errMissingProposer) } func TestBuildHeader(t *testing.T) { diff --git a/vms/proposervm/block/codec.go b/vms/proposervm/block/codec.go index ca2318002093..a00ad7de2506 100644 --- a/vms/proposervm/block/codec.go +++ b/vms/proposervm/block/codec.go @@ -5,7 +5,6 @@ package block import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" 
"github.com/ava-labs/avalanchego/codec/linearcodec" @@ -17,7 +16,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() // The maximum block size is enforced by the p2p message size limit. // See: [constants.DefaultMaxMessageSize] Codec = codec.NewManager(math.MaxInt) diff --git a/vms/proposervm/block/option.go b/vms/proposervm/block/option.go index 7edb39bd429f..115b6d0b9f99 100644 --- a/vms/proposervm/block/option.go +++ b/vms/proposervm/block/option.go @@ -4,8 +4,6 @@ package block import ( - "time" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -34,8 +32,12 @@ func (b *option) Bytes() []byte { return b.bytes } -func (b *option) initialize(bytes []byte, _ time.Time) error { +func (b *option) initialize(bytes []byte) error { b.id = hashing.ComputeHash256Array(bytes) b.bytes = bytes return nil } + +func (*option) verify(ids.ID) error { + return nil +} diff --git a/vms/proposervm/block/option_test.go b/vms/proposervm/block/option_test.go deleted file mode 100644 index d5af9c100079..000000000000 --- a/vms/proposervm/block/option_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package block - -import "github.com/stretchr/testify/require" - -func equalOption(require *require.Assertions, want, have Block) { - require.Equal(want.ID(), have.ID()) - require.Equal(want.ParentID(), have.ParentID()) - require.Equal(want.Block(), have.Block()) - require.Equal(want.Bytes(), have.Bytes()) -} diff --git a/vms/proposervm/block/parse.go b/vms/proposervm/block/parse.go index bf9b44adf1f4..f6bc63877166 100644 --- a/vms/proposervm/block/parse.go +++ b/vms/proposervm/block/parse.go @@ -5,10 +5,23 @@ package block import ( "fmt" - "time" + + "github.com/ava-labs/avalanchego/ids" ) -func Parse(bytes []byte, durangoTime time.Time) (Block, error) { +// Parse a block and verify that the signature attached to the block is valid +// for the certificate provided in the block. +func Parse(bytes []byte, chainID ids.ID) (Block, error) { + block, err := ParseWithoutVerification(bytes) + if err != nil { + return nil, err + } + return block, block.verify(chainID) +} + +// ParseWithoutVerification parses a block without verifying that the signature +// on the block is correct. 
+func ParseWithoutVerification(bytes []byte) (Block, error) { var block Block parsedVersion, err := Codec.Unmarshal(bytes, &block) if err != nil { @@ -17,7 +30,7 @@ func Parse(bytes []byte, durangoTime time.Time) (Block, error) { if parsedVersion != CodecVersion { return nil, fmt.Errorf("expected codec version %d but got %d", CodecVersion, parsedVersion) } - return block, block.initialize(bytes, durangoTime) + return block, block.initialize(bytes) } func ParseHeader(bytes []byte) (Header, error) { diff --git a/vms/proposervm/block/parse_test.go b/vms/proposervm/block/parse_test.go index 148bac82c0a6..ce1d5d97cbb8 100644 --- a/vms/proposervm/block/parse_test.go +++ b/vms/proposervm/block/parse_test.go @@ -17,8 +17,6 @@ import ( ) func TestParse(t *testing.T) { - require := require.New(t) - parentID := ids.ID{1} timestamp := time.Unix(123, 0) pChainHeight := uint64(2) @@ -26,12 +24,13 @@ func TestParse(t *testing.T) { chainID := ids.ID{4} tlsCert, err := staking.NewTLSCert() - require.NoError(err) + require.NoError(t, err) - cert := staking.CertificateFromX509(tlsCert.Leaf) + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(t, err) key := tlsCert.PrivateKey.(crypto.Signer) - builtBlock, err := Build( + signedBlock, err := Build( parentID, timestamp, pChainHeight, @@ -40,41 +39,106 @@ func TestParse(t *testing.T) { chainID, key, ) - require.NoError(err) - - builtBlockBytes := builtBlock.Bytes() - durangoTimes := []time.Time{ - timestamp.Add(time.Second), // Durango not activated yet - timestamp.Add(-time.Second), // Durango activated + require.NoError(t, err) + + unsignedBlock, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) + require.NoError(t, err) + + signedWithoutCertBlockIntf, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) + require.NoError(t, err) + signedWithoutCertBlock := signedWithoutCertBlockIntf.(*statelessBlock) + signedWithoutCertBlock.Signature = []byte{5} + + 
signedWithoutCertBlock.bytes, err = Codec.Marshal(CodecVersion, &signedWithoutCertBlockIntf) + require.NoError(t, err) + + optionBlock, err := BuildOption(parentID, innerBlockBytes) + require.NoError(t, err) + + tests := []struct { + name string + block Block + chainID ids.ID + expectedErr error + }{ + { + name: "correct chainID", + block: signedBlock, + chainID: chainID, + expectedErr: nil, + }, + { + name: "invalid chainID", + block: signedBlock, + chainID: ids.ID{5}, + expectedErr: staking.ErrECDSAVerificationFailure, + }, + { + name: "unsigned block", + block: unsignedBlock, + chainID: chainID, + expectedErr: nil, + }, + { + name: "invalid signature", + block: signedWithoutCertBlockIntf, + chainID: chainID, + expectedErr: errUnexpectedSignature, + }, + { + name: "option block", + block: optionBlock, + chainID: chainID, + expectedErr: nil, + }, } - for _, durangoTime := range durangoTimes { - parsedBlockIntf, err := Parse(builtBlockBytes, durangoTime) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, chainID, builtBlock, parsedBlock) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + blockBytes := test.block.Bytes() + parsedBlockWithoutVerification, err := ParseWithoutVerification(blockBytes) + require.NoError(err) + equal(require, test.block, parsedBlockWithoutVerification) + + parsedBlock, err := Parse(blockBytes, test.chainID) + require.ErrorIs(err, test.expectedErr) + if test.expectedErr == nil { + equal(require, test.block, parsedBlock) + } + }) } } -func TestParseDuplicateExtension(t *testing.T) { - require := require.New(t) - - blockHex := 
"0000000000000100000000000000000000000000000000000000000000000000000000000000000000000000007b0000000000000002000004bd308204b9308202a1a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313232303830333233323835335a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100c2b2de1c16924d9b9254a0d5b80a4bc5f9beaa4f4f40a0e4efb69eb9b55d7d37f8c82328c237d7c5b451f5427b487284fa3f365f9caa53c7fcfef8d7a461d743bd7d88129f2da62b877ebe9d6feabf1bd12923e6c12321382c782fc3bb6b6cb4986a937a1edc3814f4e621e1a62053deea8c7649e43edd97ab6b56315b00d9ab5026bb9c31fb042dc574ba83c54e720e0120fcba2e8a66b77839be3ece0d4a6383ef3f76aac952b49a15b65e18674cd1340c32cecbcbaf80ae45be001366cb56836575fb0ab51ea44bf7278817e99b6b180fdd110a49831a132968489822c56692161bbd372cf89d9b8ee5a734cff15303b3a960ee78d79e76662a701941d9ec084429f26707f767e9b1d43241c0e4f96655d95c1f4f4aa00add78eff6bf0a6982766a035bf0b465786632c5bb240788ca0fdf032d8815899353ea4bec5848fd30118711e5b356bde8a0da074cc25709623225e734ff5bd0cf65c40d9fd8fccf746d8f8f35145bcebcf378d2b086e57d78b11e84f47fa467c4d037f92bff6dd4e934e0189b58193f24c4222ffb72b5c06361cf68ca64345bc3e230cc0f40063ad5f45b1659c643662996328c2eeddcd760d6f7c9cbae081ccc065844f7ea78c858564a408979764de882793706acc67d88092790dff567ed914b03355330932616a0f26f994b963791f0b1dbd8df979db86d1ea490700a3120293c3c2b10bef10203010001a33c303a300e0603551d0f0101ff0404030204b030130603551d25040c300a06082b0601050507030230130603551d25040c300a06082b06010505070302300d06092a864886f70d01010b05000382020100a21a0d73ec9ef4eb39f810557ac70b0b775772b8bae5f42c98565bc50b5b2c57317aa9cb1da12f55d0aac7bb36a00cd4fd0d7384c4efa284b53520c5a3c4b8a65240b393eeab02c802ea146c0728c3481c9e8d3aaad9d4dd7607103dcfaa96da83460adbe18174ed5b71bde7b0a93d4fb52234a9ff54e3fd25c5b74790dfb090f2e59dc5907357f510cc3a0b70ccdb87aee214def794b316224f318b471ffa13b66e44b467670e881cb1628c99c048a503376d9b6d7b8eef2e7be47ff7d5c1d56221f4cf7fa2519b594cb5917815c64dc75d8d281bcc99b5a12899b08f2ca0f189857b64a1afc5963337f3dd6e79390e8
5221569f6dbbb13aadce06a3dfb5032f0cc454809627872cd7cd0cea5eba187723f07652c8abc3fc42bd62136fc66287f2cc19a7cb416923ad1862d7f820b55cacb65e43731cb6df780e2651e457a3438456aeeeb278ad9c0ad2e760f6c1cbe276eeb621c8a4e609b5f2d902beb3212e3e45df99497021ff536d0b56390c5d785a8bf7909f6b61bdc705d7d92ae22f58e7b075f164a0450d82d8286bf449072751636ab5185f59f518b845a75d112d6f7b65223479202cff67635e2ad88106bc8a0cc9352d87c5b182ac19a4680a958d814a093acf46730f87da0df6926291d02590f215041b44a0a1a32eeb3a52cddabc3d256689bace18a8d85e644cf9137cce3718f7caac1cb16ae06e874f4c701000000010300000200b8e3a4d9a4394bac714cb597f5ba1a81865185e35c782d0317e7abc0b52d49ff8e10f787bedf86f08148e3dbd2d2d478caa2a2893d31db7d5ee51339883fe84d3004440f16cb3797a7fab0f627d3ebd79217e995488e785cd6bb7b96b9d306f8109daa9cfc4162f9839f60fb965bcb3b56a5fa787549c153a4c80027398f73a617b90b7f24f437b140cd3ac832c0b75ec98b9423b275782988a9fd426937b8f82fbb0e88a622934643fb6335c1a080a4d13125544b04585d5f5295be7cd2c8be364246ea3d5df3e837b39a85074575a1fa2f4799050460110bdfb20795c8a9172a20f61b95e1c5c43eccd0c2c155b67385366142c63409cb3fb488e7aba6c8930f7f151abf1c24a54bd21c3f7a06856ea9db35beddecb30d2c61f533a3d0590bdbb438c6f2a2286dfc3c71b383354f0abad72771c2cc3687b50c2298783e53857cf26058ed78d0c1cf53786eb8d006a058ee3c85a7b2b836b5d03ef782709ce8f2725548e557b3de45a395a669a15f1d910e97015d22ac70020cab7e2531e8b1f739b023b49e742203e9e19a7fe0053826a9a2fe2e118d3b83498c2cb308573202ad41aa4a390aee4b6b5dd2164e5c5cd1b5f68b7d5632cf7dbb9a9139663c9aac53a74b2c6fc73cad80e228a186ba027f6f32f0182d62503e04fcced385f2e7d2e11c00940622ebd533b4d144689082f9777e5b16c36f9af9066e0ad6564d43" - blockBytes, err := hex.DecodeString(blockHex) - require.NoError(err) - - // Note: The above blockHex specifies 123 as the block's timestamp. 
- timestamp := time.Unix(123, 0) - durangoNotYetActivatedTime := timestamp.Add(time.Second) - durangoAlreadyActivatedTime := timestamp.Add(-time.Second) +func TestParseBytes(t *testing.T) { + chainID := ids.ID{4} + tests := []struct { + name string + hex string + expectedErr error + }{ + { + name: "duplicate extensions in certificate", + hex: "0000000000000100000000000000000000000000000000000000000000000000000000000000000000000000007b0000000000000002000004bd308204b9308202a1a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313232303830333233323835335a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100c2b2de1c16924d9b9254a0d5b80a4bc5f9beaa4f4f40a0e4efb69eb9b55d7d37f8c82328c237d7c5b451f5427b487284fa3f365f9caa53c7fcfef8d7a461d743bd7d88129f2da62b877ebe9d6feabf1bd12923e6c12321382c782fc3bb6b6cb4986a937a1edc3814f4e621e1a62053deea8c7649e43edd97ab6b56315b00d9ab5026bb9c31fb042dc574ba83c54e720e0120fcba2e8a66b77839be3ece0d4a6383ef3f76aac952b49a15b65e18674cd1340c32cecbcbaf80ae45be001366cb56836575fb0ab51ea44bf7278817e99b6b180fdd110a49831a132968489822c56692161bbd372cf89d9b8ee5a734cff15303b3a960ee78d79e76662a701941d9ec084429f26707f767e9b1d43241c0e4f96655d95c1f4f4aa00add78eff6bf0a6982766a035bf0b465786632c5bb240788ca0fdf032d8815899353ea4bec5848fd30118711e5b356bde8a0da074cc25709623225e734ff5bd0cf65c40d9fd8fccf746d8f8f35145bcebcf378d2b086e57d78b11e84f47fa467c4d037f92bff6dd4e934e0189b58193f24c4222ffb72b5c06361cf68ca64345bc3e230cc0f40063ad5f45b1659c643662996328c2eeddcd760d6f7c9cbae081ccc065844f7ea78c858564a408979764de882793706acc67d88092790dff567ed914b03355330932616a0f26f994b963791f0b1dbd8df979db86d1ea490700a3120293c3c2b10bef10203010001a33c303a300e0603551d0f0101ff0404030204b030130603551d25040c300a06082b0601050507030230130603551d25040c300a06082b06010505070302300d06092a864886f70d01010b05000382020100a21a0d73ec9ef4eb39f810557ac70b0b775772b8bae5f42c98565bc50b5b2c57317aa9cb1da12f55d0aac7bb36a00cd4fd0d7384c4efa284b53520c5a3c4b8a65240b393
eeab02c802ea146c0728c3481c9e8d3aaad9d4dd7607103dcfaa96da83460adbe18174ed5b71bde7b0a93d4fb52234a9ff54e3fd25c5b74790dfb090f2e59dc5907357f510cc3a0b70ccdb87aee214def794b316224f318b471ffa13b66e44b467670e881cb1628c99c048a503376d9b6d7b8eef2e7be47ff7d5c1d56221f4cf7fa2519b594cb5917815c64dc75d8d281bcc99b5a12899b08f2ca0f189857b64a1afc5963337f3dd6e79390e85221569f6dbbb13aadce06a3dfb5032f0cc454809627872cd7cd0cea5eba187723f07652c8abc3fc42bd62136fc66287f2cc19a7cb416923ad1862d7f820b55cacb65e43731cb6df780e2651e457a3438456aeeeb278ad9c0ad2e760f6c1cbe276eeb621c8a4e609b5f2d902beb3212e3e45df99497021ff536d0b56390c5d785a8bf7909f6b61bdc705d7d92ae22f58e7b075f164a0450d82d8286bf449072751636ab5185f59f518b845a75d112d6f7b65223479202cff67635e2ad88106bc8a0cc9352d87c5b182ac19a4680a958d814a093acf46730f87da0df6926291d02590f215041b44a0a1a32eeb3a52cddabc3d256689bace18a8d85e644cf9137cce3718f7caac1cb16ae06e874f4c701000000010300000200b8e3a4d9a4394bac714cb597f5ba1a81865185e35c782d0317e7abc0b52d49ff8e10f787bedf86f08148e3dbd2d2d478caa2a2893d31db7d5ee51339883fe84d3004440f16cb3797a7fab0f627d3ebd79217e995488e785cd6bb7b96b9d306f8109daa9cfc4162f9839f60fb965bcb3b56a5fa787549c153a4c80027398f73a617b90b7f24f437b140cd3ac832c0b75ec98b9423b275782988a9fd426937b8f82fbb0e88a622934643fb6335c1a080a4d13125544b04585d5f5295be7cd2c8be364246ea3d5df3e837b39a85074575a1fa2f4799050460110bdfb20795c8a9172a20f61b95e1c5c43eccd0c2c155b67385366142c63409cb3fb488e7aba6c8930f7f151abf1c24a54bd21c3f7a06856ea9db35beddecb30d2c61f533a3d0590bdbb438c6f2a2286dfc3c71b383354f0abad72771c2cc3687b50c2298783e53857cf26058ed78d0c1cf53786eb8d006a058ee3c85a7b2b836b5d03ef782709ce8f2725548e557b3de45a395a669a15f1d910e97015d22ac70020cab7e2531e8b1f739b023b49e742203e9e19a7fe0053826a9a2fe2e118d3b83498c2cb308573202ad41aa4a390aee4b6b5dd2164e5c5cd1b5f68b7d5632cf7dbb9a9139663c9aac53a74b2c6fc73cad80e228a186ba027f6f32f0182d62503e04fcced385f2e7d2e11c00940622ebd533b4d144689082f9777e5b16c36f9af9066e0ad6564d43", + expectedErr: nil, + }, + { + name: "gibberish", + hex: 
"000102030405", + expectedErr: codec.ErrUnknownVersion, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) - _, err = Parse(blockBytes, durangoNotYetActivatedTime) - require.ErrorIs(err, errInvalidCertificate) + bytes, err := hex.DecodeString(test.hex) + require.NoError(err) - _, err = Parse(blockBytes, durangoAlreadyActivatedTime) - require.NoError(err) + _, err = Parse(bytes, chainID) + require.ErrorIs(err, test.expectedErr) + }) + } } func TestParseHeader(t *testing.T) { @@ -98,56 +162,3 @@ func TestParseHeader(t *testing.T) { equalHeader(require, builtHeader, parsedHeader) } - -func TestParseOption(t *testing.T) { - require := require.New(t) - - parentID := ids.ID{1} - innerBlockBytes := []byte{3} - - builtOption, err := BuildOption(parentID, innerBlockBytes) - require.NoError(err) - - builtOptionBytes := builtOption.Bytes() - - parsedOption, err := Parse(builtOptionBytes, time.Time{}) - require.NoError(err) - - equalOption(require, builtOption, parsedOption) -} - -func TestParseUnsigned(t *testing.T) { - require := require.New(t) - - parentID := ids.ID{1} - timestamp := time.Unix(123, 0) - pChainHeight := uint64(2) - innerBlockBytes := []byte{3} - - builtBlock, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) - require.NoError(err) - - builtBlockBytes := builtBlock.Bytes() - durangoTimes := []time.Time{ - timestamp.Add(time.Second), // Durango not activated yet - timestamp.Add(-time.Second), // Durango activated - } - for _, durangoTime := range durangoTimes { - parsedBlockIntf, err := Parse(builtBlockBytes, durangoTime) - require.NoError(err) - - parsedBlock, ok := parsedBlockIntf.(SignedBlock) - require.True(ok) - - equal(require, ids.Empty, builtBlock, parsedBlock) - } -} - -func TestParseGibberish(t *testing.T) { - require := require.New(t) - - bytes := []byte{0, 1, 2, 3, 4, 5} - - _, err := Parse(bytes, time.Time{}) - require.ErrorIs(err, codec.ErrUnknownVersion) -} diff 
--git a/vms/proposervm/block_server.go b/vms/proposervm/block_server.go deleted file mode 100644 index 6a056c8bc827..000000000000 --- a/vms/proposervm/block_server.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package proposervm - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/vms/proposervm/indexer" -) - -var _ indexer.BlockServer = (*VM)(nil) - -// Note: this is a contention heavy call that should be avoided -// for frequent/repeated indexer ops -func (vm *VM) GetFullPostForkBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { - vm.ctx.Lock.Lock() - defer vm.ctx.Lock.Unlock() - - return vm.getPostForkBlock(ctx, blkID) -} - -func (vm *VM) Commit() error { - vm.ctx.Lock.Lock() - defer vm.ctx.Lock.Unlock() - - return vm.db.Commit() -} diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index 3743cf8fa626..d55a615537d0 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -12,13 +12,14 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" @@ -45,11 +46,11 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { blkID = ids.GenerateTestID() ) - innerBlk := snowman.NewMockBlock(ctrl) + innerBlk := snowmantest.NewMockBlock(ctrl) innerBlk.EXPECT().ID().Return(blkID).AnyTimes() 
innerBlk.EXPECT().Height().Return(parentHeight + 1).AnyTimes() - builtBlk := snowman.NewMockBlock(ctrl) + builtBlk := snowmantest.NewMockBlock(ctrl) builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() @@ -74,6 +75,7 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { DurangoTime: time.Unix(0, 0), StakingCertLeaf: &staking.Certificate{}, StakingLeafSigner: pk, + Registerer: prometheus.NewRegistry(), }, ChainVM: innerVM, blockBuilderVM: innerBlockBuilderVM, @@ -109,7 +111,7 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(ctx)) }() @@ -118,24 +120,16 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { parentTime := time.Now().Truncate(time.Second) proVM.Set(parentTime) - coreParentBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreParentBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreParentBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreParentBlk.ID(): + switch blkID { + case coreParentBlk.ID(): return coreParentBlk, nil - case blkID == coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -144,8 +138,8 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) 
{ switch { case bytes.Equal(b, coreParentBlk.Bytes()): return coreParentBlk, nil - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -176,15 +170,7 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { }, nil } - coreChildBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreParentBlk.ID(), - HeightV: coreParentBlk.Height() + 1, - } + coreChildBlk := snowmantest.BuildChild(coreParentBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreChildBlk, nil } @@ -195,10 +181,12 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay - time.Second) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(proVM.ctx.NodeID, childBlk.(*postForkBlock).Proposer()) // signed block + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(proVM.ctx.NodeID, childBlk.Proposer()) // signed block } { @@ -207,34 +195,41 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { localTime := parentBlk.Timestamp().Add(proposer.MaxVerifyDelay) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // signed block + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } { - // Set local clock among MaxVerifyDelay and 
MaxBuildDelay from parent timestamp - // Check that child block is unsigned + // Set local clock between MaxVerifyDelay and MaxBuildDelay from parent + // timestamp. + // Check that child block is unsigned. localTime := parentBlk.Timestamp().Add((proposer.MaxVerifyDelay + proposer.MaxBuildDelay) / 2) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } { - // Set local clock after MaxBuildDelay from parent timestamp - // Check that child block is unsigned + // Set local clock after MaxBuildDelay from parent timestamp. + // Check that child block is unsigned. localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } } @@ -246,7 +241,7 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(ctx)) }() @@ -255,24 +250,16 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { parentTime := time.Now().Truncate(time.Second) 
proVM.Set(parentTime) - coreParentBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreParentBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreParentBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreParentBlk.ID(): + switch blkID { + case coreParentBlk.ID(): return coreParentBlk, nil - case blkID == coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -281,8 +268,8 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { switch { case bytes.Equal(b, coreParentBlk.Bytes()): return coreParentBlk, nil - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -315,15 +302,7 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { }, nil } - coreChildBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreParentBlk.ID(), - HeightV: coreParentBlk.Height() + 1, - } + coreChildBlk := snowmantest.BuildChild(coreParentBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreChildBlk, nil } @@ -364,10 +343,12 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { localTime := parentBlk.Timestamp().Add(proposer.MaxBuildDelay) proVM.Set(localTime) - childBlk, err := proVM.BuildBlock(ctx) + childBlkIntf, err := proVM.BuildBlock(ctx) require.NoError(err) - require.IsType(&postForkBlock{}, childBlk) - require.Equal(ids.EmptyNodeID, 
childBlk.(*postForkBlock).Proposer()) // unsigned so no proposer + require.IsType(&postForkBlock{}, childBlkIntf) + + childBlk := childBlkIntf.(*postForkBlock) + require.Equal(ids.EmptyNodeID, childBlk.Proposer()) // unsigned block } } @@ -387,7 +368,7 @@ func TestPostDurangoBuildChildResetScheduler(t *testing.T) { parentHeight uint64 = 1234 ) - innerBlk := snowman.NewMockBlock(ctrl) + innerBlk := snowmantest.NewMockBlock(ctrl) innerBlk.EXPECT().Height().Return(parentHeight + 1).AnyTimes() vdrState := validators.NewMockState(ctrl) @@ -407,6 +388,7 @@ func TestPostDurangoBuildChildResetScheduler(t *testing.T) { DurangoTime: time.Unix(0, 0), StakingCertLeaf: &staking.Certificate{}, StakingLeafSigner: pk, + Registerer: prometheus.NewRegistry(), }, ChainVM: block.NewMockChainVM(ctrl), ctx: &snow.Context{ @@ -414,8 +396,9 @@ func TestPostDurangoBuildChildResetScheduler(t *testing.T) { ValidatorState: vdrState, Log: logging.NoLog{}, }, - Windower: windower, - Scheduler: scheduler, + Windower: windower, + Scheduler: scheduler, + proposerBuildSlotGauge: prometheus.NewGauge(prometheus.GaugeOpts{}), } vm.Clock.Set(now) diff --git a/vms/proposervm/config.go b/vms/proposervm/config.go index a7eb4ff0db9b..296f6a60520c 100644 --- a/vms/proposervm/config.go +++ b/vms/proposervm/config.go @@ -7,6 +7,8 @@ import ( "crypto" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/staking" ) @@ -32,6 +34,9 @@ type Config struct { // Block certificate StakingCertLeaf *staking.Certificate + + // Registerer for prometheus metrics + Registerer prometheus.Registerer } func (c *Config) IsDurangoActivated(timestamp time.Time) bool { diff --git a/vms/proposervm/height_indexed_vm.go b/vms/proposervm/height_indexed_vm.go index a29334f6d8dd..49c38a7df07f 100644 --- a/vms/proposervm/height_indexed_vm.go +++ b/vms/proposervm/height_indexed_vm.go @@ -11,64 +11,12 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - 
"github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) const pruneCommitPeriod = 1024 -// shouldHeightIndexBeRepaired checks if index needs repairing and stores a -// checkpoint if repairing is needed. -// -// vm.ctx.Lock should be held -func (vm *VM) shouldHeightIndexBeRepaired(ctx context.Context) (bool, error) { - _, err := vm.State.GetCheckpoint() - if err != database.ErrNotFound { - return true, err - } - - // no checkpoint. Either index is complete or repair was never attempted. - // index is complete iff lastAcceptedBlock is indexed - latestProBlkID, err := vm.State.GetLastAccepted() - if err == database.ErrNotFound { - return false, nil - } - if err != nil { - return false, err - } - - lastAcceptedBlk, err := vm.getPostForkBlock(ctx, latestProBlkID) - if err != nil { - // Could not retrieve last accepted block. - return false, err - } - - _, err = vm.State.GetBlockIDAtHeight(lastAcceptedBlk.Height()) - if err != database.ErrNotFound { - return false, err - } - - // Index needs repairing. Mark the checkpoint so that, in case new blocks - // are accepted after the lock is released here but before indexing has - // started, we do not miss rebuilding the full index. - return true, vm.State.SetCheckpoint(latestProBlkID) -} - -// vm.ctx.Lock should be held -func (vm *VM) VerifyHeightIndex(context.Context) error { - if !vm.hIndexer.IsRepaired() { - return block.ErrIndexIncomplete - } - return nil -} - // vm.ctx.Lock should be held func (vm *VM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { - if !vm.hIndexer.IsRepaired() { - return ids.Empty, block.ErrIndexIncomplete - } - - // The indexer will only report that the index has been repaired if the - // underlying VM supports indexing. 
switch forkHeight, err := vm.State.GetForkHeight(); err { case nil: if height < forkHeight { @@ -85,32 +33,7 @@ func (vm *VM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, er } } -// As postFork blocks/options are accepted, height index is updated even if its -// repairing is ongoing. vm.ctx.Lock should be held func (vm *VM) updateHeightIndex(height uint64, blkID ids.ID) error { - _, err := vm.State.GetCheckpoint() - switch err { - case nil: - // Index rebuilding is ongoing. We can update the index with the current - // block. - - case database.ErrNotFound: - // No checkpoint means indexing has either not started or is already - // done. - if !vm.hIndexer.IsRepaired() { - return nil - } - - // Indexing must have finished. We can update the index with the current - // block. - - default: - return fmt.Errorf("failed to load index checkpoint: %w", err) - } - return vm.storeHeightEntry(height, blkID) -} - -func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { forkHeight, err := vm.State.GetForkHeight() switch err { case nil: diff --git a/vms/proposervm/indexer/block_server.go b/vms/proposervm/indexer/block_server.go deleted file mode 100644 index fcecaf9e9fcf..000000000000 --- a/vms/proposervm/indexer/block_server.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package indexer - -import ( - "context" - - "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" -) - -// BlockServer represents all requests heightIndexer can issue -// against ProposerVM. All methods must be thread-safe. 
-type BlockServer interface { - versiondb.Commitable - - // Note: this is a contention heavy call that should be avoided - // for frequent/repeated indexer ops - GetFullPostForkBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) -} diff --git a/vms/proposervm/indexer/block_server_test.go b/vms/proposervm/indexer/block_server_test.go deleted file mode 100644 index a973d66a05a9..000000000000 --- a/vms/proposervm/indexer/block_server_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package indexer - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" -) - -var ( - errGetWrappingBlk = errors.New("unexpectedly called GetWrappingBlk") - errCommit = errors.New("unexpectedly called Commit") - - _ BlockServer = (*TestBlockServer)(nil) -) - -// TestBatchedVM is a BatchedVM that is useful for testing. 
-type TestBlockServer struct { - T *testing.T - - CantGetFullPostForkBlock bool - CantCommit bool - - GetFullPostForkBlockF func(ctx context.Context, blkID ids.ID) (snowman.Block, error) - CommitF func() error -} - -func (tsb *TestBlockServer) GetFullPostForkBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { - if tsb.GetFullPostForkBlockF != nil { - return tsb.GetFullPostForkBlockF(ctx, blkID) - } - if tsb.CantGetFullPostForkBlock && tsb.T != nil { - require.FailNow(tsb.T, errGetWrappingBlk.Error()) - } - return nil, errGetWrappingBlk -} - -func (tsb *TestBlockServer) Commit() error { - if tsb.CommitF != nil { - return tsb.CommitF() - } - if tsb.CantCommit && tsb.T != nil { - require.FailNow(tsb.T, errCommit.Error()) - } - return errCommit -} diff --git a/vms/proposervm/indexer/height_indexer.go b/vms/proposervm/indexer/height_indexer.go deleted file mode 100644 index c0a1e4155b3b..000000000000 --- a/vms/proposervm/indexer/height_indexer.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package indexer - -import ( - "context" - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/proposervm/state" -) - -// default number of heights to index before committing -const ( - defaultCommitFrequency = 1024 - // Sleep [sleepDurationMultiplier]x (10x) the amount of time we spend - // processing the block to ensure the async indexing does not bottleneck the - // node. - sleepDurationMultiplier = 10 -) - -var _ HeightIndexer = (*heightIndexer)(nil) - -type HeightIndexer interface { - // Returns whether the height index is fully repaired. - IsRepaired() bool - - // MarkRepaired atomically sets the indexing repaired state. 
- MarkRepaired(isRepaired bool) - - // Resumes repairing of the height index from the checkpoint. - RepairHeightIndex(context.Context) error -} - -func NewHeightIndexer( - server BlockServer, - log logging.Logger, - indexState state.State, -) HeightIndexer { - return newHeightIndexer(server, log, indexState) -} - -func newHeightIndexer( - server BlockServer, - log logging.Logger, - indexState state.State, -) *heightIndexer { - return &heightIndexer{ - server: server, - log: log, - state: indexState, - commitFrequency: defaultCommitFrequency, - } -} - -type heightIndexer struct { - server BlockServer - log logging.Logger - - jobDone utils.Atomic[bool] - state state.State - - commitFrequency int -} - -func (hi *heightIndexer) IsRepaired() bool { - return hi.jobDone.Get() -} - -func (hi *heightIndexer) MarkRepaired(repaired bool) { - hi.jobDone.Set(repaired) -} - -// RepairHeightIndex ensures the height -> proBlkID height block index is well formed. -// Starting from the checkpoint, it will go back to snowman++ activation fork -// or genesis. PreFork blocks will be handled by innerVM height index. -// RepairHeightIndex can take a non-trivial time to complete; hence we make sure -// the process has limited memory footprint, can be resumed from periodic checkpoints -// and works asynchronously without blocking the VM. -func (hi *heightIndexer) RepairHeightIndex(ctx context.Context) error { - startBlkID, err := hi.state.GetCheckpoint() - if err == database.ErrNotFound { - hi.MarkRepaired(true) - return nil // nothing to do - } - if err != nil { - return err - } - - // retrieve checkpoint height. We explicitly track block height - // in doRepair to avoid heavier DB reads. 
- startBlk, err := hi.server.GetFullPostForkBlock(ctx, startBlkID) - if err != nil { - return err - } - - startHeight := startBlk.Height() - if err := hi.doRepair(ctx, startBlkID, startHeight); err != nil { - return fmt.Errorf("could not repair height index: %w", err) - } - if err := hi.flush(); err != nil { - return fmt.Errorf("could not write final height index update: %w", err) - } - return nil -} - -// if height index needs repairing, doRepair would do that. It -// iterates back via parents, checking and rebuilding height indexing. -// Note: batch commit is deferred to doRepair caller -func (hi *heightIndexer) doRepair(ctx context.Context, currentProBlkID ids.ID, lastIndexedHeight uint64) error { - var ( - start = time.Now() - lastLogTime = start - indexedBlks int - lastIndexedBlks int - ) - for { - if err := ctx.Err(); err != nil { - return err - } - - processingStart := time.Now() - currentAcceptedBlk, _, err := hi.state.GetBlock(currentProBlkID) - if err == database.ErrNotFound { - // We have visited all the proposerVM blocks. Because we previously - // verified that we needed to perform a repair, we know that this - // will not happen on the first iteration. This guarantees that - // forkHeight will be correctly initialized. - forkHeight := lastIndexedHeight + 1 - if err := hi.state.SetForkHeight(forkHeight); err != nil { - return err - } - if err := hi.state.DeleteCheckpoint(); err != nil { - return err - } - hi.MarkRepaired(true) - - // it will commit on exit - hi.log.Info("indexing finished", - zap.Int("numIndexedBlocks", indexedBlks), - zap.Duration("duration", time.Since(start)), - zap.Uint64("forkHeight", forkHeight), - ) - return nil - } - if err != nil { - return err - } - - // Keep memory footprint under control by committing when a size threshold is reached - if indexedBlks-lastIndexedBlks > hi.commitFrequency { - // Note: checkpoint must be the lowest block in the batch. 
This ensures that - // checkpoint is the highest un-indexed block from which process would restart. - if err := hi.state.SetCheckpoint(currentProBlkID); err != nil { - return err - } - - if err := hi.flush(); err != nil { - return err - } - - hi.log.Debug("indexed blocks", - zap.Int("numIndexBlocks", indexedBlks), - ) - lastIndexedBlks = indexedBlks - } - - // Rebuild height block index. - if err := hi.state.SetBlockIDAtHeight(lastIndexedHeight, currentProBlkID); err != nil { - return err - } - - // Periodically log progress - indexedBlks++ - now := time.Now() - if now.Sub(lastLogTime) > 15*time.Second { - lastLogTime = now - hi.log.Info("indexed blocks", - zap.Int("numIndexBlocks", indexedBlks), - zap.Uint64("lastIndexedHeight", lastIndexedHeight), - ) - } - - // keep checking the parent - currentProBlkID = currentAcceptedBlk.ParentID() - lastIndexedHeight-- - - processingDuration := time.Since(processingStart) - // Sleep [sleepDurationMultiplier]x (5x) the amount of time we spend processing the block - // to ensure the indexing does not bottleneck the node. - time.Sleep(processingDuration * sleepDurationMultiplier) - } -} - -// flush writes the commits to the underlying DB -func (hi *heightIndexer) flush() error { - if err := hi.state.Commit(); err != nil { - return err - } - return hi.server.Commit() -} diff --git a/vms/proposervm/indexer/height_indexer_test.go b/vms/proposervm/indexer/height_indexer_test.go deleted file mode 100644 index 2a093530048a..000000000000 --- a/vms/proposervm/indexer/height_indexer_test.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package indexer - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/proposervm/block" - "github.com/ava-labs/avalanchego/vms/proposervm/state" -) - -func TestHeightBlockIndexPostFork(t *testing.T) { - require := require.New(t) - - db := memdb.New() - vdb := versiondb.New(db) - storedState := state.New(vdb) - - // Build a chain of post fork blocks - var ( - blkNumber = uint64(10) - lastBlkID = ids.Empty.Prefix(0) // initially set to a dummyGenesisID - proBlks = make(map[ids.ID]snowman.Block) - ) - - for blkHeight := uint64(1); blkHeight <= blkNumber; blkHeight++ { - blockBytes := ids.Empty.Prefix(blkHeight + blkNumber + 1) - dummyTS := time.Time{} - dummyPCH := uint64(2022) - - // store postForkStatelessBlk in State ... - postForkStatelessBlk, err := block.BuildUnsigned( - lastBlkID, - dummyTS, - dummyPCH, - blockBytes[:], - ) - require.NoError(err) - require.NoError(storedState.PutBlock(postForkStatelessBlk, choices.Accepted)) - - // ... 
and create a corresponding test block just for block server - postForkBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: postForkStatelessBlk.ID(), - StatusV: choices.Accepted, - }, - HeightV: blkHeight, - } - proBlks[postForkBlk.ID()] = postForkBlk - - lastBlkID = postForkStatelessBlk.ID() - } - - blkSrv := &TestBlockServer{ - CantGetFullPostForkBlock: true, - CantCommit: true, - - GetFullPostForkBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - blk, found := proBlks[blkID] - if !found { - return nil, database.ErrNotFound - } - return blk, nil - }, - CommitF: func() error { - return nil - }, - } - - hIndex := newHeightIndexer(blkSrv, - logging.NoLog{}, - storedState, - ) - hIndex.commitFrequency = 0 // commit each block - - // checkpoint last accepted block and show the whole chain in reindexed - require.NoError(hIndex.state.SetCheckpoint(lastBlkID)) - require.NoError(hIndex.RepairHeightIndex(context.Background())) - require.True(hIndex.IsRepaired()) - - // check that height index is fully built - loadedForkHeight, err := storedState.GetForkHeight() - require.NoError(err) - require.Equal(uint64(1), loadedForkHeight) - for height := uint64(1); height <= blkNumber; height++ { - _, err := storedState.GetBlockIDAtHeight(height) - require.NoError(err) - } -} - -func TestHeightBlockIndexAcrossFork(t *testing.T) { - require := require.New(t) - - db := memdb.New() - vdb := versiondb.New(db) - storedState := state.New(vdb) - - // Build a chain of post fork blocks - var ( - blkNumber = uint64(10) - forkHeight = blkNumber / 2 - lastBlkID = ids.Empty.Prefix(0) // initially set to a last pre fork blk - proBlks = make(map[ids.ID]snowman.Block) - ) - - for blkHeight := forkHeight; blkHeight <= blkNumber; blkHeight++ { - blockBytes := ids.Empty.Prefix(blkHeight + blkNumber + 1) - dummyTS := time.Time{} - dummyPCH := uint64(2022) - - // store postForkStatelessBlk in State ... 
- postForkStatelessBlk, err := block.BuildUnsigned( - lastBlkID, - dummyTS, - dummyPCH, - blockBytes[:], - ) - require.NoError(err) - require.NoError(storedState.PutBlock(postForkStatelessBlk, choices.Accepted)) - - // ... and create a corresponding test block just for block server - postForkBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: postForkStatelessBlk.ID(), - StatusV: choices.Accepted, - }, - HeightV: blkHeight, - } - proBlks[postForkBlk.ID()] = postForkBlk - - lastBlkID = postForkStatelessBlk.ID() - } - - blkSrv := &TestBlockServer{ - CantGetFullPostForkBlock: true, - CantCommit: true, - - GetFullPostForkBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - blk, found := proBlks[blkID] - if !found { - return nil, database.ErrNotFound - } - return blk, nil - }, - CommitF: func() error { - return nil - }, - } - - hIndex := newHeightIndexer(blkSrv, - logging.NoLog{}, - storedState, - ) - hIndex.commitFrequency = 0 // commit each block - - // checkpoint last accepted block and show the whole chain in reindexed - require.NoError(hIndex.state.SetCheckpoint(lastBlkID)) - require.NoError(hIndex.RepairHeightIndex(context.Background())) - require.True(hIndex.IsRepaired()) - - // check that height index is fully built - loadedForkHeight, err := storedState.GetForkHeight() - require.NoError(err) - require.Equal(forkHeight, loadedForkHeight) - for height := uint64(0); height < forkHeight; height++ { - _, err := storedState.GetBlockIDAtHeight(height) - require.ErrorIs(err, database.ErrNotFound) - } - for height := forkHeight; height <= blkNumber; height++ { - _, err := storedState.GetBlockIDAtHeight(height) - require.NoError(err) - } -} - -func TestHeightBlockIndexResumeFromCheckPoint(t *testing.T) { - require := require.New(t) - - db := memdb.New() - vdb := versiondb.New(db) - storedState := state.New(vdb) - - // Build a chain of post fork blocks - var ( - blkNumber = uint64(10) - forkHeight = blkNumber / 2 - lastBlkID = 
ids.Empty.Prefix(0) // initially set to a last pre fork blk - proBlks = make(map[ids.ID]snowman.Block) - ) - - for blkHeight := forkHeight; blkHeight <= blkNumber; blkHeight++ { - blockBytes := ids.Empty.Prefix(blkHeight + blkNumber + 1) - dummyTS := time.Time{} - dummyPCH := uint64(2022) - - // store postForkStatelessBlk in State ... - postForkStatelessBlk, err := block.BuildUnsigned( - lastBlkID, - dummyTS, - dummyPCH, - blockBytes[:], - ) - require.NoError(err) - require.NoError(storedState.PutBlock(postForkStatelessBlk, choices.Accepted)) - - // ... and create a corresponding test block just for block server - postForkBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: postForkStatelessBlk.ID(), - StatusV: choices.Accepted, - }, - HeightV: blkHeight, - } - proBlks[postForkBlk.ID()] = postForkBlk - - lastBlkID = postForkStatelessBlk.ID() - } - - blkSrv := &TestBlockServer{ - CantGetFullPostForkBlock: true, - CantCommit: true, - - GetFullPostForkBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - blk, found := proBlks[blkID] - if !found { - return nil, database.ErrNotFound - } - return blk, nil - }, - CommitF: func() error { - return nil - }, - } - - hIndex := newHeightIndexer(blkSrv, - logging.NoLog{}, - storedState, - ) - hIndex.commitFrequency = 0 // commit each block - - // pick a random block in the chain and checkpoint it;... 
- rndPostForkHeight := rand.Intn(int(blkNumber-forkHeight)) + int(forkHeight) // #nosec G404 - var checkpointBlk snowman.Block - for _, blk := range proBlks { - if blk.Height() != uint64(rndPostForkHeight) { - continue // not the blk we are looking for - } - - checkpointBlk = blk - require.NoError(hIndex.state.SetCheckpoint(checkpointBlk.ID())) - break - } - - // perform repair and show index is built - require.NoError(hIndex.RepairHeightIndex(context.Background())) - require.True(hIndex.IsRepaired()) - - // check that height index is fully built - loadedForkHeight, err := storedState.GetForkHeight() - require.NoError(err) - require.Equal(forkHeight, loadedForkHeight) - for height := forkHeight; height <= checkpointBlk.Height(); height++ { - _, err := storedState.GetBlockIDAtHeight(height) - require.NoError(err) - } -} diff --git a/vms/proposervm/post_fork_block.go b/vms/proposervm/post_fork_block.go index 707b6dc327c7..2c875807eb79 100644 --- a/vms/proposervm/post_fork_block.go +++ b/vms/proposervm/post_fork_block.go @@ -17,6 +17,11 @@ var _ PostForkBlock = (*postForkBlock)(nil) type postForkBlock struct { block.SignedBlock postForkCommonComponents + + // slot of the proposer that produced this block. + // It is populated in verifyPostDurangoBlockDelay. + // It is used to report metrics during Accept. 
+ slot *uint64 } // Accept: @@ -27,7 +32,13 @@ func (b *postForkBlock) Accept(ctx context.Context) error { if err := b.acceptOuterBlk(); err != nil { return err } - return b.acceptInnerBlk(ctx) + if err := b.acceptInnerBlk(ctx); err != nil { + return err + } + if b.slot != nil { + b.vm.acceptedBlocksSlotHistogram.Observe(float64(*b.slot)) + } + return nil } func (b *postForkBlock) acceptOuterBlk() error { diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index a16d4a7d6219..416fc087b101 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/block" @@ -31,7 +32,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // setup proBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ - innerBlk: &snowman.TestBlock{}, + innerBlk: snowmantest.BuildChild(snowmantest.Genesis), }, } @@ -44,31 +45,17 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - _, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + _, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() + innerTestBlock := snowmantest.BuildChild(snowmantest.Genesis) innerOracleBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - }, - BytesV: []byte{1}, - }, + Block: *innerTestBlock, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: 
choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - }, - BytesV: []byte{2}, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - }, - BytesV: []byte{3}, - }, + snowmantest.BuildChild(innerTestBlock), + snowmantest.BuildChild(innerTestBlock), }, } @@ -104,7 +91,7 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime // pre Durango ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -115,22 +102,14 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { } // create parent block ... - parentCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + parentCoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case parentCoreBlk.ID(): return parentCoreBlk, nil default: @@ -139,8 +118,8 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, parentCoreBlk.Bytes()): return parentCoreBlk, nil default: @@ -155,11 +134,7 @@ func 
TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // .. create child block ... - childCoreBlk := &snowman.TestBlock{ - ParentV: parentCoreBlk.ID(), - BytesV: []byte{2}, - HeightV: parentCoreBlk.Height() + 1, - } + childCoreBlk := snowmantest.BuildChild(parentCoreBlk) childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, @@ -208,7 +183,7 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime // post Durango ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -218,22 +193,14 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { return pChainHeight, nil } - parentCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + parentCoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case parentCoreBlk.ID(): return parentCoreBlk, nil default: @@ -242,8 +209,8 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return 
snowmantest.Genesis, nil case bytes.Equal(b, parentCoreBlk.Bytes()): return parentCoreBlk, nil default: @@ -257,11 +224,7 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { require.NoError(parentBlk.Verify(context.Background())) require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) - childCoreBlk := &snowman.TestBlock{ - ParentV: parentCoreBlk.ID(), - BytesV: []byte{2}, - HeightV: parentCoreBlk.Height() + 1, - } + childCoreBlk := snowmantest.BuildChild(parentCoreBlk) childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, @@ -317,7 +280,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -347,22 +310,14 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { } // create parent block ... 
- parentCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + parentCoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case parentCoreBlk.ID(): return parentCoreBlk, nil default: @@ -371,8 +326,8 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, parentCoreBlk.Bytes()): return parentCoreBlk, nil default: @@ -391,15 +346,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { parentPChainHeight = parentBlk.(*postForkBlock).PChainHeight() ) - childCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - ParentV: parentCoreBlk.ID(), - HeightV: parentCoreBlk.Height() + 1, - BytesV: []byte{2}, - } + childCoreBlk := snowmantest.BuildChild(parentCoreBlk) childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, @@ -538,7 +485,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { 
require.NoError(proVM.Shutdown(context.Background())) }() @@ -552,22 +499,14 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { } // create parent block ... - parentCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + parentCoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case parentCoreBlk.ID(): return parentCoreBlk, nil default: @@ -576,8 +515,8 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, parentCoreBlk.Bytes()): return parentCoreBlk, nil default: @@ -596,15 +535,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { parentBlkPChainHeight := parentBlk.(*postForkBlock).PChainHeight() require.NoError(waitForProposerWindow(proVM, parentBlk, parentBlkPChainHeight)) - childCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - ParentV: parentCoreBlk.ID(), - HeightV: parentBlk.Height() + 1, - BytesV: []byte{2}, - } + childCoreBlk := snowmantest.BuildChild(parentCoreBlk) childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, @@ -709,7 +640,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) activationTime = 
time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -723,36 +654,14 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } // create post fork oracle block ... + innerTestBlock := snowmantest.BuildChild(snowmantest.Genesis) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, + Block: *innerTestBlock, } + preferredOracleBlkChild := snowmantest.BuildChild(innerTestBlock) oracleCoreBlk.opts = [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - }, + preferredOracleBlkChild, + snowmantest.BuildChild(innerTestBlock), } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -760,8 +669,8 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -774,8 +683,8 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) 
} coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, oracleCoreBlk.Bytes()): return oracleCoreBlk, nil case bytes.Equal(b, oracleCoreBlk.opts[0].Bytes()): @@ -810,15 +719,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) parentBlkPChainHeight := postForkOracleBlk.PChainHeight() // option takes proposal blocks' Pchain height - childCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - ParentV: oracleCoreBlk.opts[0].ID(), - BytesV: []byte{2}, - HeightV: oracleCoreBlk.opts[0].Height() + 1, - } + childCoreBlk := snowmantest.BuildChild(preferredOracleBlkChild) childBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ vm: proVM, @@ -909,7 +810,7 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -919,22 +820,14 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { return pChainHeight, nil } - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch 
blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -943,8 +836,8 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil default: @@ -976,7 +869,7 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -986,22 +879,14 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { return pChainHeight, nil } - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -1010,8 +895,8 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil 
+ case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil default: @@ -1029,7 +914,7 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { if coreBlk.Status() == choices.Accepted { return coreBlk.ID(), nil } - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } acceptedID, err := proVM.LastAccepted(context.Background()) require.NoError(err) @@ -1043,7 +928,7 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -1054,20 +939,12 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t } // generate two blocks with the same core block and store them - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } - minimumHeight = coreGenBlk.Height() + minimumHeight = snowmantest.GenesisHeight proBlk1, err := proVM.BuildBlock(context.Background()) require.NoError(err) @@ -1094,20 +971,12 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - 
coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -1130,44 +999,19 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create post fork oracle block ... + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, - } - coreOpt0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - } - coreOpt1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, + Block: *coreTestBlk, + opts: [2]snowman.Block{ + snowmantest.BuildChild(coreTestBlk), + snowmantest.BuildChild(coreTestBlk), }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - } - oracleCoreBlk.opts = [2]snowman.Block{ - coreOpt0, - coreOpt1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -1175,8 +1019,8 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } coreVM.GetBlockF = func(_ 
context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -1189,8 +1033,8 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, oracleCoreBlk.Bytes()): return oracleCoreBlk, nil case bytes.Equal(b, oracleCoreBlk.opts[0].Bytes()): @@ -1248,25 +1092,16 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 5) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 5) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } - + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -1275,8 +1110,8 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case 
bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil default: @@ -1285,8 +1120,8 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { } statelessChild, err := block.BuildUnsigned( - coreGenBlk.ID(), - coreGenBlk.Timestamp(), + snowmantest.GenesisID, + snowmantest.GenesisTimestamp, 4, coreBlk.Bytes(), ) diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index dd16f8cdb518..43b7d5f5b90f 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -16,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) @@ -23,7 +25,7 @@ import ( var _ snowman.OracleBlock = (*TestOptionsBlock)(nil) type TestOptionsBlock struct { - snowman.TestBlock + snowmantest.Block opts [2]snowman.Block optsErr error } @@ -40,41 +42,19 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create post fork oracle block ... 
+ coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) + preferredBlk := snowmantest.BuildChild(coreTestBlk) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, - } - oracleCoreBlk.opts = [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, + Block: *coreTestBlk, + opts: [2]snowman.Block{ + preferredBlk, + snowmantest.BuildChild(coreTestBlk), }, } @@ -83,8 +63,8 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -97,8 +77,8 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, oracleCoreBlk.Bytes()): return oracleCoreBlk, nil case bytes.Equal(b, oracleCoreBlk.opts[0].Bytes()): @@ -130,15 +110,7 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { // show we can build on options require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) - 
childCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(4444), - StatusV: choices.Processing, - }, - ParentV: oracleCoreBlk.opts[0].ID(), - BytesV: []byte{4}, - HeightV: oracleCoreBlk.opts[0].Height() + 1, - } + childCoreBlk := snowmantest.BuildChild(preferredBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return childCoreBlk, nil } @@ -159,44 +131,21 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create post fork oracle block ... + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) + coreOpt0 := snowmantest.BuildChild(coreTestBlk) + coreOpt1 := snowmantest.BuildChild(coreTestBlk) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, - } - coreOpt0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - } - coreOpt1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, + Block: *coreTestBlk, + opts: [2]snowman.Block{ + coreOpt0, + coreOpt1, }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - } - oracleCoreBlk.opts = [2]snowman.Block{ - coreOpt0, - coreOpt1, } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -204,8 +153,8 @@ func 
TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -218,8 +167,8 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, oracleCoreBlk.Bytes()): return oracleCoreBlk, nil case bytes.Equal(b, oracleCoreBlk.opts[0].Bytes()): @@ -264,41 +213,18 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create post fork oracle block ... 
+ coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, - } - oracleCoreBlk.opts = [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, + Block: *coreTestBlk, + opts: [2]snowman.Block{ + snowmantest.BuildChild(coreTestBlk), + snowmantest.BuildChild(coreTestBlk), }, } @@ -307,8 +233,8 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -321,8 +247,8 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, oracleCoreBlk.Bytes()): return oracleCoreBlk, nil case bytes.Equal(b, oracleCoreBlk.opts[0].Bytes()): @@ -344,7 +270,7 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { if oracleCoreBlk.Status() == choices.Accepted { return oracleCoreBlk.ID(), nil } - return coreGenBlk.ID(), 
nil + return snowmantest.GenesisID, nil } acceptedID, err := proVM.LastAccepted(context.Background()) require.NoError(err) @@ -377,41 +303,18 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create post fork oracle block ... + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, - } - oracleCoreBlk.opts = [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - HeightV: oracleCoreBlk.Height() + 1, + Block: *coreTestBlk, + opts: [2]snowman.Block{ + snowmantest.BuildChild(coreTestBlk), + snowmantest.BuildChild(coreTestBlk), }, } @@ -420,8 +323,8 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -434,8 +337,8 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { } coreVM.ParseBlockF = func(_ 
context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, oracleCoreBlk.Bytes()): return oracleCoreBlk, nil case bytes.Equal(b, oracleCoreBlk.opts[0].Bytes()): @@ -482,41 +385,26 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) coreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, + Block: *coreTestBlk, optsErr: snowman.ErrNotOracle, } - coreChildBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk.ID(), - HeightV: coreBlk.Height() + 1, - } + coreChildBlk := snowmantest.BuildChild(coreTestBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil case coreChildBlk.ID(): @@ -527,8 +415,8 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - 
return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil case bytes.Equal(b, coreChildBlk.Bytes()): @@ -570,44 +458,20 @@ func TestOptionTimestampValidity(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, db := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, db := initTestProposerVM(t, activationTime, durangoTime, 0) - coreOracleBlkID := ids.GenerateTestID() + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) coreOracleBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: coreOracleBlkID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - }, + Block: *coreTestBlk, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreOracleBlkID, - HeightV: coreGenBlk.Height() + 2, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreOracleBlkID, - HeightV: coreGenBlk.Height() + 2, - }, + snowmantest.BuildChild(coreTestBlk), + snowmantest.BuildChild(coreTestBlk), }, } oracleBlkTime := proVM.Time().Truncate(time.Second) statelessBlock, err := block.BuildUnsigned( - coreGenBlk.ID(), + snowmantest.GenesisID, oracleBlkTime, 0, coreOracleBlk.Bytes(), @@ -616,8 +480,8 @@ func TestOptionTimestampValidity(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreOracleBlk.ID(): return coreOracleBlk, nil case coreOracleBlk.opts[0].ID(): @@ -630,8 +494,8 
@@ func TestOptionTimestampValidity(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreOracleBlk.Bytes()): return coreOracleBlk, nil case bytes.Equal(b, coreOracleBlk.opts[0].Bytes()): @@ -685,6 +549,7 @@ func TestOptionTimestampValidity(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -707,8 +572,8 @@ func TestOptionTimestampValidity(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreOracleBlk.ID(): return coreOracleBlk, nil case coreOracleBlk.opts[0].ID(): @@ -721,8 +586,8 @@ func TestOptionTimestampValidity(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreOracleBlk.Bytes()): return coreOracleBlk, nil case bytes.Equal(b, coreOracleBlk.opts[0].Bytes()): diff --git a/vms/proposervm/pre_fork_block.go b/vms/proposervm/pre_fork_block.go index 199c1c98db7d..737659cacc05 100644 --- a/vms/proposervm/pre_fork_block.go +++ b/vms/proposervm/pre_fork_block.go @@ -5,18 +5,24 @@ package proposervm import ( "context" + "errors" "fmt" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var _ Block = 
(*preForkBlock)(nil) +var ( + _ Block = (*preForkBlock)(nil) + + errChildOfPreForkBlockHasProposer = errors.New("child of pre-fork block has proposer") +) type preForkBlock struct { snowman.Block @@ -39,7 +45,7 @@ func (b *preForkBlock) acceptInnerBlk(ctx context.Context) error { } func (b *preForkBlock) Status() choices.Status { - forkHeight, err := b.vm.getForkHeight() + forkHeight, err := b.vm.GetForkHeight() if err == database.ErrNotFound { return b.Block.Status() } @@ -167,8 +173,8 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB } // Verify the lack of signature on the node - if err := child.SignedBlock.Verify(false, b.vm.ctx.ChainID); err != nil { - return err + if child.SignedBlock.Proposer() != ids.EmptyNodeID { + return errChildOfPreForkBlockHasProposer } // Verify the inner block and track it as verified @@ -248,6 +254,7 @@ func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { zap.Stringer("blkID", blk.ID()), zap.Stringer("innerBlkID", innerBlock.ID()), zap.Uint64("height", blk.Height()), + zap.Uint64("pChainHeight", pChainHeight), zap.Time("parentTimestamp", parentTimestamp), zap.Time("blockTimestamp", newTimestamp)) return blk, nil diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index 3261f5f9ee9a..892209781e8c 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" @@ -30,7 +31,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // setup proBlk := preForkBlock{ - Block: &snowman.TestBlock{}, + Block: 
snowmantest.BuildChild(snowmantest.Genesis), } // test @@ -54,38 +55,19 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create pre fork oracle block ... + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) + preferredTestBlk := snowmantest.BuildChild(coreTestBlk) oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - }, - } - oracleCoreBlk.opts = [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), + Block: *coreTestBlk, + opts: [2]snowman.Block{ + preferredTestBlk, + snowmantest.BuildChild(coreTestBlk), }, } @@ -94,8 +76,8 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -121,14 +103,7 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: 
choices.TestDecidable{ - IDV: ids.Empty.Prefix(4444), - StatusV: choices.Processing, - }, - BytesV: []byte{4}, - ParentV: oracleCoreBlk.opts[0].ID(), - }, + Block: *snowmantest.BuildChild(preferredTestBlk), } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return lastCoreBlk, nil @@ -143,46 +118,30 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { require := require.New(t) var ( - activationTime = genesisTimestamp.Add(10 * time.Second) + activationTime = snowmantest.GenesisTimestamp.Add(10 * time.Second) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create pre fork oracle block pre activation time... - oracleCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: activationTime.Add(-1 * time.Second), - }, - } + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) + coreTestBlk.TimestampV = activationTime.Add(-1 * time.Second) // ... 
whose options are post activation time - oracleCoreBlk.opts = [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: oracleCoreBlk.ID(), - TimestampV: activationTime.Add(time.Second), - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(3333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: oracleCoreBlk.ID(), - TimestampV: activationTime.Add(time.Second), + preferredBlk := snowmantest.BuildChild(coreTestBlk) + preferredBlk.TimestampV = activationTime.Add(time.Second) + + unpreferredBlk := snowmantest.BuildChild(coreTestBlk) + unpreferredBlk.TimestampV = activationTime.Add(time.Second) + + oracleCoreBlk := &TestOptionsBlock{ + Block: *coreTestBlk, + opts: [2]snowman.Block{ + preferredBlk, + unpreferredBlk, }, } @@ -191,8 +150,8 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case oracleCoreBlk.ID(): return oracleCoreBlk, nil case oracleCoreBlk.opts[0].ID(): @@ -218,14 +177,7 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(4444), - StatusV: choices.Processing, - }, - BytesV: []byte{4}, - ParentV: oracleCoreBlk.opts[0].ID(), - }, + Block: *snowmantest.BuildChild(preferredBlk), } coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return lastCoreBlk, nil @@ -240,33 +192,23 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { require := require.New(t) var ( - activationTime = genesisTimestamp.Add(10 * time.Second) + 
activationTime = snowmantest.GenesisTimestamp.Add(10 * time.Second) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - require.True(coreGenBlk.Timestamp().Before(activationTime)) - // create parent block ... - parentCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), - } + parentCoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return parentCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case parentCoreBlk.ID(): return parentCoreBlk, nil default: @@ -275,8 +217,8 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, parentCoreBlk.Bytes()): return parentCoreBlk, nil default: @@ -288,14 +230,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { require.NoError(err) // .. create child block ... 
- childCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - TimestampV: parentCoreBlk.Timestamp(), - } + childCoreBlk := snowmantest.BuildChild(parentCoreBlk) childBlk := preForkBlock{ Block: childCoreBlk, vm: proVM, @@ -319,28 +254,19 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { require := require.New(t) var ( - activationTime = genesisTimestamp.Add(10 * time.Second) + activationTime = snowmantest.GenesisTimestamp.Add(10 * time.Second) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - require.True(coreGenBlk.Timestamp().Before(activationTime)) preActivationTime := activationTime.Add(-1 * time.Second) proVM.Set(preActivationTime) - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: preActivationTime, - VerifyV: nil, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) + coreBlk.TimestampV = preActivationTime coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -354,7 +280,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { // postFork block does NOT verify if parent is before fork activation time postForkStatelessChild, err := statelessblock.Build( - coreGenBlk.ID(), + snowmantest.GenesisID, coreBlk.Timestamp(), 0, // pChainHeight proVM.StakingCertLeaf, @@ -385,22 +311,15 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } require.NoError(proVM.SetPreference(context.Background(), preForkChild.ID())) - secondCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: 
ids.Empty.Prefix(2222), - }, - BytesV: []byte{2}, - ParentV: coreBlk.ID(), - TimestampV: postActivationTime, - VerifyV: nil, - } + secondCoreBlk := snowmantest.BuildChild(coreBlk) + secondCoreBlk.TimestampV = postActivationTime coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return secondCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -416,22 +335,14 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { require.NoError(lastPreForkBlk.Verify(context.Background())) require.NoError(proVM.SetPreference(context.Background(), lastPreForkBlk.ID())) - thirdCoreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), - }, - BytesV: []byte{3}, - ParentV: secondCoreBlk.ID(), - TimestampV: postActivationTime, - VerifyV: nil, - } + thirdCoreBlk := snowmantest.BuildChild(secondCoreBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return thirdCoreBlk, nil } coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil case secondCoreBlk.ID(): @@ -453,26 +364,17 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { require := require.New(t) var ( - activationTime = genesisTimestamp.Add(-1 * time.Second) + activationTime = snowmantest.GenesisTimestamp.Add(-1 * time.Second) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) proVM.Set(activationTime) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // build 
parent block after fork activation time ... - coreBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), - VerifyV: nil, - } + coreBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlock, nil } @@ -501,26 +403,19 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -529,8 +424,8 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil default: @@ -548,7 +443,7 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { if coreBlk.Status() == choices.Accepted { return coreBlk.ID(), nil } - return 
coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } acceptedID, err := proVM.LastAccepted(context.Background()) require.NoError(err) @@ -563,20 +458,12 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -595,55 +482,31 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { require := require.New(t) var ( - activationTime = genesisTimestamp.Add(10 * time.Second) + activationTime = snowmantest.GenesisTimestamp.Add(10 * time.Second) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - require.True(coreGenBlk.Timestamp().Before(activationTime)) postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) - coreBlkID := ids.GenerateTestID() + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) + coreTestBlk.TimestampV = postActivationTime coreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: coreBlkID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: postActivationTime, - }, + Block: *coreTestBlk, opts: 
[2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlkID, - TimestampV: postActivationTime, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlkID, - TimestampV: postActivationTime, - }, + snowmantest.BuildChild(coreTestBlk), + snowmantest.BuildChild(coreTestBlk), }, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil case coreBlk.opts[0].ID(): @@ -656,8 +519,8 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil case bytes.Equal(b, coreBlk.opts[0].Bytes()): @@ -689,55 +552,31 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { require := require.New(t) var ( - activationTime = genesisTimestamp.Add(10 * time.Second) + activationTime = snowmantest.GenesisTimestamp.Add(10 * time.Second) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - require.True(coreGenBlk.Timestamp().Before(activationTime)) postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) - coreBlkID := ids.GenerateTestID() + coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) 
+ coreTestBlk.TimestampV = postActivationTime coreBlk := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: coreBlkID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: postActivationTime, - }, + Block: *coreTestBlk, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlkID, - TimestampV: postActivationTime, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlkID, - TimestampV: postActivationTime, - }, + snowmantest.BuildChild(coreTestBlk), + snowmantest.BuildChild(coreTestBlk), }, } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil case coreBlk.opts[0].ID(): @@ -750,8 +589,8 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil case bytes.Equal(b, coreBlk.opts[0].Bytes()): @@ -798,10 +637,10 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { pChainHeight := uint64(1337) blkID := ids.GenerateTestID() - innerBlk := snowman.NewMockBlock(ctrl) + innerBlk := snowmantest.NewMockBlock(ctrl) innerBlk.EXPECT().ID().Return(blkID).AnyTimes() innerBlk.EXPECT().Timestamp().Return(mockable.MaxTime) - builtBlk := snowman.NewMockBlock(ctrl) + builtBlk := snowmantest.NewMockBlock(ctrl) 
builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() diff --git a/vms/proposervm/proposer/windower.go b/vms/proposervm/proposer/windower.go index b9a633c702c0..ae79aecafcd5 100644 --- a/vms/proposervm/proposer/windower.go +++ b/vms/proposervm/proposer/windower.go @@ -6,7 +6,6 @@ package proposer import ( "context" "errors" - "fmt" "math/bits" "time" @@ -37,7 +36,8 @@ const ( var ( _ Windower = (*windower)(nil) - ErrAnyoneCanPropose = errors.New("anyone can propose") + ErrAnyoneCanPropose = errors.New("anyone can propose") + ErrUnexpectedSamplerFailure = errors.New("unexpected sampler failure") ) type Windower interface { @@ -132,9 +132,9 @@ func (w *windower) Proposers(ctx context.Context, blockHeight, pChainHeight uint source.Seed(w.chainSource ^ blockHeight) numToSample := int(min(uint64(maxWindows), totalWeight)) - indices, err := sampler.Sample(numToSample) - if err != nil { - return nil, err + indices, ok := sampler.Sample(numToSample) + if !ok { + return nil, ErrUnexpectedSamplerFailure } nodeIDs := make([]ids.NodeID, numToSample) @@ -231,7 +231,7 @@ func (w *windower) makeSampler( pChainHeight uint64, source sampler.Source, ) (sampler.WeightedWithoutReplacement, []validatorData, error) { - // Get the canconical representation of the validator set at the provided + // Get the canonical representation of the validator set at the provided // p-chain height. validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) if err != nil { @@ -271,9 +271,9 @@ func (w *windower) expectedProposer( // biasing the seed generation. For example, without reversing the slot // height=0 and slot=1 would equal height=1 and slot=0. 
source.Seed(w.chainSource ^ blockHeight ^ bits.Reverse64(slot)) - indices, err := sampler.Sample(1) - if err != nil { - return ids.EmptyNodeID, fmt.Errorf("failed sampling proposers: %w", err) + indices, ok := sampler.Sample(1) + if !ok { + return ids.EmptyNodeID, ErrUnexpectedSamplerFailure } return validators[indices[0]].id, nil } diff --git a/vms/proposervm/state/block_height_index.go b/vms/proposervm/state/block_height_index.go index b60fca0c363d..4a7ed4362c68 100644 --- a/vms/proposervm/state/block_height_index.go +++ b/vms/proposervm/state/block_height_index.go @@ -19,8 +19,7 @@ var ( heightPrefix = []byte("height") metadataPrefix = []byte("metadata") - forkKey = []byte("fork") - checkpointKey = []byte("checkpoint") + forkKey = []byte("fork") ) type HeightIndexGetter interface { @@ -40,23 +39,11 @@ type HeightIndexWriter interface { DeleteBlockIDAtHeight(height uint64) error } -// A checkpoint is the blockID of the next block to be considered -// for height indexing. We store checkpoints to be able to duly resume -// long-running re-indexing ops. -type HeightIndexBatchSupport interface { - versiondb.Commitable - - GetCheckpoint() (ids.ID, error) - SetCheckpoint(blkID ids.ID) error - DeleteCheckpoint() error -} - // HeightIndex contains mapping of blockHeights to accepted proposer block IDs // along with some metadata (fork height and checkpoint). 
type HeightIndex interface { HeightIndexWriter HeightIndexGetter - HeightIndexBatchSupport } type heightIndex struct { @@ -127,15 +114,3 @@ func (hi *heightIndex) GetForkHeight() (uint64, error) { func (hi *heightIndex) SetForkHeight(height uint64) error { return database.PutUInt64(hi.metadataDB, forkKey, height) } - -func (hi *heightIndex) GetCheckpoint() (ids.ID, error) { - return database.GetID(hi.metadataDB, checkpointKey) -} - -func (hi *heightIndex) SetCheckpoint(blkID ids.ID) error { - return database.PutID(hi.metadataDB, checkpointKey, blkID) -} - -func (hi *heightIndex) DeleteCheckpoint() error { - return hi.metadataDB.Delete(checkpointKey) -} diff --git a/vms/proposervm/state/block_state.go b/vms/proposervm/state/block_state.go index 0c5e210a8d81..8e888332e9c2 100644 --- a/vms/proposervm/state/block_state.go +++ b/vms/proposervm/state/block_state.go @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) @@ -110,11 +109,7 @@ func (s *blockState) GetBlock(blkID ids.ID) (block.Block, choices.Status, error) } // The key was in the database - // - // Invariant: Blocks stored on disk were previously accepted by this node. - // Because the durango activation relaxes TLS cert parsing rules, we assume - // it is always activated here. 
- blk, err := block.Parse(blkWrapper.Block, version.DefaultUpgradeTime) + blk, err := block.ParseWithoutVerification(blkWrapper.Block) if err != nil { return nil, choices.Unknown, err } diff --git a/vms/proposervm/state/block_state_test.go b/vms/proposervm/state/block_state_test.go index 269d7fbddc2e..33a43c5febae 100644 --- a/vms/proposervm/state/block_state_test.go +++ b/vms/proposervm/state/block_state_test.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -func testBlockState(a *require.Assertions, bs BlockState) { +func testBlockState(require *require.Assertions, bs BlockState) { parentID := ids.ID{1} timestamp := time.Unix(123, 0) pChainHeight := uint64(2) @@ -27,9 +27,10 @@ func testBlockState(a *require.Assertions, bs BlockState) { chainID := ids.ID{4} tlsCert, err := staking.NewTLSCert() - a.NoError(err) + require.NoError(err) - cert := staking.CertificateFromX509(tlsCert.Leaf) + cert, err := staking.ParseCertificate(tlsCert.Leaf.Raw) + require.NoError(err) key := tlsCert.PrivateKey.(crypto.Signer) b, err := block.Build( @@ -41,26 +42,25 @@ func testBlockState(a *require.Assertions, bs BlockState) { chainID, key, ) - a.NoError(err) + require.NoError(err) _, _, err = bs.GetBlock(b.ID()) - a.Equal(database.ErrNotFound, err) + require.Equal(database.ErrNotFound, err) _, _, err = bs.GetBlock(b.ID()) - a.Equal(database.ErrNotFound, err) + require.Equal(database.ErrNotFound, err) - err = bs.PutBlock(b, choices.Accepted) - a.NoError(err) + require.NoError(bs.PutBlock(b, choices.Accepted)) fetchedBlock, fetchedStatus, err := bs.GetBlock(b.ID()) - a.NoError(err) - a.Equal(choices.Accepted, fetchedStatus) - a.Equal(b.Bytes(), fetchedBlock.Bytes()) + require.NoError(err) + require.Equal(choices.Accepted, fetchedStatus) + require.Equal(b.Bytes(), fetchedBlock.Bytes()) fetchedBlock, fetchedStatus, err = bs.GetBlock(b.ID()) - a.NoError(err) - a.Equal(choices.Accepted, fetchedStatus) - a.Equal(b.Bytes(), fetchedBlock.Bytes()) + 
require.NoError(err) + require.Equal(choices.Accepted, fetchedStatus) + require.Equal(b.Bytes(), fetchedBlock.Bytes()) } func TestBlockState(t *testing.T) { diff --git a/vms/proposervm/state/codec.go b/vms/proposervm/state/codec.go index 63727894e356..d533ed948c8d 100644 --- a/vms/proposervm/state/codec.go +++ b/vms/proposervm/state/codec.go @@ -5,7 +5,6 @@ package state import ( "math" - "time" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" @@ -16,7 +15,7 @@ const CodecVersion = 0 var Codec codec.Manager func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt32) err := Codec.RegisterCodec(CodecVersion, lc) diff --git a/vms/proposervm/state/mock_state.go b/vms/proposervm/state/mock_state.go index 6384528a61dd..ce28025a93e9 100644 --- a/vms/proposervm/state/mock_state.go +++ b/vms/proposervm/state/mock_state.go @@ -41,20 +41,6 @@ func (m *MockState) EXPECT() *MockStateMockRecorder { return m.recorder } -// Commit mocks base method. -func (m *MockState) Commit() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Commit") - ret0, _ := ret[0].(error) - return ret0 -} - -// Commit indicates an expected call of Commit. -func (mr *MockStateMockRecorder) Commit() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockState)(nil).Commit)) -} - // DeleteBlock mocks base method. func (m *MockState) DeleteBlock(arg0 ids.ID) error { m.ctrl.T.Helper() @@ -83,20 +69,6 @@ func (mr *MockStateMockRecorder) DeleteBlockIDAtHeight(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).DeleteBlockIDAtHeight), arg0) } -// DeleteCheckpoint mocks base method. 
-func (m *MockState) DeleteCheckpoint() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteCheckpoint") - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteCheckpoint indicates an expected call of DeleteCheckpoint. -func (mr *MockStateMockRecorder) DeleteCheckpoint() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCheckpoint", reflect.TypeOf((*MockState)(nil).DeleteCheckpoint)) -} - // DeleteLastAccepted mocks base method. func (m *MockState) DeleteLastAccepted() error { m.ctrl.T.Helper() @@ -142,21 +114,6 @@ func (mr *MockStateMockRecorder) GetBlockIDAtHeight(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).GetBlockIDAtHeight), arg0) } -// GetCheckpoint mocks base method. -func (m *MockState) GetCheckpoint() (ids.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCheckpoint") - ret0, _ := ret[0].(ids.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCheckpoint indicates an expected call of GetCheckpoint. -func (mr *MockStateMockRecorder) GetCheckpoint() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCheckpoint", reflect.TypeOf((*MockState)(nil).GetCheckpoint)) -} - // GetForkHeight mocks base method. func (m *MockState) GetForkHeight() (uint64, error) { m.ctrl.T.Helper() @@ -230,20 +187,6 @@ func (mr *MockStateMockRecorder) SetBlockIDAtHeight(arg0, arg1 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBlockIDAtHeight", reflect.TypeOf((*MockState)(nil).SetBlockIDAtHeight), arg0, arg1) } -// SetCheckpoint mocks base method. -func (m *MockState) SetCheckpoint(arg0 ids.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetCheckpoint", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetCheckpoint indicates an expected call of SetCheckpoint. 
-func (mr *MockStateMockRecorder) SetCheckpoint(arg0 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCheckpoint", reflect.TypeOf((*MockState)(nil).SetCheckpoint), arg0) -} - // SetForkHeight mocks base method. func (m *MockState) SetForkHeight(arg0 uint64) error { m.ctrl.T.Helper() diff --git a/vms/proposervm/state_syncable_vm.go b/vms/proposervm/state_syncable_vm.go index 08a321cab7bb..12f61e057396 100644 --- a/vms/proposervm/state_syncable_vm.go +++ b/vms/proposervm/state_syncable_vm.go @@ -19,12 +19,6 @@ func (vm *VM) StateSyncEnabled(ctx context.Context) (bool, error) { return false, nil } - // if vm implements Snowman++, a block height index must be available - // to support state sync - if vm.VerifyHeightIndex(ctx) != nil { - return false, nil - } - return vm.ssVM.StateSyncEnabled(ctx) } @@ -100,12 +94,6 @@ func (vm *VM) GetStateSummary(ctx context.Context, height uint64) (block.StateSu // Note: building state summary requires a well formed height index. 
func (vm *VM) buildStateSummary(ctx context.Context, innerSummary block.StateSummary) (block.StateSummary, error) { - // if vm implements Snowman++, a block height index must be available - // to support state sync - if err := vm.VerifyHeightIndex(ctx); err != nil { - return nil, fmt.Errorf("could not build state summary: %w", err) - } - forkHeight, err := vm.GetForkHeight() switch err { case nil: diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index 0fa24de139d1..479c311b5fed 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ b/vms/proposervm/state_syncable_vm_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/snowtest" @@ -39,33 +41,18 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { }, } - // signal height index is complete - innerVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } - // load innerVM expectations - innerGenesisBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.ID{'i', 'n', 'n', 'e', 'r', 'G', 'e', 'n', 'e', 's', 'i', 's', 'I', 'D'}, - }, - HeightV: 0, - BytesV: []byte("genesis state"), - } innerVM.InitializeF = func(context.Context, *snow.Context, database.Database, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { return nil } - innerVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } innerVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - 
return innerGenesisBlk.ID(), nil + return snowmantest.GenesisID, nil } innerVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { - return innerGenesisBlk, nil + return snowmantest.Genesis, nil } // create the VM @@ -79,6 +66,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -89,7 +77,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { context.Background(), ctx, prefixdb.New([]byte{}, memdb.New()), - innerGenesisBlk.Bytes(), + snowmantest.GenesisBytes, nil, nil, nil, @@ -109,7 +97,6 @@ func TestStateSyncEnabled(t *testing.T) { }() // ProposerVM State Sync disabled if innerVM State sync is disabled - vm.hIndexer.MarkRepaired(true) innerVM.StateSyncEnabledF = func(context.Context) (bool, error) { return false, nil } @@ -172,11 +159,10 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { require.Equal(innerSummary.Bytes(), summary.Bytes()) // Post fork summary case - vm.hIndexer.MarkRepaired(true) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) // store post fork block associated with summary - innerBlk := &snowman.TestBlock{ + innerBlk := &snowmantest.Block{ BytesV: []byte{1}, ParentV: ids.GenerateTestID(), HeightV: innerSummary.Height(), @@ -257,11 +243,10 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { require.Equal(innerSummary.Bytes(), summary.Bytes()) // Post fork summary case - vm.hIndexer.MarkRepaired(true) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) // store post fork block associated with summary - innerBlk := &snowman.TestBlock{ + innerBlk := &snowmantest.Block{ BytesV: []byte{1}, ParentV: ids.GenerateTestID(), HeightV: innerSummary.Height(), @@ -345,11 +330,10 @@ func TestStateSyncGetStateSummary(t *testing.T) { require.Equal(innerSummary.Bytes(), summary.Bytes()) // Post fork summary case - 
vm.hIndexer.MarkRepaired(true) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) // store post fork block associated with summary - innerBlk := &snowman.TestBlock{ + innerBlk := &snowmantest.Block{ BytesV: []byte{1}, ParentV: ids.GenerateTestID(), HeightV: innerSummary.Height(), @@ -418,11 +402,10 @@ func TestParseStateSummary(t *testing.T) { require.Equal(summary.Bytes(), parsedSummary.Bytes()) // Get a post fork block than parse it - vm.hIndexer.MarkRepaired(true) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) // store post fork block associated with summary - innerBlk := &snowman.TestBlock{ + innerBlk := &snowmantest.Block{ BytesV: []byte{1}, ParentV: ids.GenerateTestID(), HeightV: innerSummary.Height(), @@ -477,11 +460,10 @@ func TestStateSummaryAccept(t *testing.T) { BytesV: []byte{'i', 'n', 'n', 'e', 'r'}, } - vm.hIndexer.MarkRepaired(true) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) // store post fork block associated with summary - innerBlk := &snowman.TestBlock{ + innerBlk := &snowmantest.Block{ BytesV: []byte{1}, ParentV: ids.GenerateTestID(), HeightV: innerSummary.Height(), @@ -545,7 +527,6 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { BytesV: []byte{'i', 'n', 'n', 'e', 'r'}, } - vm.hIndexer.MarkRepaired(true) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) // Set the last accepted block height to be higher that the state summary @@ -553,7 +534,7 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { vm.lastAcceptedHeight = innerSummary.Height() + 1 // store post fork block associated with summary - innerBlk := &snowman.TestBlock{ + innerBlk := &snowmantest.Block{ BytesV: []byte{1}, ParentV: ids.GenerateTestID(), HeightV: innerSummary.Height(), @@ -598,54 +579,3 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { require.NoError(err) require.Equal(block.StateSyncSkipped, status) } - -func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { - require := require.New(t) 
- - // Note: by default proVM is built such that heightIndex will be considered complete - var ( - activationTime = time.Unix(0, 0) - durangoTime = activationTime - ) - coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) - defer func() { - require.NoError(proVM.Shutdown(context.Background())) - }() - - require.NoError(proVM.VerifyHeightIndex(context.Background())) - - // let coreVM be always ready to serve summaries - summaryHeight := uint64(2022) - coreStateSummary := &block.TestStateSummary{ - T: t, - IDV: ids.ID{'a', 'a', 'a', 'a'}, - HeightV: summaryHeight, - BytesV: []byte{'c', 'o', 'r', 'e', 'S', 'u', 'm', 'm', 'a', 'r', 'y'}, - } - coreVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { - return coreStateSummary, nil - } - coreVM.GetStateSummaryF = func(_ context.Context, height uint64) (block.StateSummary, error) { - require.Equal(summaryHeight, height) - return coreStateSummary, nil - } - - // set height index to reindexing - proVM.hIndexer.MarkRepaired(false) - err := proVM.VerifyHeightIndex(context.Background()) - require.ErrorIs(err, block.ErrIndexIncomplete) - - _, err = proVM.GetLastStateSummary(context.Background()) - require.ErrorIs(err, block.ErrIndexIncomplete) - - _, err = proVM.GetStateSummary(context.Background(), summaryHeight) - require.ErrorIs(err, block.ErrIndexIncomplete) - - // declare height index complete - proVM.hIndexer.MarkRepaired(true) - require.NoError(proVM.VerifyHeightIndex(context.Background())) - - summary, err := proVM.GetLastStateSummary(context.Background()) - require.NoError(err) - require.Equal(summaryHeight, summary.Height()) -} diff --git a/vms/proposervm/summary/codec.go b/vms/proposervm/summary/codec.go index 41a9eb9a37d0..72617101b84c 100644 --- a/vms/proposervm/summary/codec.go +++ b/vms/proposervm/summary/codec.go @@ -6,7 +6,6 @@ package summary import ( "errors" "math" - "time" "github.com/ava-labs/avalanchego/codec" 
"github.com/ava-labs/avalanchego/codec/linearcodec" @@ -21,7 +20,7 @@ var ( ) func init() { - lc := linearcodec.NewDefault(time.Time{}) + lc := linearcodec.NewDefault() Codec = codec.NewManager(math.MaxInt32) if err := Codec.RegisterCodec(CodecVersion, lc); err != nil { panic(err) diff --git a/vms/proposervm/tree/tree_test.go b/vms/proposervm/tree/tree_test.go index 1e826e418c21..48095a40f638 100644 --- a/vms/proposervm/tree/tree_test.go +++ b/vms/proposervm/tree/tree_test.go @@ -9,32 +9,16 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" -) - -var ( - GenesisID = ids.GenerateTestID() - Genesis = &snowman.TestBlock{TestDecidable: choices.TestDecidable{ - IDV: GenesisID, - StatusV: choices.Accepted, - }} + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" ) func TestAcceptSingleBlock(t *testing.T) { require := require.New(t) - block := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.ID(), - } - tr := New() + block := snowmantest.BuildChild(snowmantest.Genesis) _, contains := tr.Get(block) require.False(contains) @@ -53,24 +37,11 @@ func TestAcceptSingleBlock(t *testing.T) { func TestAcceptBlockConflict(t *testing.T) { require := require.New(t) - blockToAccept := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.ID(), - } - - blockToReject := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.ID(), - } - tr := New() + blockToAccept := snowmantest.BuildChild(snowmantest.Genesis) + blockToReject := snowmantest.BuildChild(snowmantest.Genesis) + // add conflicting blocks tr.Add(blockToAccept) _, contains := tr.Get(blockToAccept) 
@@ -96,32 +67,12 @@ func TestAcceptBlockConflict(t *testing.T) { func TestAcceptChainConflict(t *testing.T) { require := require.New(t) - blockToAccept := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.ID(), - } - - blockToReject := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: Genesis.ID(), - } - - blockToRejectChild := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: blockToReject.ID(), - } - tr := New() + blockToAccept := snowmantest.BuildChild(snowmantest.Genesis) + blockToReject := snowmantest.BuildChild(snowmantest.Genesis) + blockToRejectChild := snowmantest.BuildChild(blockToReject) + // add conflicting blocks. tr.Add(blockToAccept) _, contains := tr.Get(blockToAccept) diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index 488dc984dad1..f5916fcf9f42 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -12,7 +12,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" - "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" @@ -24,11 +23,11 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/proposervm/indexer" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" 
"github.com/ava-labs/avalanchego/vms/proposervm/scheduler" "github.com/ava-labs/avalanchego/vms/proposervm/state" @@ -54,13 +53,7 @@ var ( _ block.BatchedChainVM = (*VM)(nil) _ block.StateSyncableVM = (*VM)(nil) - // TODO: remove after the X-chain supports height indexing. - mainnetXChainID = ids.FromStringOrPanic("2oYMBNV4eNHyqk2fjjV5nVQLDbtmNJzq5s3qs3Lo6ftnC6FByM") - fujiXChainID = ids.FromStringOrPanic("2JVSBoinj9C2J33VntvzYtVJNZdN2NKiwwKjcumHUWEb5DbBrm") - dbPrefix = []byte("proposervm") - - errHeightIndexInvalidWhilePruning = errors.New("height index invalid while pruning old blocks") ) func cachedBlockSize(_ ids.ID, blk snowman.Block) int { @@ -75,7 +68,6 @@ type VM struct { ssVM block.StateSyncableVM state.State - hIndexer indexer.HeightIndexer proposer.Windower tree.Tree @@ -107,6 +99,14 @@ type VM struct { // lastAcceptedHeight is set to the last accepted PostForkBlock's height. lastAcceptedHeight uint64 + + // proposerBuildSlotGauge reports the slot index when this node may attempt + // to build a block. + proposerBuildSlotGauge prometheus.Gauge + + // acceptedBlocksSlotHistogram reports the slots that accepted blocks were + // proposed in. + acceptedBlocksSlotHistogram prometheus.Histogram } // New performs best when [minBlkDelay] is whole seconds. This is because block @@ -138,26 +138,9 @@ func (vm *VM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - // TODO: Add a helper for this metrics override, it is performed in multiple - // places. 
- multiGatherer := metrics.NewMultiGatherer() - registerer := prometheus.NewRegistry() - if err := multiGatherer.Register("proposervm", registerer); err != nil { - return err - } - - optionalGatherer := metrics.NewOptionalGatherer() - if err := multiGatherer.Register("", optionalGatherer); err != nil { - return err - } - if err := chainCtx.Metrics.Register(multiGatherer); err != nil { - return err - } - chainCtx.Metrics = optionalGatherer - vm.ctx = chainCtx vm.db = versiondb.New(prefixdb.New(dbPrefix, db)) - baseState, err := state.NewMetered(vm.db, "state", registerer) + baseState, err := state.NewMetered(vm.db, "state", vm.Config.Registerer) if err != nil { return err } @@ -166,7 +149,7 @@ func (vm *VM) Initialize( vm.Tree = tree.New() innerBlkCache, err := metercacher.New( "inner_block_cache", - registerer, + vm.Config.Registerer, cache.NewSizedLRU( innerBlkCacheSize, cachedBlockSize, @@ -177,10 +160,6 @@ func (vm *VM) Initialize( } vm.innerBlkCache = innerBlkCache - indexerDB := versiondb.New(vm.db) - indexerState := state.New(indexerDB) - vm.hIndexer = indexer.NewHeightIndexer(vm, vm.ctx.Log, indexerState) - scheduler, vmToEngine := scheduler.New(vm.ctx.Log, toEngine) vm.Scheduler = scheduler vm.toScheduler = vmToEngine @@ -210,7 +189,7 @@ func (vm *VM) Initialize( return err } - if err := vm.repair(detachedCtx); err != nil { + if err := vm.repairAcceptedChainByHeight(ctx); err != nil { return err } @@ -222,7 +201,7 @@ func (vm *VM) Initialize( return err } - forkHeight, err := vm.getForkHeight() + forkHeight, err := vm.GetForkHeight() switch err { case nil: chainCtx.Log.Info("initialized proposervm", @@ -237,7 +216,28 @@ func (vm *VM) Initialize( default: return err } - return nil + + vm.proposerBuildSlotGauge = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "block_building_slot", + Help: "the slot that this node may attempt to build a block", + }) + vm.acceptedBlocksSlotHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: 
"accepted_blocks_slot", + Help: "the slot accepted blocks were proposed in", + // define the following ranges: + // (-inf, 0] + // (0, 1] + // (1, 2] + // (2, inf) + // the usage of ".5" before was to ensure we work around the limitation + // of comparing floating point of the same numerical value. + Buckets: []float64{0.5, 1.5, 2.5}, + }) + + return utils.Err( + vm.Config.Registerer.Register(vm.proposerBuildSlotGauge), + vm.Config.Registerer.Register(vm.acceptedBlocksSlotHistogram), + ) } // shutdown ops then propagate shutdown to innerVM @@ -325,13 +325,15 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { ) if vm.IsDurangoActivated(parentTimestamp) { currentTime := vm.Clock.Time().Truncate(time.Second) - nextStartTime, err = vm.getPostDurangoSlotTime( + if nextStartTime, err = vm.getPostDurangoSlotTime( ctx, childBlockHeight, pChainHeight, proposer.TimeToSlot(parentTimestamp, currentTime), parentTimestamp, - ) + ); err == nil { + vm.proposerBuildSlotGauge.Set(float64(proposer.TimeToSlot(parentTimestamp, nextStartTime))) + } } else { nextStartTime, err = vm.getPreDurangoSlotTime( ctx, @@ -421,174 +423,6 @@ func (vm *VM) LastAccepted(ctx context.Context) (ids.ID, error) { return lastAccepted, err } -// repair makes sure that vm and innerVM chains are in sync. -// Moreover it fixes vm's height index if defined. -func (vm *VM) repair(ctx context.Context) error { - switch err := vm.ChainVM.VerifyHeightIndex(ctx); err { - case nil: - // InnerVM height index is complete. We can immediately verify - // and repair this VM height index. 
- shouldRepair, err := vm.shouldHeightIndexBeRepaired(ctx) - if err != nil { - return err - } - if !shouldRepair { - vm.ctx.Log.Info("block height index was successfully verified") - vm.hIndexer.MarkRepaired(true) - return vm.repairAcceptedChainByHeight(ctx) - } - case block.ErrIndexIncomplete: - default: - return err - } - - if vm.NumHistoricalBlocks != 0 { - vm.ctx.Log.Fatal("block height index must be valid when pruning historical blocks") - return errHeightIndexInvalidWhilePruning - } - - // innerVM height index is incomplete. Sync vm and innerVM chains first. - if err := vm.repairAcceptedChainByIteration(ctx); err != nil { - return err - } - - // asynchronously rebuild height index, if needed - go func() { - // Poll until the underlying chain's index is complete or shutdown is - // called. - ticker := time.NewTicker(checkIndexedFrequency) - defer ticker.Stop() - for { - // The underlying VM expects the lock to be held here. - vm.ctx.Lock.Lock() - err := vm.ChainVM.VerifyHeightIndex(ctx) - vm.ctx.Lock.Unlock() - - if err == nil { - // innerVM indexing complete. Let's re-index this VM - break - } - if err != block.ErrIndexIncomplete { - vm.ctx.Log.Error("block height indexing failed", - zap.Error(err), - ) - return - } - - // innerVM index is incomplete. 
Wait for completion and retry - select { - case <-vm.context.Done(): - return - case <-ticker.C: - } - } - - vm.ctx.Lock.Lock() - shouldRepair, err := vm.shouldHeightIndexBeRepaired(ctx) - vm.ctx.Lock.Unlock() - - if err != nil { - vm.ctx.Log.Error("could not verify height indexing status", - zap.Error(err), - ) - return - } - if !shouldRepair { - vm.ctx.Log.Info("block height indexing is already complete") - vm.hIndexer.MarkRepaired(true) - return - } - - err = vm.hIndexer.RepairHeightIndex(vm.context) - if err == nil { - vm.ctx.Log.Info("block height indexing finished") - return - } - - // Note that we don't check if `err` is `context.Canceled` here because - // repairing the height index may have returned a non-standard error - // due to the chain shutting down. - if vm.context.Err() == nil { - // The context wasn't closed, so the chain hasn't been shutdown. - // This must have been an unexpected error. - vm.ctx.Log.Error("block height indexing failed", - zap.Error(err), - ) - } - }() - return nil -} - -func (vm *VM) repairAcceptedChainByIteration(ctx context.Context) error { - lastAcceptedID, err := vm.GetLastAccepted() - if err == database.ErrNotFound { - // If the last accepted block isn't indexed yet, then the underlying - // chain is the only chain and there is nothing to repair. - return nil - } - if err != nil { - return err - } - - // Revert accepted blocks that weren't committed to the database. - for { - lastAccepted, err := vm.getPostForkBlock(ctx, lastAcceptedID) - if err == database.ErrNotFound { - // If the post fork block can't be found, it's because we're - // reverting past the fork boundary. If this is the case, then there - // is only one database to keep consistent, so there is nothing to - // repair anymore. 
- if err := vm.State.DeleteLastAccepted(); err != nil { - return err - } - if err := vm.State.DeleteCheckpoint(); err != nil { - return err - } - return vm.db.Commit() - } - if err != nil { - return err - } - - shouldBeAccepted := lastAccepted.getInnerBlk() - - // If the inner block is accepted, then we don't need to revert any more - // blocks. - if shouldBeAccepted.Status() == choices.Accepted { - return vm.db.Commit() - } - - // Mark the last accepted block as processing - rather than accepted. - lastAccepted.setStatus(choices.Processing) - if err := vm.State.PutBlock(lastAccepted.getStatelessBlk(), choices.Processing); err != nil { - return err - } - - // Advance to the parent block - previousLastAcceptedID := lastAcceptedID - lastAcceptedID = lastAccepted.Parent() - if err := vm.State.SetLastAccepted(lastAcceptedID); err != nil { - return err - } - - // If the indexer checkpoint was previously pointing to the last - // accepted block, roll it back to the new last accepted block. - checkpoint, err := vm.State.GetCheckpoint() - if err == database.ErrNotFound { - continue - } - if err != nil { - return err - } - if previousLastAcceptedID != checkpoint { - continue - } - if err := vm.State.SetCheckpoint(lastAcceptedID); err != nil { - return err - } - } -} - func (vm *VM) repairAcceptedChainByHeight(ctx context.Context) error { innerLastAcceptedID, err := vm.ChainVM.LastAccepted(ctx) if err != nil { @@ -693,7 +527,7 @@ func (vm *VM) setLastAcceptedMetadata(ctx context.Context) error { } func (vm *VM) parsePostForkBlock(ctx context.Context, b []byte) (PostForkBlock, error) { - statelessBlock, err := statelessblock.Parse(b, vm.DurangoTime) + statelessBlock, err := statelessblock.Parse(b, vm.ctx.ChainID) if err != nil { return nil, err } @@ -751,26 +585,6 @@ func (vm *VM) getBlock(ctx context.Context, id ids.ID) (Block, error) { return vm.getPreForkBlock(ctx, id) } -// TODO: remove after the P-chain and X-chain support height indexing. 
-func (vm *VM) getForkHeight() (uint64, error) { - // The fork block can be easily identified with the provided links because - // the `Parent Hash` is equal to the `Proposer Parent ID`. - switch vm.ctx.ChainID { - case constants.PlatformChainID: - switch vm.ctx.NetworkID { - case constants.MainnetID: - return 805732, nil // https://subnets.avax.network/p-chain/block/805732 - case constants.FujiID: - return 47529, nil // https://subnets-test.avax.network/p-chain/block/47529 - } - case mainnetXChainID: - return 1, nil // https://subnets.avax.network/x-chain/block/1 - case fujiXChainID: - return 1, nil // https://subnets-test.avax.network/x-chain/block/1 - } - return vm.GetForkHeight() -} - func (vm *VM) getPostForkBlock(ctx context.Context, blkID ids.ID) (PostForkBlock, error) { block, exists := vm.verifiedBlocks[blkID] if exists { diff --git a/vms/proposervm/vm_byzantine_test.go b/vms/proposervm/vm_byzantine_test.go index c9ad1b98c79b..d4997781cdd4 100644 --- a/vms/proposervm/vm_byzantine_test.go +++ b/vms/proposervm/vm_byzantine_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) @@ -36,20 +37,12 @@ func TestInvalidByzantineProposerParent(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - xBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - } + 
xBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } @@ -62,25 +55,15 @@ func TestInvalidByzantineProposerParent(t *testing.T) { require.NoError(aBlock.Verify(context.Background())) require.NoError(aBlock.Accept(context.Background())) - yBlockBytes := []byte{2} - yBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: yBlockBytes, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - } - + yBlock := snowmantest.BuildChild(xBlock) coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { - if !bytes.Equal(blockBytes, yBlockBytes) { + if !bytes.Equal(blockBytes, yBlock.Bytes()) { return nil, errUnknownBlock } return yBlock, nil } - parsedBlock, err := proVM.ParseBlock(context.Background(), yBlockBytes) + parsedBlock, err := proVM.ParseBlock(context.Background(), yBlock.Bytes()) if err != nil { // If there was an error parsing, then this is fine. 
return @@ -107,39 +90,18 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) - proVM.Set(coreGenBlk.Timestamp()) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + proVM.Set(snowmantest.GenesisTimestamp) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - xBlockID := ids.GenerateTestID() + xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) xBlock := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: xBlockID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - }, + Block: *xTestBlock, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: xBlockID, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: xBlockID, - }, + snowmantest.BuildChild(xTestBlock), + snowmantest.BuildChild(xTestBlock), }, } @@ -148,8 +110,8 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case xBlock.ID(): return xBlock, nil case xBlock.opts[0].ID(): @@ -162,8 +124,8 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, xBlock.Bytes()): return xBlock, nil case 
bytes.Equal(b, xBlock.opts[0].Bytes()): @@ -216,39 +178,21 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - xBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - } + xBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - yBlockBytes := []byte{2} - yBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: yBlockBytes, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - } - + yBlock := snowmantest.BuildChild(xBlock) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case gBlock.ID(): - return gBlock, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case xBlock.ID(): return xBlock, nil case yBlock.ID(): @@ -259,8 +203,8 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { switch { - case bytes.Equal(blockBytes, gBlock.Bytes()): - return gBlock, nil + case bytes.Equal(blockBytes, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(blockBytes, xBlock.Bytes()): return xBlock, nil case bytes.Equal(blockBytes, yBlock.Bytes()): @@ -306,38 +250,17 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := 
initTestProposerVM(t, activationTime, durangoTime, 0) - proVM.Set(coreGenBlk.Timestamp()) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + proVM.Set(snowmantest.GenesisTimestamp) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() xBlock := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - }, - opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), // valid block should reference xBlock - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), // valid block should reference xBlock - }, + Block: *snowmantest.BuildChild(snowmantest.Genesis), + opts: [2]snowman.Block{ // valid blocks should reference xBlock + snowmantest.BuildChild(snowmantest.Genesis), + snowmantest.BuildChild(snowmantest.Genesis), }, } @@ -346,8 +269,8 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case xBlock.ID(): return xBlock, nil case xBlock.opts[0].ID(): @@ -360,8 +283,8 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, xBlock.Bytes()): return xBlock, nil case bytes.Equal(b, xBlock.opts[0].Bytes()): @@ -406,40 +329,19 @@ 
func TestBlockVerify_InvalidPostForkOption(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) - proVM.Set(coreGenBlk.Timestamp()) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + proVM.Set(snowmantest.GenesisTimestamp) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create an Oracle pre-fork block X - xBlockID := ids.GenerateTestID() + xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) xBlock := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: xBlockID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - }, + Block: *xTestBlock, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: xBlockID, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: xBlockID, - }, + snowmantest.BuildChild(xTestBlock), + snowmantest.BuildChild(xTestBlock), }, } @@ -448,19 +350,10 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { xInnerOption := xInnerOptions[0] // create a non-Oracle pre-fork block Y - yBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } - + yBlock := snowmantest.BuildChild(snowmantest.Genesis) ySlb, err := block.BuildUnsigned( - coreGenBlk.ID(), - coreGenBlk.Timestamp(), + snowmantest.GenesisID, + snowmantest.GenesisTimestamp, uint64(2000), yBlock.Bytes(), ) @@ -525,33 +418,12 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { // create an Oracle pre-fork block Z // create 
post-fork block B from Y - zBlockID := ids.GenerateTestID() + zTestBlock := snowmantest.BuildChild(snowmantest.Genesis) zBlock := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: zBlockID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - }, + Block: *zTestBlock, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: zBlockID, - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: zBlockID, - }, + snowmantest.BuildChild(zTestBlock), + snowmantest.BuildChild(zTestBlock), }, } @@ -590,7 +462,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -605,33 +477,15 @@ func TestGetBlock_MutatedSignature(t *testing.T) { }, nil } - proVM.Set(coreGenBlk.Timestamp()) + proVM.Set(snowmantest.GenesisTimestamp) // Create valid core blocks to build our chain on. 
- coreBlk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(1111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } - - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - } - + coreBlk0 := snowmantest.BuildChild(snowmantest.Genesis) + coreBlk1 := snowmantest.BuildChild(coreBlk0) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk0.ID(): return coreBlk0, nil case coreBlk1.ID(): @@ -642,8 +496,8 @@ func TestGetBlock_MutatedSignature(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk0.Bytes()): return coreBlk0, nil case bytes.Equal(b, coreBlk1.Bytes()): diff --git a/vms/proposervm/vm_regression_test.go b/vms/proposervm/vm_regression_test.go deleted file mode 100644 index ac34df120641..000000000000 --- a/vms/proposervm/vm_regression_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package proposervm - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/snow/snowtest" -) - -func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *testing.T) { - require := require.New(t) - - innerVM := &fullVM{ - TestVM: &block.TestVM{ - TestVM: common.TestVM{ - T: t, - }, - }, - } - - // let innerVM fail verifying its height index with - // a non-special error (like block.ErrIndexIncomplete) - customError := errors.New("custom error") - innerVM.VerifyHeightIndexF = func(_ context.Context) error { - return customError - } - - innerVM.InitializeF = func(context.Context, *snow.Context, database.Database, - []byte, []byte, []byte, chan<- common.Message, - []*common.Fx, common.AppSender, - ) error { - return nil - } - - proVM := New( - innerVM, - Config{ - ActivationTime: time.Unix(0, 0), - DurangoTime: time.Unix(0, 0), - MinimumPChainHeight: 0, - MinBlkDelay: DefaultMinBlockDelay, - NumHistoricalBlocks: DefaultNumHistoricalBlocks, - StakingLeafSigner: pTestSigner, - StakingCertLeaf: pTestCert, - }, - ) - - defer func() { - // avoids leaking goroutines - require.NoError(proVM.Shutdown(context.Background())) - }() - - ctx := snowtest.Context(t, snowtest.CChainID) - initialState := []byte("genesis state") - - err := proVM.Initialize( - context.Background(), - ctx, - prefixdb.New([]byte{}, memdb.New()), - initialState, - nil, - nil, - nil, - nil, - nil, - ) - require.ErrorIs(err, customError) -} diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 7ad266d73de7..a2536375d48c 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go 
@@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -22,6 +23,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/snowtest" @@ -49,9 +51,6 @@ var ( pTestSigner crypto.Signer pTestCert *staking.Certificate - genesisUnixTimestamp int64 = 1000 - genesisTimestamp = time.Unix(genesisUnixTimestamp, 0) - defaultPChainHeight uint64 = 2000 errUnknownBlock = errors.New("unknown block") @@ -67,7 +66,10 @@ func init() { panic(err) } pTestSigner = tlsCert.PrivateKey.(crypto.Signer) - pTestCert = staking.CertificateFromX509(tlsCert.Leaf) + pTestCert, err = staking.ParseCertificate(tlsCert.Leaf.Raw) + if err != nil { + panic(err) + } } func initTestProposerVM( @@ -79,21 +81,10 @@ func initTestProposerVM( *fullVM, *validators.TestState, *VM, - *snowman.TestBlock, database.Database, ) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: []byte{0}, - } - initialState := []byte("genesis state") coreVM := &fullVM{ TestVM: &block.TestVM{ @@ -113,20 +104,20 @@ func initTestProposerVM( return nil } coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreGenBlk.ID(): - return coreGenBlk, nil + switch blkID { + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } 
} coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -142,6 +133,7 @@ func initTestProposerVM( NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -149,7 +141,7 @@ func initTestProposerVM( T: t, } valState.GetMinimumHeightF = func(context.Context) (uint64, error) { - return coreGenBlk.HeightV, nil + return snowmantest.GenesisHeight, nil } valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return defaultPChainHeight, nil @@ -187,11 +179,6 @@ func initTestProposerVM( db := prefixdb.New([]byte{0}, memdb.New()) - // signal height index is complete - coreVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } - require.NoError(proVM.Initialize( context.Background(), ctx, @@ -208,11 +195,11 @@ func initTestProposerVM( coreVM.InitializeF = nil require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) + require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) - proVM.Set(coreGenBlk.Timestamp()) + proVM.Set(snowmantest.GenesisTimestamp) - return coreVM, valState, proVM, coreGenBlk, db + return coreVM, valState, proVM, db } func waitForProposerWindow(vm *VM, chainTip snowman.Block, pchainHeight uint64) error { @@ -252,7 +239,7 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ 
-260,15 +247,7 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { skewedTimestamp := time.Now().Truncate(time.Second).Add(time.Millisecond) proVM.Set(skewedTimestamp) - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -288,20 +267,12 @@ func TestBuildBlockIsIdempotent(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -326,20 +297,12 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) 
coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -363,36 +326,20 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // add two proBlks... - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } proBlk1, err := proVM.BuildBlock(context.Background()) require.NoError(err) - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk2 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -402,7 +349,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { require.NoError(proBlk2.Verify(context.Background())) // ...and set one as preferred - var prefcoreBlk *snowman.TestBlock + var prefcoreBlk *snowmantest.Block coreVM.SetPreferenceF = func(_ context.Context, prefID ids.ID) error { switch prefID { case coreBlk1.ID(): @@ -431,15 +378,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) // build block... 
- coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: prefcoreBlk.ID(), - HeightV: prefcoreBlk.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(prefcoreBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -459,35 +398,19 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(111), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } proBlk1, err := proVM.BuildBlock(context.Background()) require.NoError(err) - coreBlk2 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(222), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk2 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } @@ -498,7 +421,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { require.NoError(proBlk2.Verify(context.Background())) // ...and set one as preferred - var wronglyPreferredcoreBlk *snowman.TestBlock + var wronglyPreferredcoreBlk *snowmantest.Block coreVM.SetPreferenceF = func(_ context.Context, prefID ids.ID) error { switch prefID { case coreBlk1.ID(): @@ -527,15 +450,7 @@ 
func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) // build block... - coreBlk3 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: wronglyPreferredcoreBlk.ID(), - HeightV: wronglyPreferredcoreBlk.Height() + 1, - } + coreBlk3 := snowmantest.BuildChild(wronglyPreferredcoreBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } @@ -556,17 +471,16 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - } coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errMarshallingFailed } + + innerBlk := snowmantest.BuildChild(snowmantest.Genesis) slb, err := statelessblock.Build( proVM.preferred, proVM.Time(), @@ -598,17 +512,13 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, gencoreBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create two Proposer blocks at the same height - innerBlk := &snowman.TestBlock{ - BytesV: []byte{1}, - ParentV: gencoreBlk.ID(), - HeightV: gencoreBlk.Height() + 1, - } + innerBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(innerBlk.Bytes(), b) 
return innerBlk, nil @@ -674,17 +584,13 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // one block is built from this proVM - localcoreBlk := &snowman.TestBlock{ - BytesV: []byte{111}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + localcoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return localcoreBlk, nil } @@ -694,15 +600,11 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { require.NoError(builtBlk.Verify(context.Background())) // another block with same parent comes from network and is parsed - netcoreBlk := &snowman.TestBlock{ - BytesV: []byte{222}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + netcoreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, localcoreBlk.Bytes()): return localcoreBlk, nil case bytes.Equal(b, netcoreBlk.Bytes()): @@ -744,7 +646,7 @@ func TestPreFork_Initialize(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - _, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + _, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() @@ -757,7 +659,7 @@ func TestPreFork_Initialize(t *testing.T) { require.NoError(err) require.IsType(&preForkBlock{}, rtvdBlk) - 
require.Equal(coreGenBlk.Bytes(), rtvdBlk.Bytes()) + require.Equal(snowmantest.GenesisBytes, rtvdBlk.Bytes()) } func TestPreFork_BuildBlock(t *testing.T) { @@ -767,20 +669,12 @@ func TestPreFork_BuildBlock(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk, nil } @@ -808,18 +702,12 @@ func TestPreFork_ParseBlock(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, _, proVM, _, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(2021), - }, - BytesV: []byte{1}, - } - + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(coreBlk.Bytes(), b) return coreBlk, nil @@ -847,21 +735,12 @@ func TestPreFork_SetPreference(t *testing.T) { activationTime = mockable.MaxTime durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk0 := 
&snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(333), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), - } + coreBlk0 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk0, nil } @@ -870,8 +749,8 @@ func TestPreFork_SetPreference(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk0.ID(): return coreBlk0, nil default: @@ -880,8 +759,8 @@ func TestPreFork_SetPreference(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk0.Bytes()): return coreBlk0, nil default: @@ -890,16 +769,7 @@ func TestPreFork_SetPreference(t *testing.T) { } require.NoError(proVM.SetPreference(context.Background(), builtBlk.ID())) - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.Empty.Prefix(444), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - TimestampV: coreBlk0.Timestamp(), - } + coreBlk1 := snowmantest.BuildChild(coreBlk0) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } @@ -911,34 +781,24 @@ func TestPreFork_SetPreference(t *testing.T) { func TestExpiredBuildBlock(t *testing.T) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: []byte{0}, - } - 
coreVM := &block.TestVM{} coreVM.T = t coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -954,6 +814,7 @@ func TestExpiredBuildBlock(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -961,7 +822,7 @@ func TestExpiredBuildBlock(t *testing.T) { T: t, } valState.GetMinimumHeightF = func(context.Context) (uint64, error) { - return coreGenBlk.Height(), nil + return snowmantest.GenesisHeight, nil } valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return defaultPChainHeight, nil @@ -997,9 +858,6 @@ func TestExpiredBuildBlock(t *testing.T) { toScheduler = toEngineChan return nil } - coreVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } // make sure that DBs are compressed correctly require.NoError(proVM.Initialize( @@ -1021,7 +879,7 @@ func TestExpiredBuildBlock(t *testing.T) { coreVM.InitializeF = nil require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) + require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) // Notify the proposer VM of a new block on the inner block side toScheduler <- common.PendingTxs @@ -1030,17 +888,9 @@ func TestExpiredBuildBlock(t *testing.T) { // Before calling 
BuildBlock, verify a remote block and set it as the // preferred block. - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) statelessBlock, err := statelessblock.BuildUnsigned( - coreGenBlk.ID(), + snowmantest.GenesisID, proVM.Time(), 0, coreBlk.Bytes(), @@ -1049,8 +899,8 @@ func TestExpiredBuildBlock(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -1059,8 +909,8 @@ func TestExpiredBuildBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil default: @@ -1123,21 +973,12 @@ func TestInnerBlockDeduplication(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) coreBlk0 := &wrappedBlock{ Block: coreBlk, } @@ -1145,14 +986,14 @@ func TestInnerBlockDeduplication(t *testing.T) { 
Block: coreBlk, } statelessBlock0, err := statelessblock.BuildUnsigned( - coreGenBlk.ID(), + snowmantest.GenesisID, coreBlk.Timestamp(), 0, coreBlk.Bytes(), ) require.NoError(err) statelessBlock1, err := statelessblock.BuildUnsigned( - coreGenBlk.ID(), + snowmantest.GenesisID, coreBlk.Timestamp(), 1, coreBlk.Bytes(), @@ -1161,8 +1002,8 @@ func TestInnerBlockDeduplication(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk0.ID(): return coreBlk0, nil default: @@ -1171,8 +1012,8 @@ func TestInnerBlockDeduplication(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk0.Bytes()): return coreBlk0, nil default: @@ -1189,8 +1030,8 @@ func TestInnerBlockDeduplication(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk1.ID(): return coreBlk1, nil default: @@ -1199,8 +1040,8 @@ func TestInnerBlockDeduplication(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil default: @@ -1221,16 +1062,6 @@ func TestInnerBlockDeduplication(t *testing.T) { func TestInnerVMRollback(t *testing.T) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: 
choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: []byte{0}, - } - valState := &validators.TestState{ T: t, } @@ -1251,20 +1082,20 @@ func TestInnerVMRollback(t *testing.T) { coreVM.T = t coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -1287,9 +1118,6 @@ func TestInnerVMRollback(t *testing.T) { ) error { return nil } - coreVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } db := memdb.New() @@ -1303,6 +1131,7 @@ func TestInnerVMRollback(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1319,20 +1148,11 @@ func TestInnerVMRollback(t *testing.T) { )) require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) + require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) - coreBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), - } + coreBlk := snowmantest.BuildChild(snowmantest.Genesis) statelessBlock, err := statelessblock.BuildUnsigned( - coreGenBlk.ID(), + snowmantest.GenesisID, 
coreBlk.Timestamp(), 0, coreBlk.Bytes(), @@ -1341,8 +1161,8 @@ func TestInnerVMRollback(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk.ID(): return coreBlk, nil default: @@ -1351,8 +1171,8 @@ func TestInnerVMRollback(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk.Bytes()): return coreBlk, nil default: @@ -1379,9 +1199,6 @@ func TestInnerVMRollback(t *testing.T) { // Restart the node and have the inner VM rollback state. require.NoError(proVM.Shutdown(context.Background())) coreBlk.StatusV = choices.Processing - coreVM.VerifyHeightIndexF = func(context.Context) error { - return nil - } proVM = New( coreVM, @@ -1393,6 +1210,7 @@ func TestInnerVMRollback(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1414,7 +1232,7 @@ func TestInnerVMRollback(t *testing.T) { lastAcceptedID, err := proVM.LastAccepted(context.Background()) require.NoError(err) - require.Equal(coreGenBlk.IDV, lastAcceptedID) + require.Equal(snowmantest.GenesisID, lastAcceptedID) parsedBlock, err = proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) require.NoError(err) @@ -1429,7 +1247,7 @@ func TestBuildBlockDuringWindow(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, valState, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { 
require.NoError(proVM.Shutdown(context.Background())) }() @@ -1443,26 +1261,10 @@ func TestBuildBlockDuringWindow(t *testing.T) { }, nil } - coreBlk0 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - } - coreBlk1 := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreBlk0.ID(), - HeightV: coreBlk0.Height() + 1, - } + coreBlk0 := snowmantest.BuildChild(snowmantest.Genesis) + coreBlk1 := snowmantest.BuildChild(coreBlk0) statelessBlock0, err := statelessblock.BuildUnsigned( - coreGenBlk.ID(), + snowmantest.GenesisID, proVM.Time(), 0, coreBlk0.Bytes(), @@ -1471,8 +1273,8 @@ func TestBuildBlockDuringWindow(t *testing.T) { coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { - case coreGenBlk.ID(): - return coreGenBlk, nil + case snowmantest.GenesisID: + return snowmantest.Genesis, nil case coreBlk0.ID(): return coreBlk0, nil case coreBlk1.ID(): @@ -1483,8 +1285,8 @@ func TestBuildBlockDuringWindow(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil case bytes.Equal(b, coreBlk0.Bytes()): return coreBlk0, nil case bytes.Equal(b, coreBlk1.Bytes()): @@ -1534,21 +1336,13 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create pre-fork block X and post-fork 
block A - xBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - } + xBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil @@ -1559,18 +1353,10 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { require.NoError(aBlock.Verify(context.Background())) // use a different way to construct pre-fork block Y and post-fork block B - yBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - } + yBlock := snowmantest.BuildChild(snowmantest.Genesis) ySlb, err := statelessblock.BuildUnsigned( - gBlock.ID(), + snowmantest.GenesisID, proVM.Time(), defaultPChainHeight, yBlock.Bytes(), @@ -1589,15 +1375,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { require.NoError(bBlock.Verify(context.Background())) // append Z/C to Y/B - zBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: yBlock.ID(), - HeightV: yBlock.Height() + 1, - } + zBlock := snowmantest.BuildChild(yBlock) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return zBlock, nil @@ -1631,32 +1409,13 @@ func TestTooFarAdvanced(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - xBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - 
ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), - } - - yBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: xBlock.ID(), - HeightV: xBlock.Height() + 1, - TimestampV: xBlock.Timestamp(), - } + xBlock := snowmantest.BuildChild(snowmantest.Genesis) + yBlock := snowmantest.BuildChild(xBlock) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil @@ -1725,41 +1484,17 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = mockable.MaxTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - xBlockID := ids.GenerateTestID() + xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) xBlock := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: xBlockID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), - }, + Block: *xTestBlock, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), - }, + snowmantest.BuildChild(xTestBlock), + snowmantest.BuildChild(xTestBlock), }, } @@ -1803,21 +1538,12 @@ func TestLaggedPChainHeight(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, 
activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() - innerBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), - } - + innerBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return innerBlock, nil } @@ -1828,7 +1554,7 @@ func TestLaggedPChainHeight(t *testing.T) { block := blockIntf.(*postForkBlock) pChainHeight := block.PChainHeight() - require.Equal(pChainHeight, coreGenBlk.Height()) + require.Equal(snowmantest.GenesisHeight, pChainHeight) } // Ensure that rejecting a block does not modify the accepted block ID for the @@ -1836,26 +1562,13 @@ func TestLaggedPChainHeight(t *testing.T) { func TestRejectedHeightNotIndexed(t *testing.T) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: []byte{0}, - } - - coreHeights := []ids.ID{coreGenBlk.ID()} + coreHeights := []ids.ID{snowmantest.GenesisID} initialState := []byte("genesis state") coreVM := &block.TestVM{ TestVM: common.TestVM{ T: t, }, - VerifyHeightIndexF: func(context.Context) error { - return nil - }, GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(coreHeights)) { return ids.ID{}, errTooHigh @@ -1871,20 +1584,20 @@ func TestRejectedHeightNotIndexed(t *testing.T) { return nil } coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == 
coreGenBlk.ID(): - return coreGenBlk, nil + switch blkID { + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -1900,6 +1613,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -1907,7 +1621,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { T: t, } valState.GetMinimumHeightF = func(context.Context) (uint64, error) { - return coreGenBlk.HeightV, nil + return snowmantest.GenesisHeight, nil } valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return defaultPChainHeight, nil @@ -1963,27 +1677,10 @@ func TestRejectedHeightNotIndexed(t *testing.T) { require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) - - ctx.Lock.Lock() - for proVM.VerifyHeightIndex(context.Background()) != nil { - ctx.Lock.Unlock() - time.Sleep(time.Millisecond) - ctx.Lock.Lock() - } - ctx.Lock.Unlock() + require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) // create inner block X and outer block A - xBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), - } + xBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil @@ -1995,20 +1692,11 @@ func TestRejectedHeightNotIndexed(t *testing.T) { 
require.NoError(aBlock.Verify(context.Background())) // use a different way to construct inner block Y and outer block B - yBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: coreGenBlk.ID(), - HeightV: coreGenBlk.Height() + 1, - TimestampV: coreGenBlk.Timestamp(), - } + yBlock := snowmantest.BuildChild(snowmantest.Genesis) ySlb, err := statelessblock.BuildUnsigned( - coreGenBlk.ID(), - coreGenBlk.Timestamp(), + snowmantest.GenesisID, + snowmantest.GenesisTimestamp, defaultPChainHeight, yBlock.Bytes(), ) @@ -2046,26 +1734,13 @@ func TestRejectedHeightNotIndexed(t *testing.T) { func TestRejectedOptionHeightNotIndexed(t *testing.T) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: []byte{0}, - } - - coreHeights := []ids.ID{coreGenBlk.ID()} + coreHeights := []ids.ID{snowmantest.GenesisID} initialState := []byte("genesis state") coreVM := &block.TestVM{ TestVM: common.TestVM{ T: t, }, - VerifyHeightIndexF: func(context.Context) error { - return nil - }, GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(coreHeights)) { return ids.ID{}, errTooHigh @@ -2081,20 +1756,20 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { return nil } coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { - return coreGenBlk.ID(), nil + return snowmantest.GenesisID, nil } coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - switch { - case blkID == coreGenBlk.ID(): - return coreGenBlk, nil + switch blkID { + case snowmantest.GenesisID: + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { - case 
bytes.Equal(b, coreGenBlk.Bytes()): - return coreGenBlk, nil + case bytes.Equal(b, snowmantest.GenesisBytes): + return snowmantest.Genesis, nil default: return nil, errUnknownBlock } @@ -2110,6 +1785,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2117,7 +1793,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { T: t, } valState.GetMinimumHeightF = func(context.Context) (uint64, error) { - return coreGenBlk.HeightV, nil + return snowmantest.GenesisHeight, nil } valState.GetCurrentHeightF = func(context.Context) (uint64, error) { return defaultPChainHeight, nil @@ -2173,46 +1849,14 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) + require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) - ctx.Lock.Lock() - for proVM.VerifyHeightIndex(context.Background()) != nil { - ctx.Lock.Unlock() - time.Sleep(time.Millisecond) - ctx.Lock.Lock() - } - ctx.Lock.Unlock() - - xBlockID := ids.GenerateTestID() + xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) xBlock := &TestOptionsBlock{ - TestBlock: snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: xBlockID, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: coreGenBlk.ID(), - TimestampV: coreGenBlk.Timestamp(), - }, + Block: *xTestBlock, opts: [2]snowman.Block{ - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{2}, - ParentV: xBlockID, - TimestampV: coreGenBlk.Timestamp(), - }, - &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{3}, - ParentV: xBlockID, - 
TimestampV: coreGenBlk.Timestamp(), - }, + snowmantest.BuildChild(xTestBlock), + snowmantest.BuildChild(xTestBlock), }, } @@ -2276,6 +1920,7 @@ func TestVMInnerBlkCache(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2290,11 +1935,10 @@ func TestVMInnerBlkCache(t *testing.T) { gomock.Any(), gomock.Any(), ).Return(nil) - innerVM.EXPECT().VerifyHeightIndex(gomock.Any()).Return(nil) innerVM.EXPECT().Shutdown(gomock.Any()).Return(nil) { - innerBlk := snowman.NewMockBlock(ctrl) + innerBlk := snowmantest.NewMockBlock(ctrl) innerBlkID := ids.GenerateTestID() innerVM.EXPECT().LastAccepted(gomock.Any()).Return(innerBlkID, nil) innerVM.EXPECT().GetBlock(gomock.Any(), innerBlkID).Return(innerBlk, nil) @@ -2338,7 +1982,7 @@ func TestVMInnerBlkCache(t *testing.T) { // Not in the VM's state so need to parse it. state.EXPECT().GetBlock(blkNearTip.ID()).Return(blkNearTip, choices.Accepted, nil).Times(2) // We will ask the inner VM to parse. 
- mockInnerBlkNearTip := snowman.NewMockBlock(ctrl) + mockInnerBlkNearTip := snowmantest.NewMockBlock(ctrl) mockInnerBlkNearTip.EXPECT().Height().Return(uint64(1)).Times(2) mockInnerBlkNearTip.EXPECT().Bytes().Return(blkNearTipInnerBytes).Times(1) @@ -2374,22 +2018,13 @@ func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create pre-fork block X and post-fork block A - xBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), - } + xBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil @@ -2399,25 +2034,16 @@ func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { coreVM.BuildBlockF = nil bStatelessBlock, err := statelessblock.BuildUnsigned( - gBlock.ID(), - gBlock.Timestamp(), + snowmantest.GenesisID, + snowmantest.GenesisTimestamp, defaultPChainHeight, xBlock.Bytes(), ) require.NoError(err) - xBlockCopy := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: xBlock.IDV, - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), - } + xBlockCopy := *xBlock coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { - return xBlockCopy, nil + return &xBlockCopy, nil } bBlockBytes := bStatelessBlock.Bytes() @@ -2453,22 +2079,13 @@ func TestVMInnerBlkMarkedAcceptedRegression(t *testing.T) { activationTime = time.Unix(0, 0) durangoTime = activationTime ) - coreVM, 
_, proVM, gBlock, _ := initTestProposerVM(t, activationTime, durangoTime, 0) + coreVM, _, proVM, _ := initTestProposerVM(t, activationTime, durangoTime, 0) defer func() { require.NoError(proVM.Shutdown(context.Background())) }() // create an inner block and wrap it in an postForkBlock. - innerBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - BytesV: []byte{1}, - ParentV: gBlock.ID(), - HeightV: gBlock.Height() + 1, - TimestampV: gBlock.Timestamp(), - } + innerBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return innerBlock, nil @@ -2491,7 +2108,7 @@ func TestVMInnerBlkMarkedAcceptedRegression(t *testing.T) { } type blockWithVerifyContext struct { - *snowman.MockBlock + *snowmantest.MockBlock *block.MockWithVerifyContext } @@ -2514,6 +2131,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2531,11 +2149,10 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { gomock.Any(), gomock.Any(), ).Return(nil) - innerVM.EXPECT().VerifyHeightIndex(gomock.Any()).Return(nil) innerVM.EXPECT().Shutdown(gomock.Any()).Return(nil) { - innerBlk := snowman.NewMockBlock(ctrl) + innerBlk := snowmantest.NewMockBlock(ctrl) innerBlkID := ids.GenerateTestID() innerVM.EXPECT().LastAccepted(gomock.Any()).Return(innerBlkID, nil) innerVM.EXPECT().GetBlock(gomock.Any(), innerBlkID).Return(innerBlk, nil) @@ -2562,7 +2179,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { { pChainHeight := uint64(0) innerBlk := blockWithVerifyContext{ - MockBlock: snowman.NewMockBlock(ctrl), + MockBlock: snowmantest.NewMockBlock(ctrl), MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(true, 
nil).Times(2) @@ -2610,7 +2227,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { // Ensure we call Verify on a block that returns // false for ShouldVerifyWithContext innerBlk := blockWithVerifyContext{ - MockBlock: snowman.NewMockBlock(ctrl), + MockBlock: snowmantest.NewMockBlock(ctrl), MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(false, nil) @@ -2633,7 +2250,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { { // Ensure we call Verify on a block that doesn't have a valid context innerBlk := blockWithVerifyContext{ - MockBlock: snowman.NewMockBlock(ctrl), + MockBlock: snowmantest.NewMockBlock(ctrl), MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) @@ -2650,16 +2267,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { func TestHistoricalBlockDeletion(t *testing.T) { require := require.New(t) - coreGenBlk := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Accepted, - }, - HeightV: 0, - TimestampV: genesisTimestamp, - BytesV: utils.RandomBytes(1024), - } - acceptedBlocks := []snowman.Block{coreGenBlk} + acceptedBlocks := []*snowmantest.Block{snowmantest.Genesis} currentHeight := uint64(0) initialState := []byte("genesis state") @@ -2689,9 +2297,6 @@ func TestHistoricalBlockDeletion(t *testing.T) { } return nil, errUnknownBlock }, - VerifyHeightIndexF: func(context.Context) error { - return nil - }, GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(acceptedBlocks)) { return ids.ID{}, errTooHigh @@ -2705,7 +2310,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { ctx.ValidatorState = &validators.TestState{ T: t, GetMinimumHeightF: func(context.Context) (uint64, error) { - return coreGenBlk.HeightV, nil + return snowmantest.GenesisHeight, nil }, GetCurrentHeightF: 
func(context.Context) (uint64, error) { return defaultPChainHeight, nil @@ -2728,6 +2333,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { NumHistoricalBlocks: DefaultNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2748,20 +2354,10 @@ func TestHistoricalBlockDeletion(t *testing.T) { require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) - require.NoError(proVM.VerifyHeightIndex(context.Background())) issueBlock := func() { lastAcceptedBlock := acceptedBlocks[currentHeight] - innerBlock := &snowman.TestBlock{ - TestDecidable: choices.TestDecidable{ - IDV: ids.GenerateTestID(), - StatusV: choices.Processing, - }, - ParentV: lastAcceptedBlock.ID(), - HeightV: lastAcceptedBlock.Height() + 1, - TimestampV: lastAcceptedBlock.Timestamp(), - BytesV: utils.RandomBytes(1024), - } + innerBlock := snowmantest.BuildChild(lastAcceptedBlock) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return innerBlock, nil @@ -2829,6 +2425,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { NumHistoricalBlocks: numHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2849,7 +2446,6 @@ func TestHistoricalBlockDeletion(t *testing.T) { require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) - require.NoError(proVM.VerifyHeightIndex(context.Background())) // Verify that old blocks were pruned during startup requireNumHeights(numHistoricalBlocks) @@ -2874,6 +2470,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { NumHistoricalBlocks: newNumHistoricalBlocks, StakingLeafSigner: pTestSigner, StakingCertLeaf: pTestCert, + Registerer: prometheus.NewRegistry(), }, ) @@ -2897,7 +2494,6 @@ func TestHistoricalBlockDeletion(t *testing.T) { 
require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) - require.NoError(proVM.VerifyHeightIndex(context.Background())) // The height index shouldn't be modified at this point requireNumHeights(numHistoricalBlocks) diff --git a/vms/rpcchainvm/batched_vm_test.go b/vms/rpcchainvm/batched_vm_test.go index 8039c9fb7ba3..b24c0699e597 100644 --- a/vms/rpcchainvm/batched_vm_test.go +++ b/vms/rpcchainvm/batched_vm_test.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/vms/components/chain" @@ -43,8 +43,8 @@ func batchedParseBlockCachingTestPlugin(t *testing.T, loadExpectations bool) blo vm := block.NewMockChainVM(ctrl) if loadExpectations { - blk1 := snowman.NewMockBlock(ctrl) - blk2 := snowman.NewMockBlock(ctrl) + blk1 := snowmantest.NewMockBlock(ctrl) + blk2 := snowmantest.NewMockBlock(ctrl) gomock.InOrder( // Initialize vm.EXPECT().Initialize( diff --git a/vms/rpcchainvm/errors.go b/vms/rpcchainvm/errors.go index 4b434b51d425..37d043fe2797 100644 --- a/vms/rpcchainvm/errors.go +++ b/vms/rpcchainvm/errors.go @@ -14,13 +14,11 @@ var ( errEnumToError = map[vmpb.Error]error{ vmpb.Error_ERROR_CLOSED: database.ErrClosed, vmpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, - vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE: block.ErrIndexIncomplete, vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED: block.ErrStateSyncableVMNotImplemented, } errorToErrEnum = map[error]vmpb.Error{ database.ErrClosed: vmpb.Error_ERROR_CLOSED, database.ErrNotFound: vmpb.Error_ERROR_NOT_FOUND, - block.ErrIndexIncomplete: 
vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE, block.ErrStateSyncableVMNotImplemented: vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED, } ) diff --git a/vms/rpcchainvm/grpcutils/util.go b/vms/rpcchainvm/grpcutils/util.go index 69e165c29832..280ab5c1aa3b 100644 --- a/vms/rpcchainvm/grpcutils/util.go +++ b/vms/rpcchainvm/grpcutils/util.go @@ -24,7 +24,7 @@ func Errorf(code int, tmpl string, args ...interface{}) error { }) } -// GetGRPCErrorFromHTTPRespone takes an HandleSimpleHTTPResponse as input and returns a gRPC error. +// GetGRPCErrorFromHTTPResponse takes an HandleSimpleHTTPResponse as input and returns a gRPC error. func GetGRPCErrorFromHTTPResponse(resp *httppb.HandleSimpleHTTPResponse) error { a, err := anypb.New(resp) if err != nil { diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index 3b71aaa8b4c7..504f628fc90a 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/logging" @@ -41,7 +41,7 @@ var ( } // last accepted blocks data before and after summary is accepted - preSummaryBlk = &snowman.TestBlock{ + preSummaryBlk = &snowmantest.Block{ TestDecidable: choices.TestDecidable{ IDV: ids.ID{'f', 'i', 'r', 's', 't', 'B', 'l', 'K'}, StatusV: choices.Accepted, @@ -50,7 +50,7 @@ var ( ParentV: ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'B', 'l', 'k'}, } - summaryBlk = &snowman.TestBlock{ + summaryBlk = &snowmantest.Block{ TestDecidable: choices.TestDecidable{ IDV: ids.ID{'s', 'u', 'm', 'm', 'a', 'r', 'y', 'B', 'l', 'K'}, StatusV: choices.Accepted, diff --git 
a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go index 7f03281c1c1c..6e6417725f11 100644 --- a/vms/rpcchainvm/vm_client.go +++ b/vms/rpcchainvm/vm_client.go @@ -136,16 +136,19 @@ func (vm *VMClient) Initialize( } // Register metrics - registerer := prometheus.NewRegistry() - multiGatherer := metrics.NewMultiGatherer() - vm.grpcServerMetrics = grpc_prometheus.NewServerMetrics() - if err := registerer.Register(vm.grpcServerMetrics); err != nil { + serverReg, err := metrics.MakeAndRegister( + chainCtx.Metrics, + "rpcchainvm", + ) + if err != nil { return err } - if err := multiGatherer.Register("rpcchainvm", registerer); err != nil { + vm.grpcServerMetrics = grpc_prometheus.NewServerMetrics() + if err := serverReg.Register(vm.grpcServerMetrics); err != nil { return err } - if err := multiGatherer.Register("", vm); err != nil { + + if err := chainCtx.Metrics.Register("plugin", vm); err != nil { return err } @@ -185,7 +188,7 @@ func (vm *VMClient) Initialize( SubnetId: chainCtx.SubnetID[:], ChainId: chainCtx.ChainID[:], NodeId: chainCtx.NodeID.Bytes(), - PublicKey: bls.PublicKeyToBytes(chainCtx.PublicKey), + PublicKey: bls.PublicKeyToCompressedBytes(chainCtx.PublicKey), XChainId: chainCtx.XChainID[:], CChainId: chainCtx.CChainID[:], AvaxAssetId: chainCtx.AVAXAssetID[:], @@ -226,8 +229,8 @@ func (vm *VMClient) Initialize( time: time, } - chainState, err := chain.NewMeteredState( - registerer, + vm.State, err = chain.NewMeteredState( + serverReg, &chain.Config{ DecidedCacheSize: decidedCacheSize, MissingCacheSize: missingCacheSize, @@ -241,12 +244,7 @@ func (vm *VMClient) Initialize( BuildBlockWithContext: vm.buildBlockWithContext, }, ) - if err != nil { - return err - } - vm.State = chainState - - return chainCtx.Metrics.Register(multiGatherer) + return err } func (vm *VMClient) newDBServer(db database.Database) *grpc.Server { @@ -667,14 +665,6 @@ func (vm *VMClient) batchedParseBlock(ctx context.Context, blksBytes [][]byte) ( return res, nil } -func (vm 
*VMClient) VerifyHeightIndex(ctx context.Context) error { - resp, err := vm.client.VerifyHeightIndex(ctx, &emptypb.Empty{}) - if err != nil { - return err - } - return errEnumToError[resp.Err] -} - func (vm *VMClient) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { resp, err := vm.client.GetBlockIDAtHeight( ctx, diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go index 82bafe425841..b33fd3e5b5fe 100644 --- a/vms/rpcchainvm/vm_server.go +++ b/vms/rpcchainvm/vm_server.go @@ -72,9 +72,9 @@ type VMServer struct { allowShutdown *utils.Atomic[bool] - processMetrics prometheus.Gatherer - db database.Database - log logging.Logger + metrics prometheus.Gatherer + db database.Database + log logging.Logger serverCloser grpcutils.ServerCloser connCloser wrappers.Closer @@ -108,7 +108,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) if err != nil { return nil, err } - publicKey, err := bls.PublicKeyFromBytes(req.PublicKey) + publicKey, err := bls.PublicKeyFromCompressedBytes(req.PublicKey) if err != nil { return nil, err } @@ -125,28 +125,47 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) return nil, err } - registerer := prometheus.NewRegistry() + pluginMetrics := metrics.NewPrefixGatherer() + vm.metrics = pluginMetrics + + processMetrics, err := metrics.MakeAndRegister( + pluginMetrics, + "process", + ) + if err != nil { + return nil, err + } // Current state of process metrics processCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) - if err := registerer.Register(processCollector); err != nil { + if err := processMetrics.Register(processCollector); err != nil { return nil, err } // Go process metrics using debug.GCStats goCollector := collectors.NewGoCollector() - if err := registerer.Register(goCollector); err != nil { + if err := processMetrics.Register(goCollector); err != nil { + return nil, err + } + + grpcMetrics, err := 
metrics.MakeAndRegister( + pluginMetrics, + "grpc", + ) + if err != nil { return nil, err } // gRPC client metrics grpcClientMetrics := grpc_prometheus.NewClientMetrics() - if err := registerer.Register(grpcClientMetrics); err != nil { + if err := grpcMetrics.Register(grpcClientMetrics); err != nil { return nil, err } - // Register metrics for each Go plugin processes - vm.processMetrics = registerer + vmMetrics := metrics.NewPrefixGatherer() + if err := pluginMetrics.Register("vm", vmMetrics); err != nil { + return nil, err + } // Dial the database dbClientConn, err := grpcutils.Dial( @@ -225,7 +244,7 @@ func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) Keystore: keystoreClient, SharedMemory: sharedMemoryClient, BCLookup: bcLookupClient, - Metrics: metrics.NewOptionalGatherer(), + Metrics: vmMetrics, // Signs warp messages WarpSigner: warpSignerClient, @@ -567,22 +586,8 @@ func (vm *VMServer) AppGossip(ctx context.Context, req *vmpb.AppGossipMsg) (*emp } func (vm *VMServer) Gather(context.Context, *emptypb.Empty) (*vmpb.GatherResponse, error) { - // Gather metrics registered to snow context Gatherer. These - // metrics are defined by the underlying vm implementation. - mfs, err := vm.ctx.Metrics.Gather() - if err != nil { - return nil, err - } - - // Gather metrics registered by rpcchainvm server Gatherer. These - // metrics are collected for each Go plugin process. - pluginMetrics, err := vm.processMetrics.Gather() - if err != nil { - return nil, err - } - mfs = append(mfs, pluginMetrics...) 
- - return &vmpb.GatherResponse{MetricFamilies: mfs}, err + metrics, err := vm.metrics.Gather() + return &vmpb.GatherResponse{MetricFamilies: metrics}, err } func (vm *VMServer) GetAncestors(ctx context.Context, req *vmpb.GetAncestorsRequest) (*vmpb.GetAncestorsResponse, error) { @@ -627,13 +632,6 @@ func (vm *VMServer) BatchedParseBlock( }, nil } -func (vm *VMServer) VerifyHeightIndex(ctx context.Context, _ *emptypb.Empty) (*vmpb.VerifyHeightIndexResponse, error) { - err := vm.vm.VerifyHeightIndex(ctx) - return &vmpb.VerifyHeightIndexResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) -} - func (vm *VMServer) GetBlockIDAtHeight( ctx context.Context, req *vmpb.GetBlockIDAtHeightRequest, diff --git a/vms/rpcchainvm/vm_test.go b/vms/rpcchainvm/vm_test.go index 7aeec999485a..429bc62df0be 100644 --- a/vms/rpcchainvm/vm_test.go +++ b/vms/rpcchainvm/vm_test.go @@ -79,7 +79,7 @@ func TestHelperProcess(t *testing.T) { } if len(args) == 0 { - fmt.Fprintf(os.Stderr, "failed to receive testKey\n") + fmt.Fprintln(os.Stderr, "failed to receive testKey") os.Exit(2) } diff --git a/vms/rpcchainvm/with_context_vm_test.go b/vms/rpcchainvm/with_context_vm_test.go index f9216f5a5b2e..f87a6cba5f0b 100644 --- a/vms/rpcchainvm/with_context_vm_test.go +++ b/vms/rpcchainvm/with_context_vm_test.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/consensus/snowman/snowmantest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/snowtest" ) @@ -40,7 +41,7 @@ type ContextEnabledVMMock struct { } type ContextEnabledBlockMock struct { - *snowman.MockBlock + *snowmantest.MockBlock *block.MockWithVerifyContext } @@ -56,7 +57,7 @@ func contextEnabledTestPlugin(t *testing.T, loadExpectations bool) block.ChainVM if loadExpectations { ctxBlock := ContextEnabledBlockMock{ - MockBlock: 
snowman.NewMockBlock(ctrl), + MockBlock: snowmantest.NewMockBlock(ctrl), MockWithVerifyContext: block.NewMockWithVerifyContext(ctrl), } gomock.InOrder( diff --git a/vms/secp256k1fx/credential_test.go b/vms/secp256k1fx/credential_test.go index e69b98b286e7..c08548e0bcc7 100644 --- a/vms/secp256k1fx/credential_test.go +++ b/vms/secp256k1fx/credential_test.go @@ -5,7 +5,6 @@ package secp256k1fx import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -28,7 +27,7 @@ func TestCredentialVerifyNil(t *testing.T) { func TestCredentialSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index dcdf78385404..388d7bc14d84 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -53,7 +53,7 @@ func init() { func TestFxInitialize(t *testing.T) { vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} @@ -69,7 +69,7 @@ func TestFxInitializeInvalid(t *testing.T) { func TestFxVerifyTransfer(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -107,7 +107,7 @@ func TestFxVerifyTransfer(t *testing.T) { func TestFxVerifyTransferNilTx(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -143,7 +143,7 @@ func TestFxVerifyTransferNilTx(t *testing.T) { func TestFxVerifyTransferNilOutput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: 
logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -170,7 +170,7 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { func TestFxVerifyTransferNilInput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -201,7 +201,7 @@ func TestFxVerifyTransferNilInput(t *testing.T) { func TestFxVerifyTransferNilCredential(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -233,7 +233,7 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { func TestFxVerifyTransferInvalidOutput(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -270,7 +270,7 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { func TestFxVerifyTransferWrongAmounts(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -307,7 +307,7 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { func TestFxVerifyTransferTimelocked(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -344,7 +344,7 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { func TestFxVerifyTransferTooManySigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: 
linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -382,7 +382,7 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { func TestFxVerifyTransferTooFewSigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -417,7 +417,7 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { func TestFxVerifyTransferMismatchedSigners(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -455,7 +455,7 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { func TestFxVerifyTransferInvalidSignature(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -495,7 +495,7 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { func TestFxVerifyTransferWrongSigner(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -535,7 +535,7 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { func TestFxVerifyTransferSigIndexOOB(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -575,7 +575,7 @@ func TestFxVerifyTransferSigIndexOOB(t *testing.T) { func TestFxVerifyOperation(t *testing.T) { require := require.New(t) vm := TestVM{ - 
Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -627,7 +627,7 @@ func TestFxVerifyOperation(t *testing.T) { func TestFxVerifyOperationUnknownTx(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -679,7 +679,7 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { func TestFxVerifyOperationUnknownOperation(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -709,7 +709,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { func TestFxVerifyOperationUnknownCredential(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -757,7 +757,7 @@ func TestFxVerifyOperationUnknownCredential(t *testing.T) { func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -810,7 +810,7 @@ func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -855,7 +855,7 @@ func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { func 
TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -905,7 +905,7 @@ func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { require := require.New(t) vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) @@ -952,7 +952,7 @@ func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { func TestVerifyPermission(t *testing.T) { vm := TestVM{ - Codec: linearcodec.NewDefault(time.Time{}), + Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} diff --git a/vms/secp256k1fx/transfer_input_test.go b/vms/secp256k1fx/transfer_input_test.go index c155d848e559..4434584e1a9d 100644 --- a/vms/secp256k1fx/transfer_input_test.go +++ b/vms/secp256k1fx/transfer_input_test.go @@ -5,7 +5,6 @@ package secp256k1fx import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -81,7 +80,7 @@ func TestTransferInputVerifyUnsorted(t *testing.T) { func TestTransferInputSerialize(t *testing.T) { require := require.New(t) - c := linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/secp256k1fx/transfer_output_test.go b/vms/secp256k1fx/transfer_output_test.go index 864fb85b9ff0..b1d8a2170b5a 100644 --- a/vms/secp256k1fx/transfer_output_test.go +++ b/vms/secp256k1fx/transfer_output_test.go @@ -5,7 +5,6 @@ package secp256k1fx import ( "testing" - "time" "github.com/stretchr/testify/require" @@ -136,7 +135,7 @@ func TestOutputVerifyDuplicated(t *testing.T) { func TestOutputSerialize(t *testing.T) { require := require.New(t) - c := 
linearcodec.NewDefault(time.Time{}) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() require.NoError(m.RegisterCodec(0, c)) diff --git a/vms/tracedvm/block_vm.go b/vms/tracedvm/block_vm.go index b32b3bcd4593..13bb1a5d7b7c 100644 --- a/vms/tracedvm/block_vm.go +++ b/vms/tracedvm/block_vm.go @@ -50,7 +50,6 @@ type blockVM struct { getAncestorsTag string batchedParseBlockTag string // HeightIndexedChainVM tags - verifyHeightIndexTag string getBlockIDAtHeightTag string // StateSyncableVM tags stateSyncEnabledTag string @@ -85,7 +84,6 @@ func NewBlockVM(vm block.ChainVM, name string, tracer trace.Tracer) block.ChainV buildBlockWithContextTag: name + ".buildBlockWithContext", getAncestorsTag: name + ".getAncestors", batchedParseBlockTag: name + ".batchedParseBlock", - verifyHeightIndexTag: name + ".verifyHeightIndex", getBlockIDAtHeightTag: name + ".getBlockIDAtHeight", stateSyncEnabledTag: name + ".stateSyncEnabled", getOngoingSyncStateSummaryTag: name + ".getOngoingSyncStateSummary", @@ -176,13 +174,6 @@ func (vm *blockVM) LastAccepted(ctx context.Context) (ids.ID, error) { return vm.ChainVM.LastAccepted(ctx) } -func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { - ctx, span := vm.tracer.Start(ctx, vm.verifyHeightIndexTag) - defer span.End() - - return vm.ChainVM.VerifyHeightIndex(ctx) -} - func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { ctx, span := vm.tracer.Start(ctx, vm.getBlockIDAtHeightTag, oteltrace.WithAttributes( attribute.Int64("height", int64(height)), diff --git a/vms/txs/mempool/mempool.go b/vms/txs/mempool/mempool.go new file mode 100644 index 000000000000..808bda5a8270 --- /dev/null +++ b/vms/txs/mempool/mempool.go @@ -0,0 +1,220 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package mempool + +import ( + "errors" + "fmt" + "sync" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/linked" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/setmap" + "github.com/ava-labs/avalanchego/utils/units" +) + +const ( + // MaxTxSize is the maximum number of bytes a transaction can use to be + // allowed into the mempool. + MaxTxSize = 64 * units.KiB + + // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache + droppedTxIDsCacheSize = 64 + + // maxMempoolSize is the maximum number of bytes allowed in the mempool + maxMempoolSize = 64 * units.MiB +) + +var ( + ErrDuplicateTx = errors.New("duplicate tx") + ErrTxTooLarge = errors.New("tx too large") + ErrMempoolFull = errors.New("mempool is full") + ErrConflictsWithOtherTx = errors.New("tx conflicts with other tx") +) + +type Tx interface { + InputIDs() set.Set[ids.ID] + ID() ids.ID + Size() int +} + +type Metrics interface { + Update(numTxs, bytesAvailable int) +} + +type Mempool[T Tx] interface { + Add(tx T) error + Get(txID ids.ID) (T, bool) + // Remove [txs] and any conflicts of [txs] from the mempool. + Remove(txs ...T) + + // Peek returns the oldest tx in the mempool. + Peek() (tx T, exists bool) + + // Iterate iterates over the txs until f returns false + Iterate(f func(tx T) bool) + + // Note: dropped txs are added to droppedTxIDs but are not evicted from + // unissued decision/staker txs. This allows previously dropped txs to be + // possibly reissued. + MarkDropped(txID ids.ID, reason error) + GetDropReason(txID ids.ID) error + + // Len returns the number of txs in the mempool. 
+ Len() int +} + +type mempool[T Tx] struct { + lock sync.RWMutex + unissuedTxs *linked.Hashmap[ids.ID, T] + consumedUTXOs *setmap.SetMap[ids.ID, ids.ID] // TxID -> Consumed UTXOs + bytesAvailable int + droppedTxIDs *cache.LRU[ids.ID, error] // TxID -> Verification error + + metrics Metrics +} + +func New[T Tx]( + metrics Metrics, +) *mempool[T] { + m := &mempool[T]{ + unissuedTxs: linked.NewHashmap[ids.ID, T](), + consumedUTXOs: setmap.New[ids.ID, ids.ID](), + bytesAvailable: maxMempoolSize, + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + metrics: metrics, + } + m.updateMetrics() + + return m +} + +func (m *mempool[T]) updateMetrics() { + m.metrics.Update(m.unissuedTxs.Len(), m.bytesAvailable) +} + +func (m *mempool[T]) Add(tx T) error { + txID := tx.ID() + + m.lock.Lock() + defer m.lock.Unlock() + + if _, ok := m.unissuedTxs.Get(txID); ok { + return fmt.Errorf("%w: %s", ErrDuplicateTx, txID) + } + + txSize := tx.Size() + if txSize > MaxTxSize { + return fmt.Errorf("%w: %s size (%d) > max size (%d)", + ErrTxTooLarge, + txID, + txSize, + MaxTxSize, + ) + } + if txSize > m.bytesAvailable { + return fmt.Errorf("%w: %s size (%d) > available space (%d)", + ErrMempoolFull, + txID, + txSize, + m.bytesAvailable, + ) + } + + inputs := tx.InputIDs() + if m.consumedUTXOs.HasOverlap(inputs) { + return fmt.Errorf("%w: %s", ErrConflictsWithOtherTx, txID) + } + + m.bytesAvailable -= txSize + m.unissuedTxs.Put(txID, tx) + m.updateMetrics() + + // Mark these UTXOs as consumed in the mempool + m.consumedUTXOs.Put(txID, inputs) + + // An added tx must not be marked as dropped. + m.droppedTxIDs.Evict(txID) + return nil +} + +func (m *mempool[T]) Get(txID ids.ID) (T, bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + return m.unissuedTxs.Get(txID) +} + +func (m *mempool[T]) Remove(txs ...T) { + m.lock.Lock() + defer m.lock.Unlock() + + for _, tx := range txs { + txID := tx.ID() + // If the transaction is in the mempool, remove it. 
+ if _, ok := m.consumedUTXOs.DeleteKey(txID); ok { + m.unissuedTxs.Delete(txID) + m.bytesAvailable += tx.Size() + continue + } + + // If the transaction isn't in the mempool, remove any conflicts it has. + inputs := tx.InputIDs() + for _, removed := range m.consumedUTXOs.DeleteOverlapping(inputs) { + tx, _ := m.unissuedTxs.Get(removed.Key) + m.unissuedTxs.Delete(removed.Key) + m.bytesAvailable += tx.Size() + } + } + m.updateMetrics() +} + +func (m *mempool[T]) Peek() (T, bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + _, tx, exists := m.unissuedTxs.Oldest() + return tx, exists +} + +func (m *mempool[T]) Iterate(f func(T) bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + it := m.unissuedTxs.NewIterator() + for it.Next() { + if !f(it.Value()) { + return + } + } +} + +func (m *mempool[_]) MarkDropped(txID ids.ID, reason error) { + if errors.Is(reason, ErrMempoolFull) { + return + } + + m.lock.RLock() + defer m.lock.RUnlock() + + if _, ok := m.unissuedTxs.Get(txID); ok { + return + } + + m.droppedTxIDs.Put(txID, reason) +} + +func (m *mempool[_]) GetDropReason(txID ids.ID) error { + err, _ := m.droppedTxIDs.Get(txID) + return err +} + +func (m *mempool[_]) Len() int { + m.lock.RLock() + defer m.lock.RUnlock() + + return m.unissuedTxs.Len() +} diff --git a/vms/txs/mempool/mempool_test.go b/vms/txs/mempool/mempool_test.go new file mode 100644 index 000000000000..ff3725476e2f --- /dev/null +++ b/vms/txs/mempool/mempool_test.go @@ -0,0 +1,299 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package mempool + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Tx = (*dummyTx)(nil) + +type dummyTx struct { + size int + id ids.ID + inputIDs []ids.ID +} + +func (tx *dummyTx) Size() int { + return tx.size +} + +func (tx *dummyTx) ID() ids.ID { + return tx.id +} + +func (tx *dummyTx) InputIDs() set.Set[ids.ID] { + return set.Of(tx.inputIDs...) +} + +type noMetrics struct{} + +func (*noMetrics) Update(int, int) {} + +func newMempool() *mempool[*dummyTx] { + return New[*dummyTx](&noMetrics{}) +} + +func TestAdd(t *testing.T) { + tx0 := newTx(0, 32) + + tests := []struct { + name string + initialTxs []*dummyTx + tx *dummyTx + err error + dropReason error + }{ + { + name: "successfully add tx", + initialTxs: nil, + tx: tx0, + err: nil, + dropReason: nil, + }, + { + name: "attempt adding duplicate tx", + initialTxs: []*dummyTx{tx0}, + tx: tx0, + err: ErrDuplicateTx, + dropReason: nil, + }, + { + name: "attempt adding too large tx", + initialTxs: nil, + tx: newTx(0, MaxTxSize+1), + err: ErrTxTooLarge, + dropReason: ErrTxTooLarge, + }, + { + name: "attempt adding tx when full", + initialTxs: newTxs(maxMempoolSize/MaxTxSize, MaxTxSize), + tx: newTx(maxMempoolSize/MaxTxSize, MaxTxSize), + err: ErrMempoolFull, + dropReason: nil, + }, + { + name: "attempt adding conflicting tx", + initialTxs: []*dummyTx{tx0}, + tx: newTx(0, 32), + err: ErrConflictsWithOtherTx, + dropReason: ErrConflictsWithOtherTx, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + mempool := newMempool() + + for _, tx := range test.initialTxs { + require.NoError(mempool.Add(tx)) + } + + err := mempool.Add(test.tx) + require.ErrorIs(err, test.err) + + txID := test.tx.ID() + + if err != nil { + mempool.MarkDropped(txID, err) + } + + err = mempool.GetDropReason(txID) + require.ErrorIs(err, test.dropReason) + }) + } +} + 
+func TestGet(t *testing.T) { + require := require.New(t) + + mempool := newMempool() + + tx := newTx(0, 32) + txID := tx.ID() + + _, exists := mempool.Get(txID) + require.False(exists) + + require.NoError(mempool.Add(tx)) + + returned, exists := mempool.Get(txID) + require.True(exists) + require.Equal(tx, returned) + + mempool.Remove(tx) + + _, exists = mempool.Get(txID) + require.False(exists) +} + +func TestPeek(t *testing.T) { + require := require.New(t) + + mempool := newMempool() + + _, exists := mempool.Peek() + require.False(exists) + + tx0 := newTx(0, 32) + tx1 := newTx(1, 32) + + require.NoError(mempool.Add(tx0)) + require.NoError(mempool.Add(tx1)) + + tx, exists := mempool.Peek() + require.True(exists) + require.Equal(tx, tx0) + + mempool.Remove(tx0) + + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, tx1) + + mempool.Remove(tx0) + + tx, exists = mempool.Peek() + require.True(exists) + require.Equal(tx, tx1) + + mempool.Remove(tx1) + + _, exists = mempool.Peek() + require.False(exists) +} + +func TestRemoveConflict(t *testing.T) { + require := require.New(t) + + mempool := newMempool() + + tx := newTx(0, 32) + txConflict := newTx(0, 32) + + require.NoError(mempool.Add(tx)) + + returnedTx, exists := mempool.Peek() + require.True(exists) + require.Equal(returnedTx, tx) + + mempool.Remove(txConflict) + + _, exists = mempool.Peek() + require.False(exists) +} + +func TestIterate(t *testing.T) { + require := require.New(t) + + mempool := newMempool() + + var ( + iteratedTxs []*dummyTx + maxLen = 2 + ) + addTxs := func(tx *dummyTx) bool { + iteratedTxs = append(iteratedTxs, tx) + return len(iteratedTxs) < maxLen + } + mempool.Iterate(addTxs) + require.Empty(iteratedTxs) + + tx0 := newTx(0, 32) + require.NoError(mempool.Add(tx0)) + + mempool.Iterate(addTxs) + require.Equal([]*dummyTx{tx0}, iteratedTxs) + + tx1 := newTx(1, 32) + require.NoError(mempool.Add(tx1)) + + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*dummyTx{tx0, 
tx1}, iteratedTxs) + + tx2 := newTx(2, 32) + require.NoError(mempool.Add(tx2)) + + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*dummyTx{tx0, tx1}, iteratedTxs) + + mempool.Remove(tx0, tx2) + + iteratedTxs = nil + mempool.Iterate(addTxs) + require.Equal([]*dummyTx{tx1}, iteratedTxs) +} + +func TestDropped(t *testing.T) { + require := require.New(t) + + mempool := newMempool() + + tx := newTx(0, 32) + txID := tx.ID() + testErr := errors.New("test") + + mempool.MarkDropped(txID, testErr) + + err := mempool.GetDropReason(txID) + require.ErrorIs(err, testErr) + + require.NoError(mempool.Add(tx)) + require.NoError(mempool.GetDropReason(txID)) + + mempool.MarkDropped(txID, testErr) + require.NoError(mempool.GetDropReason(txID)) +} + +func newTxs(num int, size int) []*dummyTx { + txs := make([]*dummyTx, num) + for i := range txs { + txs[i] = newTx(uint64(i), size) + } + return txs +} + +func newTx(index uint64, size int) *dummyTx { + return &dummyTx{ + size: size, + id: ids.GenerateTestID(), + inputIDs: []ids.ID{ids.Empty.Prefix(index)}, + } +} + +// shows that a valid tx is not added to the mempool if this would exceed its +// maximum size +func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { + require := require.New(t) + + mpool := newMempool() + + tx := newTx(0, 32) + + // shortcut to simulate an almost-filled mempool + mpool.bytesAvailable = tx.Size() - 1 + + err := mpool.Add(tx) + require.ErrorIs(err, ErrMempoolFull) + + // tx should not be marked as dropped if the mempool is full + txID := tx.ID() + mpool.MarkDropped(txID, err) + require.NoError(mpool.GetDropReason(txID)) + + // shortcut to simulate an almost-filled mempool + mpool.bytesAvailable = tx.Size() + + err = mpool.Add(tx) + require.NoError(err, "should have added tx to mempool") +} diff --git a/vms/txs/mempool/metrics.go b/vms/txs/mempool/metrics.go new file mode 100644 index 000000000000..7ad316082b7f --- /dev/null +++ b/vms/txs/mempool/metrics.go @@ -0,0 +1,44 @@ +// Copyright (C) 
2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package mempool + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils" +) + +var _ Metrics = (*metrics)(nil) + +type metrics struct { + numTxs prometheus.Gauge + bytesAvailableMetric prometheus.Gauge +} + +func NewMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { + m := &metrics{ + numTxs: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "Number of transactions in the mempool", + }), + bytesAvailableMetric: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "bytes_available", + Help: "Number of bytes of space currently available in the mempool", + }), + } + + err := utils.Err( + registerer.Register(m.numTxs), + registerer.Register(m.bytesAvailableMetric), + ) + + return m, err +} + +func (m *metrics) Update(numTxs, bytesAvailable int) { + m.numTxs.Set(float64(numTxs)) + m.bytesAvailableMetric.Set(float64(bytesAvailable)) +} diff --git a/wallet/chain/c/backend.go b/wallet/chain/c/backend.go index 3301015f0eeb..8d1ea6f34f07 100644 --- a/wallet/chain/c/backend.go +++ b/wallet/chain/c/backend.go @@ -4,6 +4,7 @@ package c import ( + "context" "errors" "fmt" "math/big" @@ -16,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - stdcontext "context" ethcommon "github.com/ethereum/go-ethereum/common" ) @@ -32,11 +32,10 @@ type Backend interface { BuilderBackend SignerBackend - AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error + AcceptAtomicTx(ctx context.Context, tx *evm.Tx) error } type backend struct { - Context common.ChainUTXOs accountsLock sync.RWMutex @@ -49,18 +48,16 @@ type Account struct { } func NewBackend( - ctx Context, utxos common.ChainUTXOs, accounts map[ethcommon.Address]*Account, ) Backend { return &backend{ - Context: ctx, 
ChainUTXOs: utxos, accounts: accounts, } } -func (b *backend) AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error { +func (b *backend) AcceptAtomicTx(ctx context.Context, tx *evm.Tx) error { switch tx := tx.UnsignedAtomicTx.(type) { case *evm.UnsignedImportTx: for _, input := range tx.ImportedInputs { @@ -131,7 +128,7 @@ func (b *backend) AcceptAtomicTx(ctx stdcontext.Context, tx *evm.Tx) error { return nil } -func (b *backend) Balance(_ stdcontext.Context, addr ethcommon.Address) (*big.Int, error) { +func (b *backend) Balance(_ context.Context, addr ethcommon.Address) (*big.Int, error) { b.accountsLock.RLock() defer b.accountsLock.RUnlock() @@ -142,7 +139,7 @@ func (b *backend) Balance(_ stdcontext.Context, addr ethcommon.Address) (*big.In return account.Balance, nil } -func (b *backend) Nonce(_ stdcontext.Context, addr ethcommon.Address) (uint64, error) { +func (b *backend) Nonce(_ context.Context, addr ethcommon.Address) (uint64, error) { b.accountsLock.RLock() defer b.accountsLock.RUnlock() diff --git a/wallet/chain/c/builder.go b/wallet/chain/c/builder.go index 28e1eccc2d54..0554cb39ba95 100644 --- a/wallet/chain/c/builder.go +++ b/wallet/chain/c/builder.go @@ -4,6 +4,7 @@ package c import ( + "context" "errors" "math/big" @@ -17,7 +18,6 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - stdcontext "context" ethcommon "github.com/ethereum/go-ethereum/common" ) @@ -41,6 +41,10 @@ var ( // Builder provides a convenient interface for building unsigned C-chain // transactions. type Builder interface { + // Context returns the configuration of the chain that this builder uses to + // create transactions. + Context() *Context + // GetBalance calculates the amount of AVAX that this builder has control // over. GetBalance( @@ -86,16 +90,15 @@ type Builder interface { // BuilderBackend specifies the required information needed to build unsigned // C-chain transactions. 
type BuilderBackend interface { - Context - - UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) - Balance(ctx stdcontext.Context, addr ethcommon.Address) (*big.Int, error) - Nonce(ctx stdcontext.Context, addr ethcommon.Address) (uint64, error) + UTXOs(ctx context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) + Balance(ctx context.Context, addr ethcommon.Address) (*big.Int, error) + Nonce(ctx context.Context, addr ethcommon.Address) (uint64, error) } type builder struct { avaxAddrs set.Set[ids.ShortID] ethAddrs set.Set[ethcommon.Address] + context *Context backend BuilderBackend } @@ -110,15 +113,21 @@ type builder struct { func NewBuilder( avaxAddrs set.Set[ids.ShortID], ethAddrs set.Set[ethcommon.Address], + context *Context, backend BuilderBackend, ) Builder { return &builder{ avaxAddrs: avaxAddrs, ethAddrs: ethAddrs, + context: context, backend: backend, } } +func (b *builder) Context() *Context { + return b.context +} + func (b *builder) GetBalance( options ...common.Option, ) (*big.Int, error) { @@ -152,7 +161,7 @@ func (b *builder) GetImportableBalance( var ( addrs = ops.Addresses(b.avaxAddrs) minIssuanceTime = ops.MinIssuanceTime() - avaxAssetID = b.backend.AVAXAssetID() + avaxAssetID = b.context.AVAXAssetID balance uint64 ) for _, utxo := range utxos { @@ -186,7 +195,7 @@ func (b *builder) NewImportTx( var ( addrs = ops.Addresses(b.avaxAddrs) minIssuanceTime = ops.MinIssuanceTime() - avaxAssetID = b.backend.AVAXAssetID() + avaxAssetID = b.context.AVAXAssetID importedInputs = make([]*avax.TransferableInput, 0, len(utxos)) importedAmount uint64 @@ -218,8 +227,8 @@ func (b *builder) NewImportTx( utils.Sort(importedInputs) tx := &evm.UnsignedImportTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, SourceChain: chainID, ImportedInputs: importedInputs, } @@ -260,7 +269,7 @@ func (b *builder) NewExportTx( options ...common.Option, 
) (*evm.UnsignedExportTx, error) { var ( - avaxAssetID = b.backend.AVAXAssetID() + avaxAssetID = b.context.AVAXAssetID exportedOutputs = make([]*avax.TransferableOutput, len(outputs)) exportedAmount uint64 ) @@ -280,8 +289,8 @@ func (b *builder) NewExportTx( avax.SortTransferableOutputs(exportedOutputs, evm.Codec) tx := &evm.UnsignedExportTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, DestinationChain: chainID, ExportedOutputs: exportedOutputs, } @@ -378,7 +387,7 @@ func (b *builder) NewExportTx( utils.Sort(inputs) tx.Ins = inputs - snowCtx, err := newSnowContext(b.backend) + snowCtx, err := newSnowContext(b.context) if err != nil { return nil, err } diff --git a/wallet/chain/c/context.go b/wallet/chain/c/context.go index dc0537e23069..d56a75a0070b 100644 --- a/wallet/chain/c/context.go +++ b/wallet/chain/c/context.go @@ -4,99 +4,66 @@ package c import ( + "context" + "github.com/ava-labs/avalanchego/api/info" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" - - stdcontext "context" ) const Alias = "C" -var _ Context = (*context)(nil) - -type Context interface { - NetworkID() uint32 - BlockchainID() ids.ID - AVAXAssetID() ids.ID +type Context struct { + NetworkID uint32 + BlockchainID ids.ID + AVAXAssetID ids.ID } -type context struct { - networkID uint32 - blockchainID ids.ID - avaxAssetID ids.ID -} - -func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { +func NewContextFromURI(ctx context.Context, uri string) (*Context, error) { infoClient := info.NewClient(uri) xChainClient := avm.NewClient(uri, "X") return NewContextFromClients(ctx, infoClient, xChainClient) } func NewContextFromClients( - ctx stdcontext.Context, + ctx context.Context, infoClient info.Client, 
xChainClient avm.Client, -) (Context, error) { +) (*Context, error) { networkID, err := infoClient.GetNetworkID(ctx) if err != nil { return nil, err } - chainID, err := infoClient.GetBlockchainID(ctx, Alias) + blockchainID, err := infoClient.GetBlockchainID(ctx, Alias) if err != nil { return nil, err } - asset, err := xChainClient.GetAssetDescription(ctx, "AVAX") + avaxAsset, err := xChainClient.GetAssetDescription(ctx, "AVAX") if err != nil { return nil, err } - return NewContext( - networkID, - chainID, - asset.AssetID, - ), nil -} - -func NewContext( - networkID uint32, - blockchainID ids.ID, - avaxAssetID ids.ID, -) Context { - return &context{ - networkID: networkID, - blockchainID: blockchainID, - avaxAssetID: avaxAssetID, - } -} - -func (c *context) NetworkID() uint32 { - return c.networkID -} - -func (c *context) BlockchainID() ids.ID { - return c.blockchainID -} - -func (c *context) AVAXAssetID() ids.ID { - return c.avaxAssetID + return &Context{ + NetworkID: networkID, + BlockchainID: blockchainID, + AVAXAssetID: avaxAsset.AssetID, + }, nil } -func newSnowContext(c Context) (*snow.Context, error) { - chainID := c.BlockchainID() +func newSnowContext(c *Context) (*snow.Context, error) { lookup := ids.NewAliaser() return &snow.Context{ - NetworkID: c.NetworkID(), + NetworkID: c.NetworkID, SubnetID: constants.PrimaryNetworkID, - ChainID: chainID, - CChainID: chainID, - AVAXAssetID: c.AVAXAssetID(), + ChainID: c.BlockchainID, + CChainID: c.BlockchainID, + AVAXAssetID: c.AVAXAssetID, Log: logging.NoLog{}, BCLookup: lookup, - }, lookup.Alias(chainID, Alias) + }, lookup.Alias(c.BlockchainID, Alias) } diff --git a/wallet/chain/c/signer.go b/wallet/chain/c/signer.go index 24de72c13941..1e69db75be19 100644 --- a/wallet/chain/c/signer.go +++ b/wallet/chain/c/signer.go @@ -4,6 +4,7 @@ package c import ( + "context" "errors" "fmt" @@ -19,8 +20,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" 
"github.com/ava-labs/avalanchego/vms/secp256k1fx" - - stdcontext "context" ) const version = 0 @@ -45,7 +44,7 @@ type Signer interface { // // If the signer doesn't have the ability to provide a required signature, // the signature slot will be skipped without reporting an error. - SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error + SignAtomic(ctx context.Context, tx *evm.Tx) error } type EthKeychain interface { @@ -57,7 +56,7 @@ type EthKeychain interface { } type SignerBackend interface { - GetUTXO(ctx stdcontext.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) + GetUTXO(ctx context.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) } type txSigner struct { @@ -74,7 +73,7 @@ func NewSigner(avaxKC keychain.Keychain, ethKC EthKeychain, backend SignerBacken } } -func (s *txSigner) SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error { +func (s *txSigner) SignAtomic(ctx context.Context, tx *evm.Tx) error { switch utx := tx.UnsignedAtomicTx.(type) { case *evm.UnsignedImportTx: signers, err := s.getImportSigners(ctx, utx.SourceChain, utx.ImportedInputs) @@ -90,7 +89,7 @@ func (s *txSigner) SignAtomic(ctx stdcontext.Context, tx *evm.Tx) error { } } -func (s *txSigner) getImportSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { +func (s *txSigner) getImportSigners(ctx context.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { txSigners := make([][]keychain.Signer, len(ins)) for credIndex, transferInput := range ins { input, ok := transferInput.In.(*secp256k1fx.TransferInput) @@ -152,7 +151,7 @@ func (s *txSigner) getExportSigners(ins []evm.EVMInput) [][]keychain.Signer { return txSigners } -func SignUnsignedAtomic(ctx stdcontext.Context, signer Signer, utx evm.UnsignedAtomicTx) (*evm.Tx, error) { +func SignUnsignedAtomic(ctx context.Context, signer Signer, utx evm.UnsignedAtomicTx) (*evm.Tx, error) { tx := &evm.Tx{UnsignedAtomicTx: utx} return tx, 
signer.SignAtomic(ctx, tx) } diff --git a/wallet/chain/c/wallet.go b/wallet/chain/c/wallet.go index 1f8d6d251748..5685316fba4f 100644 --- a/wallet/chain/c/wallet.go +++ b/wallet/chain/c/wallet.go @@ -4,7 +4,7 @@ package c import ( - "errors" + "context" "math/big" "time" @@ -12,21 +12,16 @@ import ( "github.com/ava-labs/coreth/plugin/evm" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ethcommon "github.com/ethereum/go-ethereum/common" ) -var ( - _ Wallet = (*wallet)(nil) - - errNotCommitted = errors.New("not committed") -) +var _ Wallet = (*wallet)(nil) type Wallet interface { - Context - // Builder returns the builder that will be used to create the transactions. Builder() Builder @@ -167,25 +162,45 @@ func (w *wallet) IssueAtomicTx( return w.Backend.AcceptAtomicTx(ctx, tx) } - pollFrequency := ops.PollFrequency() - ticker := time.NewTicker(pollFrequency) + if err := awaitTxAccepted(w.avaxClient, ctx, txID, ops.PollFrequency()); err != nil { + return err + } + + return w.Backend.AcceptAtomicTx(ctx, tx) +} + +func (w *wallet) baseFee(options []common.Option) (*big.Int, error) { + ops := common.NewOptions(options) + baseFee := ops.BaseFee(nil) + if baseFee != nil { + return baseFee, nil + } + + ctx := ops.Context() + return w.ethClient.EstimateBaseFee(ctx) +} + +// TODO: Upstream this function into coreth. +func awaitTxAccepted( + c evm.Client, + ctx context.Context, + txID ids.ID, + freq time.Duration, + options ...rpc.Option, +) error { + ticker := time.NewTicker(freq) defer ticker.Stop() for { - status, err := w.avaxClient.GetAtomicTxStatus(ctx, txID) + status, err := c.GetAtomicTxStatus(ctx, txID, options...) 
if err != nil { return err } - switch status { - case evm.Accepted: - return w.Backend.AcceptAtomicTx(ctx, tx) - case evm.Dropped, evm.Unknown: - return errNotCommitted + if status == evm.Accepted { + return nil } - // The tx is Processing. - select { case <-ticker.C: case <-ctx.Done(): @@ -193,14 +208,3 @@ func (w *wallet) IssueAtomicTx( } } } - -func (w *wallet) baseFee(options []common.Option) (*big.Int, error) { - ops := common.NewOptions(options) - baseFee := ops.BaseFee(nil) - if baseFee != nil { - return baseFee, nil - } - - ctx := ops.Context() - return w.ethClient.EstimateBaseFee(ctx) -} diff --git a/wallet/chain/p/backend.go b/wallet/chain/p/backend.go index 5b8001808ceb..ed8e6a4f8830 100644 --- a/wallet/chain/p/backend.go +++ b/wallet/chain/p/backend.go @@ -4,6 +4,7 @@ package p import ( + "context" "sync" "github.com/ava-labs/avalanchego/database" @@ -13,31 +14,31 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + "github.com/ava-labs/avalanchego/wallet/chain/p/signer" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - - stdcontext "context" ) var _ Backend = (*backend)(nil) // Backend defines the full interface required to support a P-chain wallet. 
type Backend interface { - common.ChainUTXOs - BuilderBackend - SignerBackend + builder.Backend + signer.Backend - AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error + AcceptTx(ctx context.Context, tx *txs.Tx) error } type backend struct { - Context common.ChainUTXOs + context *builder.Context + subnetOwnerLock sync.RWMutex subnetOwner map[ids.ID]fx.Owner // subnetID -> owner } -func NewBackend(ctx Context, utxos common.ChainUTXOs, subnetTxs map[ids.ID]*txs.Tx) Backend { +func NewBackend(context *builder.Context, utxos common.ChainUTXOs, subnetTxs map[ids.ID]*txs.Tx) Backend { subnetOwner := make(map[ids.ID]fx.Owner) for txID, tx := range subnetTxs { // first get owners from the CreateSubnetTx createSubnetTx, ok := tx.Unsigned.(*txs.CreateSubnetTx) @@ -54,13 +55,13 @@ func NewBackend(ctx Context, utxos common.ChainUTXOs, subnetTxs map[ids.ID]*txs. subnetOwner[transferSubnetOwnershipTx.Subnet] = transferSubnetOwnershipTx.Owner } return &backend{ - Context: ctx, ChainUTXOs: utxos, + context: context, subnetOwner: subnetOwner, } } -func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { +func (b *backend) AcceptTx(ctx context.Context, tx *txs.Tx) error { txID := tx.ID() err := tx.Unsigned.Visit(&backendVisitor{ b: b, @@ -75,7 +76,7 @@ func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { return b.addUTXOs(ctx, constants.PlatformChainID, producedUTXOSlice) } -func (b *backend) addUTXOs(ctx stdcontext.Context, destinationChainID ids.ID, utxos []*avax.UTXO) error { +func (b *backend) addUTXOs(ctx context.Context, destinationChainID ids.ID, utxos []*avax.UTXO) error { for _, utxo := range utxos { if err := b.AddUTXO(ctx, destinationChainID, utxo); err != nil { return err @@ -84,7 +85,7 @@ func (b *backend) addUTXOs(ctx stdcontext.Context, destinationChainID ids.ID, ut return nil } -func (b *backend) removeUTXOs(ctx stdcontext.Context, sourceChain ids.ID, utxoIDs set.Set[ids.ID]) error { +func (b *backend) removeUTXOs(ctx context.Context, 
sourceChain ids.ID, utxoIDs set.Set[ids.ID]) error { for utxoID := range utxoIDs { if err := b.RemoveUTXO(ctx, sourceChain, utxoID); err != nil { return err @@ -93,7 +94,7 @@ func (b *backend) removeUTXOs(ctx stdcontext.Context, sourceChain ids.ID, utxoID return nil } -func (b *backend) GetSubnetOwner(_ stdcontext.Context, subnetID ids.ID) (fx.Owner, error) { +func (b *backend) GetSubnetOwner(_ context.Context, subnetID ids.ID) (fx.Owner, error) { b.subnetOwnerLock.RLock() defer b.subnetOwnerLock.RUnlock() diff --git a/wallet/chain/p/backend_visitor.go b/wallet/chain/p/backend_visitor.go index d8b118fa21b7..c7cec9544da1 100644 --- a/wallet/chain/p/backend_visitor.go +++ b/wallet/chain/p/backend_visitor.go @@ -4,29 +4,34 @@ package p import ( + "context" + "errors" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - - stdcontext "context" ) -var _ txs.Visitor = (*backendVisitor)(nil) +var ( + _ txs.Visitor = (*backendVisitor)(nil) + + ErrUnsupportedTxType = errors.New("unsupported tx type") +) // backendVisitor handles accepting of transactions for the backend type backendVisitor struct { b *backend - ctx stdcontext.Context + ctx context.Context txID ids.ID } func (*backendVisitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errUnsupportedTxType + return ErrUnsupportedTxType } func (*backendVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errUnsupportedTxType + return ErrUnsupportedTxType } func (b *backendVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { diff --git a/wallet/chain/p/builder.go b/wallet/chain/p/builder/builder.go similarity index 89% rename from wallet/chain/p/builder.go rename to wallet/chain/p/builder/builder.go index 85f9b6111010..745ebe4d5848 100644 --- a/wallet/chain/p/builder.go +++ b/wallet/chain/p/builder/builder.go @@ -1,9 +1,10 @@ // Copyright (C) 2019-2024, Ava 
Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package p +package builder import ( + "context" "errors" "fmt" "time" @@ -20,15 +21,14 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - - stdcontext "context" ) var ( - errNoChangeAddress = errors.New("no possible change address") - errUnknownOwnerType = errors.New("unknown owner type") - errInsufficientAuthorization = errors.New("insufficient authorization") - errInsufficientFunds = errors.New("insufficient funds") + ErrNoChangeAddress = errors.New("no possible change address") + ErrUnknownOutputType = errors.New("unknown output type") + ErrUnknownOwnerType = errors.New("unknown owner type") + ErrInsufficientAuthorization = errors.New("insufficient authorization") + ErrInsufficientFunds = errors.New("insufficient funds") _ Builder = (*builder)(nil) ) @@ -36,6 +36,10 @@ var ( // Builder provides a convenient interface for building unsigned P-chain // transactions. type Builder interface { + // Context returns the configuration of the chain that this builder uses to + // create transactions. + Context() *Context + // GetBalance calculates the amount of each asset that this builder has // control over. GetBalance( @@ -51,16 +55,14 @@ type Builder interface { options ...common.Option, ) (map[ids.ID]uint64, error) - // NewBaseTx creates a new simple value transfer. Because the P-chain - // doesn't intend for balance transfers to occur, this method is expensive - // and abuses the creation of subnets. + // NewBaseTx creates a new simple value transfer. // // - [outputs] specifies all the recipients and amounts that should be sent // from this transaction. NewBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, - ) (*txs.CreateSubnetTx, error) + ) (*txs.BaseTx, error) // NewAddValidatorTx creates a new validator of the primary network. 
// @@ -256,32 +258,39 @@ type Builder interface { ) (*txs.AddPermissionlessDelegatorTx, error) } -// BuilderBackend specifies the required information needed to build unsigned -// P-chain transactions. -type BuilderBackend interface { - Context - UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) - GetSubnetOwner(ctx stdcontext.Context, subnetID ids.ID) (fx.Owner, error) +type Backend interface { + UTXOs(ctx context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) + GetSubnetOwner(ctx context.Context, subnetID ids.ID) (fx.Owner, error) } type builder struct { addrs set.Set[ids.ShortID] - backend BuilderBackend + context *Context + backend Backend } -// NewBuilder returns a new transaction builder. +// New returns a new transaction builder. // // - [addrs] is the set of addresses that the builder assumes can be used when // signing the transactions in the future. -// - [backend] provides the required access to the chain's context and state -// to build out the transactions. -func NewBuilder(addrs set.Set[ids.ShortID], backend BuilderBackend) Builder { +// - [context] provides the chain's configuration. +// - [backend] provides the chain's state. 
+func New( + addrs set.Set[ids.ShortID], + context *Context, + backend Backend, +) Builder { return &builder{ addrs: addrs, + context: context, backend: backend, } } +func (b *builder) Context() *Context { + return b.context +} + func (b *builder) GetBalance( options ...common.Option, ) (map[ids.ID]uint64, error) { @@ -300,9 +309,9 @@ func (b *builder) GetImportableBalance( func (b *builder) NewBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, -) (*txs.CreateSubnetTx, error) { +) (*txs.BaseTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.CreateSubnetTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } for _, out := range outputs { assetID := out.AssetID() @@ -322,16 +331,13 @@ func (b *builder) NewBaseTx( outputs = append(outputs, changeOutputs...) avax.SortTransferableOutputs(outputs, txs.Codec) // sort the outputs - tx := &txs.CreateSubnetTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: constants.PlatformChainID, - Ins: inputs, - Outs: outputs, - Memo: ops.Memo(), - }}, - Owner: &secp256k1fx.OutputOwners{}, - } + tx := &txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: b.context.NetworkID, + BlockchainID: constants.PlatformChainID, + Ins: inputs, + Outs: outputs, + Memo: ops.Memo(), + }} return tx, b.initCtx(tx) } @@ -341,9 +347,9 @@ func (b *builder) NewAddValidatorTx( shares uint32, options ...common.Option, ) (*txs.AddValidatorTx, error) { - avaxAssetID := b.backend.AVAXAssetID() + avaxAssetID := b.context.AVAXAssetID toBurn := map[ids.ID]uint64{ - avaxAssetID: b.backend.AddPrimaryNetworkValidatorFee(), + avaxAssetID: b.context.AddPrimaryNetworkValidatorFee, } toStake := map[ids.ID]uint64{ avaxAssetID: vdr.Wght, @@ -357,7 +363,7 @@ func (b *builder) NewAddValidatorTx( utils.Sort(rewardsOwner.Addrs) tx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: 
constants.PlatformChainID, Ins: inputs, Outs: baseOutputs, @@ -376,7 +382,7 @@ func (b *builder) NewAddSubnetValidatorTx( options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.AddSubnetValidatorFee(), + b.context.AVAXAssetID: b.context.AddSubnetValidatorFee, } toStake := map[ids.ID]uint64{} ops := common.NewOptions(options) @@ -392,7 +398,7 @@ func (b *builder) NewAddSubnetValidatorTx( tx := &txs.AddSubnetValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -410,7 +416,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( options ...common.Option, ) (*txs.RemoveSubnetValidatorTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } toStake := map[ids.ID]uint64{} ops := common.NewOptions(options) @@ -426,7 +432,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( tx := &txs.RemoveSubnetValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -444,12 +450,12 @@ func (b *builder) NewAddDelegatorTx( rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) { - avaxAssetID := b.backend.AVAXAssetID() + avaxAssetID := b.context.AVAXAssetID toBurn := map[ids.ID]uint64{ - avaxAssetID: b.backend.AddPrimaryNetworkDelegatorFee(), + avaxAssetID: b.context.AddPrimaryNetworkDelegatorFee, } toStake := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): vdr.Wght, + avaxAssetID: vdr.Wght, } ops := common.NewOptions(options) inputs, baseOutputs, stakeOutputs, err := b.spend(toBurn, toStake, ops) @@ -460,7 +466,7 @@ func (b *builder) NewAddDelegatorTx( utils.Sort(rewardsOwner.Addrs) tx := &txs.AddDelegatorTx{ BaseTx: 
txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: baseOutputs, @@ -482,7 +488,7 @@ func (b *builder) NewCreateChainTx( options ...common.Option, ) (*txs.CreateChainTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.CreateBlockchainTxFee(), + b.context.AVAXAssetID: b.context.CreateBlockchainTxFee, } toStake := map[ids.ID]uint64{} ops := common.NewOptions(options) @@ -499,7 +505,7 @@ func (b *builder) NewCreateChainTx( utils.Sort(fxIDs) tx := &txs.CreateChainTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -520,7 +526,7 @@ func (b *builder) NewCreateSubnetTx( options ...common.Option, ) (*txs.CreateSubnetTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.CreateSubnetTxFee(), + b.context.AVAXAssetID: b.context.CreateSubnetTxFee, } toStake := map[ids.ID]uint64{} ops := common.NewOptions(options) @@ -532,7 +538,7 @@ func (b *builder) NewCreateSubnetTx( utils.Sort(owner.Addrs) tx := &txs.CreateSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -549,7 +555,7 @@ func (b *builder) NewTransferSubnetOwnershipTx( options ...common.Option, ) (*txs.TransferSubnetOwnershipTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } toStake := map[ids.ID]uint64{} ops := common.NewOptions(options) @@ -566,7 +572,7 @@ func (b *builder) NewTransferSubnetOwnershipTx( utils.Sort(owner.Addrs) tx := &txs.TransferSubnetOwnershipTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: 
constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -593,8 +599,8 @@ func (b *builder) NewImportTx( var ( addrs = ops.Addresses(b.addrs) minIssuanceTime = ops.MinIssuanceTime() - avaxAssetID = b.backend.AVAXAssetID() - txFee = b.backend.BaseTxFee() + avaxAssetID = b.context.AVAXAssetID + txFee = b.context.BaseTxFee importedInputs = make([]*avax.TransferableInput, 0, len(utxos)) importedAmounts = make(map[ids.ID]uint64) @@ -635,7 +641,7 @@ func (b *builder) NewImportTx( if len(importedInputs) == 0 { return nil, fmt.Errorf( "%w: no UTXOs available to import", - errInsufficientFunds, + ErrInsufficientFunds, ) } @@ -674,7 +680,7 @@ func (b *builder) NewImportTx( avax.SortTransferableOutputs(outputs, txs.Codec) // sort imported outputs tx := &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -692,7 +698,7 @@ func (b *builder) NewExportTx( options ...common.Option, ) (*txs.ExportTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } for _, out := range outputs { assetID := out.AssetID() @@ -713,7 +719,7 @@ func (b *builder) NewExportTx( avax.SortTransferableOutputs(outputs, txs.Codec) // sort exported outputs tx := &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: changeOutputs, @@ -743,8 +749,8 @@ func (b *builder) NewTransformSubnetTx( options ...common.Option, ) (*txs.TransformSubnetTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.TransformSubnetTxFee(), - assetID: maxSupply - initialSupply, + b.context.AVAXAssetID: b.context.TransformSubnetTxFee, + assetID: maxSupply - initialSupply, } toStake := map[ids.ID]uint64{} ops := common.NewOptions(options) @@ -760,7 +766,7 @@ 
func (b *builder) NewTransformSubnetTx( tx := &txs.TransformSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: outputs, @@ -794,12 +800,12 @@ func (b *builder) NewAddPermissionlessValidatorTx( shares uint32, options ...common.Option, ) (*txs.AddPermissionlessValidatorTx, error) { - avaxAssetID := b.backend.AVAXAssetID() + avaxAssetID := b.context.AVAXAssetID toBurn := map[ids.ID]uint64{} if vdr.Subnet == constants.PrimaryNetworkID { - toBurn[avaxAssetID] = b.backend.AddPrimaryNetworkValidatorFee() + toBurn[avaxAssetID] = b.context.AddPrimaryNetworkValidatorFee } else { - toBurn[avaxAssetID] = b.backend.AddSubnetValidatorFee() + toBurn[avaxAssetID] = b.context.AddSubnetValidatorFee } toStake := map[ids.ID]uint64{ assetID: vdr.Wght, @@ -814,7 +820,7 @@ func (b *builder) NewAddPermissionlessValidatorTx( utils.Sort(delegationRewardsOwner.Addrs) tx := &txs.AddPermissionlessValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: baseOutputs, @@ -837,12 +843,12 @@ func (b *builder) NewAddPermissionlessDelegatorTx( rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddPermissionlessDelegatorTx, error) { - avaxAssetID := b.backend.AVAXAssetID() + avaxAssetID := b.context.AVAXAssetID toBurn := map[ids.ID]uint64{} if vdr.Subnet == constants.PrimaryNetworkID { - toBurn[avaxAssetID] = b.backend.AddPrimaryNetworkDelegatorFee() + toBurn[avaxAssetID] = b.context.AddPrimaryNetworkDelegatorFee } else { - toBurn[avaxAssetID] = b.backend.AddSubnetDelegatorFee() + toBurn[avaxAssetID] = b.context.AddSubnetDelegatorFee } toStake := map[ids.ID]uint64{ assetID: vdr.Wght, @@ -856,7 +862,7 @@ func (b *builder) NewAddPermissionlessDelegatorTx( utils.Sort(rewardsOwner.Addrs) tx := &txs.AddPermissionlessDelegatorTx{ 
BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), + NetworkID: b.context.NetworkID, BlockchainID: constants.PlatformChainID, Ins: inputs, Outs: baseOutputs, @@ -900,7 +906,7 @@ func (b *builder) getBalance( out, ok := outIntf.(*secp256k1fx.TransferOutput) if !ok { - return nil, errUnknownOutputType + return nil, ErrUnknownOutputType } _, ok = common.MatchOwners(&out.OutputOwners, addrs, minIssuanceTime) @@ -949,13 +955,20 @@ func (b *builder) spend( addr, ok := addrs.Peek() if !ok { - return nil, nil, nil, errNoChangeAddress + return nil, nil, nil, ErrNoChangeAddress } changeOwner := options.ChangeOwner(&secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }) + // Initialize the return values with empty slices to preserve backward + // compatibility of the json representation of transactions with no + // inputs or outputs. + inputs = make([]*avax.TransferableInput, 0) + changeOutputs = make([]*avax.TransferableOutput, 0) + stakeOutputs = make([]*avax.TransferableOutput, 0) + // Iterate over the locked UTXOs for _, utxo := range utxos { assetID := utxo.AssetID() @@ -982,7 +995,7 @@ func (b *builder) spend( out, ok := lockedOut.TransferableOut.(*secp256k1fx.TransferOutput) if !ok { - return nil, nil, nil, errUnknownOutputType + return nil, nil, nil, ErrUnknownOutputType } inputSigIndices, ok := common.MatchOwners(&out.OutputOwners, addrs, minIssuanceTime) @@ -1063,7 +1076,7 @@ func (b *builder) spend( out, ok := outIntf.(*secp256k1fx.TransferOutput) if !ok { - return nil, nil, nil, errUnknownOutputType + return nil, nil, nil, ErrUnknownOutputType } inputSigIndices, ok := common.MatchOwners(&out.OutputOwners, addrs, minIssuanceTime) @@ -1123,7 +1136,7 @@ func (b *builder) spend( if amount != 0 { return nil, nil, nil, fmt.Errorf( "%w: provided UTXOs need %d more units of asset %q to stake", - errInsufficientFunds, + ErrInsufficientFunds, amount, assetID, ) @@ -1133,7 +1146,7 @@ func (b *builder) spend( if amount != 0 { return 
nil, nil, nil, fmt.Errorf( "%w: provided UTXOs need %d more units of asset %q", - errInsufficientFunds, + ErrInsufficientFunds, amount, assetID, ) @@ -1157,7 +1170,7 @@ func (b *builder) authorizeSubnet(subnetID ids.ID, options *common.Options) (*se } owner, ok := ownerIntf.(*secp256k1fx.OutputOwners) if !ok { - return nil, errUnknownOwnerType + return nil, ErrUnknownOwnerType } addrs := options.Addresses(b.addrs) @@ -1165,7 +1178,7 @@ func (b *builder) authorizeSubnet(subnetID ids.ID, options *common.Options) (*se inputSigIndices, ok := common.MatchOwners(owner, addrs, minIssuanceTime) if !ok { // We can't authorize the subnet - return nil, errInsufficientAuthorization + return nil, ErrInsufficientAuthorization } return &secp256k1fx.Input{ SigIndices: inputSigIndices, @@ -1173,7 +1186,7 @@ func (b *builder) authorizeSubnet(subnetID ids.ID, options *common.Options) (*se } func (b *builder) initCtx(tx txs.UnsignedTx) error { - ctx, err := newSnowContext(b.backend) + ctx, err := NewSnowContext(b.context.NetworkID, b.context.AVAXAssetID) if err != nil { return err } diff --git a/wallet/chain/p/builder_with_options.go b/wallet/chain/p/builder/builder_with_options.go similarity index 81% rename from wallet/chain/p/builder_with_options.go rename to wallet/chain/p/builder/builder_with_options.go index a402355b9e01..d831e0c76daa 100644 --- a/wallet/chain/p/builder_with_options.go +++ b/wallet/chain/p/builder/builder_with_options.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package p +package builder import ( "time" @@ -17,28 +17,32 @@ import ( var _ Builder = (*builderWithOptions)(nil) type builderWithOptions struct { - Builder + builder Builder options []common.Option } -// NewBuilderWithOptions returns a new transaction builder that will use the -// given options by default. +// NewWithOptions returns a new builder that will use the given options by +// default. 
// // - [builder] is the builder that will be called to perform the underlying // operations. // - [options] will be provided to the builder in addition to the options // provided in the method calls. -func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { +func NewWithOptions(builder Builder, options ...common.Option) Builder { return &builderWithOptions{ - Builder: builder, + builder: builder, options: options, } } +func (b *builderWithOptions) Context() *Context { + return b.builder.Context() +} + func (b *builderWithOptions) GetBalance( options ...common.Option, ) (map[ids.ID]uint64, error) { - return b.Builder.GetBalance( + return b.builder.GetBalance( common.UnionOptions(b.options, options)..., ) } @@ -47,19 +51,29 @@ func (b *builderWithOptions) GetImportableBalance( chainID ids.ID, options ...common.Option, ) (map[ids.ID]uint64, error) { - return b.Builder.GetImportableBalance( + return b.builder.GetImportableBalance( chainID, common.UnionOptions(b.options, options)..., ) } +func (b *builderWithOptions) NewBaseTx( + outputs []*avax.TransferableOutput, + options ...common.Option, +) (*txs.BaseTx, error) { + return b.builder.NewBaseTx( + outputs, + common.UnionOptions(b.options, options)..., + ) +} + func (b *builderWithOptions) NewAddValidatorTx( vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, ) (*txs.AddValidatorTx, error) { - return b.Builder.NewAddValidatorTx( + return b.builder.NewAddValidatorTx( vdr, rewardsOwner, shares, @@ -71,18 +85,18 @@ func (b *builderWithOptions) NewAddSubnetValidatorTx( vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) { - return b.Builder.NewAddSubnetValidatorTx( + return b.builder.NewAddSubnetValidatorTx( vdr, common.UnionOptions(b.options, options)..., ) } -func (b *builderWithOptions) RemoveSubnetValidatorTx( +func (b *builderWithOptions) NewRemoveSubnetValidatorTx( nodeID ids.NodeID, subnetID ids.ID, options 
...common.Option, ) (*txs.RemoveSubnetValidatorTx, error) { - return b.Builder.NewRemoveSubnetValidatorTx( + return b.builder.NewRemoveSubnetValidatorTx( nodeID, subnetID, common.UnionOptions(b.options, options)..., @@ -94,7 +108,7 @@ func (b *builderWithOptions) NewAddDelegatorTx( rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) { - return b.Builder.NewAddDelegatorTx( + return b.builder.NewAddDelegatorTx( vdr, rewardsOwner, common.UnionOptions(b.options, options)..., @@ -109,7 +123,7 @@ func (b *builderWithOptions) NewCreateChainTx( chainName string, options ...common.Option, ) (*txs.CreateChainTx, error) { - return b.Builder.NewCreateChainTx( + return b.builder.NewCreateChainTx( subnetID, genesis, vmID, @@ -123,7 +137,7 @@ func (b *builderWithOptions) NewCreateSubnetTx( owner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.CreateSubnetTx, error) { - return b.Builder.NewCreateSubnetTx( + return b.builder.NewCreateSubnetTx( owner, common.UnionOptions(b.options, options)..., ) @@ -134,7 +148,7 @@ func (b *builderWithOptions) NewTransferSubnetOwnershipTx( owner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.TransferSubnetOwnershipTx, error) { - return b.Builder.NewTransferSubnetOwnershipTx( + return b.builder.NewTransferSubnetOwnershipTx( subnetID, owner, common.UnionOptions(b.options, options)..., @@ -146,7 +160,7 @@ func (b *builderWithOptions) NewImportTx( to *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.ImportTx, error) { - return b.Builder.NewImportTx( + return b.builder.NewImportTx( sourceChainID, to, common.UnionOptions(b.options, options)..., @@ -158,7 +172,7 @@ func (b *builderWithOptions) NewExportTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.ExportTx, error) { - return b.Builder.NewExportTx( + return b.builder.NewExportTx( chainID, outputs, common.UnionOptions(b.options, options)..., @@ -182,7 +196,7 @@ func (b *builderWithOptions) 
NewTransformSubnetTx( uptimeRequirement uint32, options ...common.Option, ) (*txs.TransformSubnetTx, error) { - return b.Builder.NewTransformSubnetTx( + return b.builder.NewTransformSubnetTx( subnetID, assetID, initialSupply, @@ -210,7 +224,7 @@ func (b *builderWithOptions) NewAddPermissionlessValidatorTx( shares uint32, options ...common.Option, ) (*txs.AddPermissionlessValidatorTx, error) { - return b.Builder.NewAddPermissionlessValidatorTx( + return b.builder.NewAddPermissionlessValidatorTx( vdr, signer, assetID, @@ -227,7 +241,7 @@ func (b *builderWithOptions) NewAddPermissionlessDelegatorTx( rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddPermissionlessDelegatorTx, error) { - return b.Builder.NewAddPermissionlessDelegatorTx( + return b.builder.NewAddPermissionlessDelegatorTx( vdr, assetID, rewardsOwner, diff --git a/wallet/chain/p/builder/context.go b/wallet/chain/p/builder/context.go new file mode 100644 index 000000000000..f0da23fdc5f9 --- /dev/null +++ b/wallet/chain/p/builder/context.go @@ -0,0 +1,82 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package builder + +import ( + "context" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm" +) + +const Alias = "P" + +type Context struct { + NetworkID uint32 + AVAXAssetID ids.ID + BaseTxFee uint64 + CreateSubnetTxFee uint64 + TransformSubnetTxFee uint64 + CreateBlockchainTxFee uint64 + AddPrimaryNetworkValidatorFee uint64 + AddPrimaryNetworkDelegatorFee uint64 + AddSubnetValidatorFee uint64 + AddSubnetDelegatorFee uint64 +} + +func NewContextFromURI(ctx context.Context, uri string) (*Context, error) { + infoClient := info.NewClient(uri) + xChainClient := avm.NewClient(uri, "X") + return NewContextFromClients(ctx, infoClient, xChainClient) +} + +func NewContextFromClients( + ctx context.Context, + infoClient info.Client, + xChainClient avm.Client, +) (*Context, error) { + networkID, err := infoClient.GetNetworkID(ctx) + if err != nil { + return nil, err + } + + asset, err := xChainClient.GetAssetDescription(ctx, "AVAX") + if err != nil { + return nil, err + } + + txFees, err := infoClient.GetTxFee(ctx) + if err != nil { + return nil, err + } + + return &Context{ + NetworkID: networkID, + AVAXAssetID: asset.AssetID, + BaseTxFee: uint64(txFees.TxFee), + CreateSubnetTxFee: uint64(txFees.CreateSubnetTxFee), + TransformSubnetTxFee: uint64(txFees.TransformSubnetTxFee), + CreateBlockchainTxFee: uint64(txFees.CreateBlockchainTxFee), + AddPrimaryNetworkValidatorFee: uint64(txFees.AddPrimaryNetworkValidatorFee), + AddPrimaryNetworkDelegatorFee: uint64(txFees.AddPrimaryNetworkDelegatorFee), + AddSubnetValidatorFee: uint64(txFees.AddSubnetValidatorFee), + AddSubnetDelegatorFee: uint64(txFees.AddSubnetDelegatorFee), + }, nil +} + +func NewSnowContext(networkID uint32, avaxAssetID ids.ID) (*snow.Context, error) { + lookup := ids.NewAliaser() + return 
&snow.Context{ + NetworkID: networkID, + SubnetID: constants.PrimaryNetworkID, + ChainID: constants.PlatformChainID, + AVAXAssetID: avaxAssetID, + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(constants.PlatformChainID, Alias) +} diff --git a/wallet/chain/p/builder_test.go b/wallet/chain/p/builder_test.go index 473103147549..9f73b9e399b9 100644 --- a/wallet/chain/p/builder_test.go +++ b/wallet/chain/p/builder_test.go @@ -21,6 +21,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -32,22 +33,22 @@ var ( avaxAssetID = ids.Empty.Prefix(1789) subnetAssetID = ids.Empty.Prefix(2024) - testCtx = NewContext( - constants.UnitTestID, - avaxAssetID, - units.MicroAvax, // BaseTxFee - 19*units.MicroAvax, // CreateSubnetTxFee - 789*units.MicroAvax, // TransformSubnetTxFee - 1234*units.MicroAvax, // CreateBlockchainTxFee - 19*units.MilliAvax, // AddPrimaryNetworkValidatorFee - 765*units.MilliAvax, // AddPrimaryNetworkDelegatorFee - 1010*units.MilliAvax, // AddSubnetValidatorFee - 9*units.Avax, // AddSubnetDelegatorFee - ) + testContext = &builder.Context{ + NetworkID: constants.UnitTestID, + AVAXAssetID: avaxAssetID, + BaseTxFee: units.MicroAvax, + CreateSubnetTxFee: 19 * units.MicroAvax, + TransformSubnetTxFee: 789 * units.MicroAvax, + CreateBlockchainTxFee: 1234 * units.MicroAvax, + AddPrimaryNetworkValidatorFee: 19 * units.MilliAvax, + AddPrimaryNetworkDelegatorFee: 765 * units.MilliAvax, + AddSubnetValidatorFee: 1010 * units.MilliAvax, + AddSubnetDelegatorFee: 9 * units.Avax, + } ) -// These tests create and sign a tx, then verify that utxos included -// in the tx are exactly necessary to pay fees for it +// These tests create a tx, then verify that utxos included in the tx are +// exactly necessary to pay fees for 
it. func TestBaseTx(t *testing.T) { var ( @@ -59,11 +60,11 @@ func TestBaseTx(t *testing.T) { chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ constants.PlatformChainID: utxos, }) - backend = NewBackend(testCtx, chainUTXOs, nil) + backend = NewBackend(testContext, chainUTXOs, nil) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction outputsToMove = []*avax.TransferableOutput{{ @@ -87,7 +88,7 @@ func TestBaseTx(t *testing.T) { require.Len(ins, 2) require.Len(outs, 2) - expectedConsumed := testCtx.CreateSubnetTxFee() + outputsToMove[0].Out.Amount() + expectedConsumed := testContext.BaseTxFee + outputsToMove[0].Out.Amount() consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) require.Equal(outputsToMove[0], outs[1]) @@ -119,11 +120,11 @@ func TestAddSubnetValidatorTx(t *testing.T) { }, } - backend = NewBackend(testCtx, chainUTXOs, subnets) + backend = NewBackend(testContext, chainUTXOs, subnets) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + builder = builder.New(set.Of(utxoAddr, subnetAuthAddr), testContext, backend) // data to build the transaction subnetValidator = &txs.SubnetValidator{ @@ -145,7 +146,7 @@ func TestAddSubnetValidatorTx(t *testing.T) { require.Len(ins, 2) require.Len(outs, 1) - expectedConsumed := testCtx.AddSubnetValidatorFee() + expectedConsumed := testContext.AddSubnetValidatorFee consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -176,11 +177,11 @@ func TestRemoveSubnetValidatorTx(t *testing.T) { }, } - backend = NewBackend(testCtx, chainUTXOs, subnets) + backend = NewBackend(testContext, chainUTXOs, subnets) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr, 
subnetAuthAddr), backend) + builder = builder.New(set.Of(utxoAddr, subnetAuthAddr), testContext, backend) ) // build the transaction @@ -196,7 +197,7 @@ func TestRemoveSubnetValidatorTx(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -227,10 +228,10 @@ func TestCreateChainTx(t *testing.T) { }, } - backend = NewBackend(testCtx, chainUTXOs, subnets) + backend = NewBackend(testContext, chainUTXOs, subnets) utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + builder = builder.New(set.Of(utxoAddr, subnetAuthAddr), testContext, backend) // data to build the transaction genesisBytes = []byte{'a', 'b', 'c'} @@ -255,7 +256,7 @@ func TestCreateChainTx(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.CreateBlockchainTxFee() + expectedConsumed := testContext.CreateBlockchainTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -286,11 +287,11 @@ func TestCreateSubnetTx(t *testing.T) { }, } - backend = NewBackend(testCtx, chainUTXOs, subnets) + backend = NewBackend(testContext, chainUTXOs, subnets) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + builder = builder.New(set.Of(utxoAddr, subnetAuthAddr), testContext, backend) ) // build the transaction @@ -303,7 +304,7 @@ func TestCreateSubnetTx(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.CreateSubnetTxFee() + expectedConsumed := testContext.CreateSubnetTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -334,11 +335,11 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { }, } - backend = NewBackend(testCtx, chainUTXOs, subnets) + backend = 
NewBackend(testContext, chainUTXOs, subnets) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + builder = builder.New(set.Of(utxoAddr, subnetAuthAddr), testContext, backend) ) // build the transaction @@ -354,7 +355,7 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -373,11 +374,11 @@ func TestImportTx(t *testing.T) { sourceChainID: importedUTXOs, }) - backend = NewBackend(testCtx, chainUTXOs, nil) + backend = NewBackend(testContext, chainUTXOs, nil) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction importKey = testKeys[0] @@ -404,7 +405,7 @@ func TestImportTx(t *testing.T) { require.Len(importedIns, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := importedIns[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -419,11 +420,11 @@ func TestExportTx(t *testing.T) { chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ constants.PlatformChainID: utxos, }) - backend = NewBackend(testCtx, chainUTXOs, nil) + backend = NewBackend(testContext, chainUTXOs, nil) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction subnetID = ids.GenerateTestID() @@ -452,7 +453,7 @@ func TestExportTx(t *testing.T) { require.Len(ins, 2) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + exportedOutputs[0].Out.Amount() + expectedConsumed := testContext.BaseTxFee + exportedOutputs[0].Out.Amount() 
consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) require.Equal(utx.ExportedOutputs, exportedOutputs) @@ -484,11 +485,11 @@ func TestTransformSubnetTx(t *testing.T) { }, } - backend = NewBackend(testCtx, chainUTXOs, subnets) + backend = NewBackend(testContext, chainUTXOs, subnets) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr, subnetAuthAddr), backend) + builder = builder.New(set.Of(utxoAddr, subnetAuthAddr), testContext, backend) // data to build the transaction initialSupply = 40 * units.MegaAvax @@ -523,7 +524,7 @@ func TestTransformSubnetTx(t *testing.T) { expectedConsumedSubnetAsset := maxSupply - initialSupply consumedSubnetAsset := ins[0].In.Amount() - outs[1].Out.Amount() require.Equal(expectedConsumedSubnetAsset, consumedSubnetAsset) - expectedConsumed := testCtx.TransformSubnetTxFee() + expectedConsumed := testContext.TransformSubnetTxFee consumed := ins[1].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -538,13 +539,13 @@ func TestAddPermissionlessValidatorTx(t *testing.T) { chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ constants.PlatformChainID: utxos, }) - backend = NewBackend(testCtx, chainUTXOs, nil) + backend = NewBackend(testContext, chainUTXOs, nil) // builder utxoAddr = utxosKey.Address() rewardKey = testKeys[0] rewardAddr = rewardKey.Address() - builder = NewBuilder(set.Of(utxoAddr, rewardAddr), backend) + builder = builder.New(set.Of(utxoAddr, rewardAddr), testContext, backend) // data to build the transaction validationRewardsOwner = &secp256k1fx.OutputOwners{ @@ -593,7 +594,7 @@ func TestAddPermissionlessValidatorTx(t *testing.T) { expectedConsumedSubnetAsset := utx.Validator.Weight() consumedSubnetAsset := staked[0].Out.Amount() + staked[1].Out.Amount() require.Equal(expectedConsumedSubnetAsset, consumedSubnetAsset) - expectedConsumed := 
testCtx.AddPrimaryNetworkValidatorFee() + expectedConsumed := testContext.AddPrimaryNetworkValidatorFee consumed := ins[1].In.Amount() + ins[3].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -608,13 +609,13 @@ func TestAddPermissionlessDelegatorTx(t *testing.T) { chainUTXOs = common.NewDeterministicChainUTXOs(require, map[ids.ID][]*avax.UTXO{ constants.PlatformChainID: utxos, }) - backend = NewBackend(testCtx, chainUTXOs, nil) + backend = NewBackend(testContext, chainUTXOs, nil) // builder utxoAddr = utxosKey.Address() rewardKey = testKeys[0] rewardAddr = rewardKey.Address() - builder = NewBuilder(set.Of(utxoAddr, rewardAddr), backend) + builder = builder.New(set.Of(utxoAddr, rewardAddr), testContext, backend) // data to build the transaction rewardsOwner = &secp256k1fx.OutputOwners{ @@ -651,7 +652,7 @@ func TestAddPermissionlessDelegatorTx(t *testing.T) { expectedConsumedSubnetAsset := utx.Validator.Weight() consumedSubnetAsset := staked[0].Out.Amount() + staked[1].Out.Amount() require.Equal(expectedConsumedSubnetAsset, consumedSubnetAsset) - expectedConsumed := testCtx.AddPrimaryNetworkDelegatorFee() + expectedConsumed := testContext.AddPrimaryNetworkDelegatorFee consumed := ins[1].In.Amount() + ins[3].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -663,7 +664,7 @@ func makeTestUTXOs(utxosKey *secp256k1.PrivateKey) []*avax.UTXO { utxosAddr := utxosKey.Address() return []*avax.UTXO{ - { // a small UTXO first, which should not be enough to pay fees + { // a small UTXO first, which should not be enough to pay fees UTXOID: avax.UTXOID{ TxID: ids.Empty.Prefix(utxosOffset), OutputIndex: uint32(utxosOffset), diff --git a/wallet/chain/p/context.go b/wallet/chain/p/context.go deleted file mode 100644 index 2511a19a9dbf..000000000000 --- a/wallet/chain/p/context.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package p - -import ( - "github.com/ava-labs/avalanchego/api/info" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms/avm" - - stdcontext "context" -) - -const Alias = "P" - -var _ Context = (*context)(nil) - -type Context interface { - NetworkID() uint32 - AVAXAssetID() ids.ID - BaseTxFee() uint64 - CreateSubnetTxFee() uint64 - TransformSubnetTxFee() uint64 - CreateBlockchainTxFee() uint64 - AddPrimaryNetworkValidatorFee() uint64 - AddPrimaryNetworkDelegatorFee() uint64 - AddSubnetValidatorFee() uint64 - AddSubnetDelegatorFee() uint64 -} - -type context struct { - networkID uint32 - avaxAssetID ids.ID - baseTxFee uint64 - createSubnetTxFee uint64 - transformSubnetTxFee uint64 - createBlockchainTxFee uint64 - addPrimaryNetworkValidatorFee uint64 - addPrimaryNetworkDelegatorFee uint64 - addSubnetValidatorFee uint64 - addSubnetDelegatorFee uint64 -} - -func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { - infoClient := info.NewClient(uri) - xChainClient := avm.NewClient(uri, "X") - return NewContextFromClients(ctx, infoClient, xChainClient) -} - -func NewContextFromClients( - ctx stdcontext.Context, - infoClient info.Client, - xChainClient avm.Client, -) (Context, error) { - networkID, err := infoClient.GetNetworkID(ctx) - if err != nil { - return nil, err - } - - asset, err := xChainClient.GetAssetDescription(ctx, "AVAX") - if err != nil { - return nil, err - } - - txFees, err := infoClient.GetTxFee(ctx) - if err != nil { - return nil, err - } - - return NewContext( - networkID, - asset.AssetID, - uint64(txFees.TxFee), - uint64(txFees.CreateSubnetTxFee), - uint64(txFees.TransformSubnetTxFee), - uint64(txFees.CreateBlockchainTxFee), - uint64(txFees.AddPrimaryNetworkValidatorFee), - 
uint64(txFees.AddPrimaryNetworkDelegatorFee), - uint64(txFees.AddSubnetValidatorFee), - uint64(txFees.AddSubnetDelegatorFee), - ), nil -} - -func NewContext( - networkID uint32, - avaxAssetID ids.ID, - baseTxFee uint64, - createSubnetTxFee uint64, - transformSubnetTxFee uint64, - createBlockchainTxFee uint64, - addPrimaryNetworkValidatorFee uint64, - addPrimaryNetworkDelegatorFee uint64, - addSubnetValidatorFee uint64, - addSubnetDelegatorFee uint64, -) Context { - return &context{ - networkID: networkID, - avaxAssetID: avaxAssetID, - baseTxFee: baseTxFee, - createSubnetTxFee: createSubnetTxFee, - transformSubnetTxFee: transformSubnetTxFee, - createBlockchainTxFee: createBlockchainTxFee, - addPrimaryNetworkValidatorFee: addPrimaryNetworkValidatorFee, - addPrimaryNetworkDelegatorFee: addPrimaryNetworkDelegatorFee, - addSubnetValidatorFee: addSubnetValidatorFee, - addSubnetDelegatorFee: addSubnetDelegatorFee, - } -} - -func (c *context) NetworkID() uint32 { - return c.networkID -} - -func (c *context) AVAXAssetID() ids.ID { - return c.avaxAssetID -} - -func (c *context) BaseTxFee() uint64 { - return c.baseTxFee -} - -func (c *context) CreateSubnetTxFee() uint64 { - return c.createSubnetTxFee -} - -func (c *context) TransformSubnetTxFee() uint64 { - return c.transformSubnetTxFee -} - -func (c *context) CreateBlockchainTxFee() uint64 { - return c.createBlockchainTxFee -} - -func (c *context) AddPrimaryNetworkValidatorFee() uint64 { - return c.addPrimaryNetworkValidatorFee -} - -func (c *context) AddPrimaryNetworkDelegatorFee() uint64 { - return c.addPrimaryNetworkDelegatorFee -} - -func (c *context) AddSubnetValidatorFee() uint64 { - return c.addSubnetValidatorFee -} - -func (c *context) AddSubnetDelegatorFee() uint64 { - return c.addSubnetDelegatorFee -} - -func newSnowContext(c Context) (*snow.Context, error) { - lookup := ids.NewAliaser() - return &snow.Context{ - NetworkID: c.NetworkID(), - SubnetID: constants.PrimaryNetworkID, - ChainID: constants.PlatformChainID, 
- AVAXAssetID: c.AVAXAssetID(), - Log: logging.NoLog{}, - BCLookup: lookup, - }, lookup.Alias(constants.PlatformChainID, Alias) -} diff --git a/wallet/chain/p/signer.go b/wallet/chain/p/signer/signer.go similarity index 89% rename from wallet/chain/p/signer.go rename to wallet/chain/p/signer/signer.go index bedbbdbf562a..08b3a9d9963b 100644 --- a/wallet/chain/p/signer.go +++ b/wallet/chain/p/signer/signer.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package p +package signer import ( "github.com/ava-labs/avalanchego/ids" @@ -27,17 +27,17 @@ type Signer interface { Sign(ctx stdcontext.Context, tx *txs.Tx) error } -type SignerBackend interface { +type Backend interface { GetUTXO(ctx stdcontext.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) GetSubnetOwner(ctx stdcontext.Context, subnetID ids.ID) (fx.Owner, error) } type txSigner struct { kc keychain.Keychain - backend SignerBackend + backend Backend } -func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { +func New(kc keychain.Keychain, backend Backend) Signer { return &txSigner{ kc: kc, backend: backend, @@ -45,7 +45,7 @@ func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { } func (s *txSigner) Sign(ctx stdcontext.Context, tx *txs.Tx) error { - return tx.Unsigned.Visit(&signerVisitor{ + return tx.Unsigned.Visit(&visitor{ kc: s.kc, backend: s.backend, ctx: ctx, diff --git a/wallet/chain/p/signer_visitor.go b/wallet/chain/p/signer/visitor.go similarity index 77% rename from wallet/chain/p/signer_visitor.go rename to wallet/chain/p/signer/visitor.go index 7c9dd4cb95ca..5dd4abe2b7d2 100644 --- a/wallet/chain/p/signer_visitor.go +++ b/wallet/chain/p/signer/visitor.go @@ -1,9 +1,10 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package p +package signer import ( + "context" "errors" "fmt" @@ -18,40 +19,39 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - stdcontext "context" ) var ( - _ txs.Visitor = (*signerVisitor)(nil) + _ txs.Visitor = (*visitor)(nil) - errUnsupportedTxType = errors.New("unsupported tx type") - errUnknownInputType = errors.New("unknown input type") - errUnknownCredentialType = errors.New("unknown credential type") - errUnknownOutputType = errors.New("unknown output type") - errUnknownSubnetAuthType = errors.New("unknown subnet auth type") - errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") + ErrUnsupportedTxType = errors.New("unsupported tx type") + ErrUnknownInputType = errors.New("unknown input type") + ErrUnknownOutputType = errors.New("unknown output type") + ErrInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") + ErrUnknownSubnetAuthType = errors.New("unknown subnet auth type") + ErrUnknownOwnerType = errors.New("unknown owner type") + ErrUnknownCredentialType = errors.New("unknown credential type") emptySig [secp256k1.SignatureLen]byte ) -// signerVisitor handles signing transactions for the signer -type signerVisitor struct { +// visitor handles signing transactions for the signer +type visitor struct { kc keychain.Keychain - backend SignerBackend - ctx stdcontext.Context + backend Backend + ctx context.Context tx *txs.Tx } -func (*signerVisitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return errUnsupportedTxType +func (*visitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + return ErrUnsupportedTxType } -func (*signerVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { - return errUnsupportedTxType +func (*visitor) RewardValidatorTx(*txs.RewardValidatorTx) error { + return ErrUnsupportedTxType } -func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { +func (s *visitor) BaseTx(tx *txs.BaseTx) 
error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -59,7 +59,7 @@ func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { return sign(s.tx, false, txSigners) } -func (s *signerVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { +func (s *visitor) AddValidatorTx(tx *txs.AddValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -67,7 +67,7 @@ func (s *signerVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { return sign(s.tx, false, txSigners) } -func (s *signerVisitor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { +func (s *visitor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -80,7 +80,7 @@ func (s *signerVisitor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error return sign(s.tx, false, txSigners) } -func (s *signerVisitor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { +func (s *visitor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -88,7 +88,7 @@ func (s *signerVisitor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { return sign(s.tx, false, txSigners) } -func (s *signerVisitor) CreateChainTx(tx *txs.CreateChainTx) error { +func (s *visitor) CreateChainTx(tx *txs.CreateChainTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -101,7 +101,7 @@ func (s *signerVisitor) CreateChainTx(tx *txs.CreateChainTx) error { return sign(s.tx, false, txSigners) } -func (s *signerVisitor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { +func (s *visitor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -109,7 +109,7 @@ func (s *signerVisitor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { 
return sign(s.tx, false, txSigners) } -func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { +func (s *visitor) ImportTx(tx *txs.ImportTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -122,7 +122,7 @@ func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { return sign(s.tx, false, txSigners) } -func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { +func (s *visitor) ExportTx(tx *txs.ExportTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -130,7 +130,7 @@ func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { return sign(s.tx, false, txSigners) } -func (s *signerVisitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { +func (s *visitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -143,7 +143,7 @@ func (s *signerVisitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) return sign(s.tx, true, txSigners) } -func (s *signerVisitor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { +func (s *visitor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -156,7 +156,7 @@ func (s *signerVisitor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershi return sign(s.tx, true, txSigners) } -func (s *signerVisitor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { +func (s *visitor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -169,7 +169,7 @@ func (s *signerVisitor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { return sign(s.tx, true, txSigners) } -func (s *signerVisitor) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { +func (s *visitor) 
AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -177,7 +177,7 @@ func (s *signerVisitor) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessVa return sign(s.tx, true, txSigners) } -func (s *signerVisitor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { +func (s *visitor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err @@ -185,7 +185,7 @@ func (s *signerVisitor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDe return sign(s.tx, true, txSigners) } -func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { +func (s *visitor) getSigners(sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { txSigners := make([][]keychain.Signer, len(ins)) for credIndex, transferInput := range ins { inIntf := transferInput.In @@ -195,7 +195,7 @@ func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.Transferabl input, ok := inIntf.(*secp256k1fx.TransferInput) if !ok { - return nil, errUnknownInputType + return nil, ErrUnknownInputType } inputSigners := make([]keychain.Signer, len(input.SigIndices)) @@ -219,12 +219,12 @@ func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.Transferabl out, ok := outIntf.(*secp256k1fx.TransferOutput) if !ok { - return nil, errUnknownOutputType + return nil, ErrUnknownOutputType } for sigIndex, addrIndex := range input.SigIndices { if addrIndex >= uint32(len(out.Addrs)) { - return nil, errInvalidUTXOSigIndex + return nil, ErrInvalidUTXOSigIndex } addr := out.Addrs[addrIndex] @@ -240,10 +240,10 @@ func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.Transferabl return txSigners, nil } -func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, 
subnetAuth verify.Verifiable) ([]keychain.Signer, error) { +func (s *visitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Verifiable) ([]keychain.Signer, error) { subnetInput, ok := subnetAuth.(*secp256k1fx.Input) if !ok { - return nil, errUnknownSubnetAuthType + return nil, ErrUnknownSubnetAuthType } ownerIntf, err := s.backend.GetSubnetOwner(s.ctx, subnetID) @@ -256,13 +256,13 @@ func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Veri } owner, ok := ownerIntf.(*secp256k1fx.OutputOwners) if !ok { - return nil, errUnknownOwnerType + return nil, ErrUnknownOwnerType } authSigners := make([]keychain.Signer, len(subnetInput.SigIndices)) for sigIndex, addrIndex := range subnetInput.SigIndices { if addrIndex >= uint32(len(owner.Addrs)) { - return nil, errInvalidUTXOSigIndex + return nil, ErrInvalidUTXOSigIndex } addr := owner.Addrs[addrIndex] @@ -299,7 +299,7 @@ func sign(tx *txs.Tx, signHash bool, txSigners [][]keychain.Signer) error { cred, ok := credIntf.(*secp256k1fx.Credential) if !ok { - return errUnknownCredentialType + return ErrUnknownCredentialType } if expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { cred.Sigs = make([][secp256k1.SignatureLen]byte, expectedLen) diff --git a/wallet/chain/p/wallet.go b/wallet/chain/p/wallet.go index 44cc7e2a4da4..2a23e8fdd131 100644 --- a/wallet/chain/p/wallet.go +++ b/wallet/chain/p/wallet.go @@ -5,37 +5,34 @@ package p import ( "errors" - "fmt" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + vmsigner 
"github.com/ava-labs/avalanchego/vms/platformvm/signer" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) var ( - errNotCommitted = errors.New("not committed") + ErrNotCommitted = errors.New("not committed") _ Wallet = (*wallet)(nil) ) type Wallet interface { - Context - // Builder returns the builder that will be used to create the transactions. - Builder() Builder + Builder() builder.Builder // Signer returns the signer that will be used to sign the transactions. - Signer() Signer + Signer() walletsigner.Signer // IssueBaseTx creates, signs, and issues a new simple value transfer. - // Because the P-chain doesn't intend for balance transfers to occur, this - // method is expensive and abuses the creation of subnets. // // - [outputs] specifies all the recipients and amounts that should be sent // from this transaction. @@ -221,7 +218,7 @@ type Wallet interface { // the delegation reward will be sent to the validator's [rewardsOwner]. IssueAddPermissionlessValidatorTx( vdr *txs.SubnetValidator, - signer signer.Signer, + signer vmsigner.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, delegationRewardsOwner *secp256k1fx.OutputOwners, @@ -258,8 +255,8 @@ type Wallet interface { } func NewWallet( - builder Builder, - signer Signer, + builder builder.Builder, + signer walletsigner.Signer, client platformvm.Client, backend Backend, ) Wallet { @@ -273,16 +270,16 @@ func NewWallet( type wallet struct { Backend - builder Builder - signer Signer + builder builder.Builder + signer walletsigner.Signer client platformvm.Client } -func (w *wallet) Builder() Builder { +func (w *wallet) Builder() builder.Builder { return w.builder } -func (w *wallet) Signer() Signer { +func (w *wallet) Signer() walletsigner.Signer { return w.signer } @@ -449,7 +446,7 @@ func (w *wallet) IssueTransformSubnetTx( func (w *wallet) IssueAddPermissionlessValidatorTx( vdr *txs.SubnetValidator, - signer signer.Signer, + signer vmsigner.Signer, assetID 
ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, delegationRewardsOwner *secp256k1fx.OutputOwners, @@ -495,7 +492,7 @@ func (w *wallet) IssueUnsignedTx( ) (*txs.Tx, error) { ops := common.NewOptions(options) ctx := ops.Context() - tx, err := SignUnsigned(ctx, w.signer, utx) + tx, err := walletsigner.SignUnsigned(ctx, w.signer, utx) if err != nil { return nil, err } @@ -522,17 +519,9 @@ func (w *wallet) IssueTx( return w.Backend.AcceptTx(ctx, tx) } - txStatus, err := w.client.AwaitTxDecided(ctx, txID, ops.PollFrequency()) - if err != nil { + if err := platformvm.AwaitTxAccepted(w.client, ctx, txID, ops.PollFrequency()); err != nil { return err } - if err := w.Backend.AcceptTx(ctx, tx); err != nil { - return err - } - - if txStatus.Status != status.Committed { - return fmt.Errorf("%w: %s", errNotCommitted, txStatus.Reason) - } - return nil + return w.Backend.AcceptTx(ctx, tx) } diff --git a/wallet/chain/p/wallet_with_options.go b/wallet/chain/p/wallet_with_options.go index 4982e77f8a51..92965f2e4f1f 100644 --- a/wallet/chain/p/wallet_with_options.go +++ b/wallet/chain/p/wallet_with_options.go @@ -8,10 +8,13 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + vmsigner "github.com/ava-labs/avalanchego/vms/platformvm/signer" + walletsigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" ) var _ Wallet = (*walletWithOptions)(nil) @@ -21,28 +24,32 @@ func NewWalletWithOptions( options ...common.Option, ) Wallet { return &walletWithOptions{ - Wallet: wallet, + wallet: wallet, options: options, } } type walletWithOptions struct { - Wallet + wallet Wallet options []common.Option } -func (w *walletWithOptions) Builder() 
Builder { - return NewBuilderWithOptions( - w.Wallet.Builder(), +func (w *walletWithOptions) Builder() builder.Builder { + return builder.NewWithOptions( + w.wallet.Builder(), w.options..., ) } +func (w *walletWithOptions) Signer() walletsigner.Signer { + return w.wallet.Signer() +} + func (w *walletWithOptions) IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueBaseTx( + return w.wallet.IssueBaseTx( outputs, common.UnionOptions(w.options, options)..., ) @@ -54,7 +61,7 @@ func (w *walletWithOptions) IssueAddValidatorTx( shares uint32, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueAddValidatorTx( + return w.wallet.IssueAddValidatorTx( vdr, rewardsOwner, shares, @@ -66,7 +73,7 @@ func (w *walletWithOptions) IssueAddSubnetValidatorTx( vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueAddSubnetValidatorTx( + return w.wallet.IssueAddSubnetValidatorTx( vdr, common.UnionOptions(w.options, options)..., ) @@ -77,7 +84,7 @@ func (w *walletWithOptions) IssueRemoveSubnetValidatorTx( subnetID ids.ID, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueRemoveSubnetValidatorTx( + return w.wallet.IssueRemoveSubnetValidatorTx( nodeID, subnetID, common.UnionOptions(w.options, options)..., @@ -89,7 +96,7 @@ func (w *walletWithOptions) IssueAddDelegatorTx( rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueAddDelegatorTx( + return w.wallet.IssueAddDelegatorTx( vdr, rewardsOwner, common.UnionOptions(w.options, options)..., @@ -104,7 +111,7 @@ func (w *walletWithOptions) IssueCreateChainTx( chainName string, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueCreateChainTx( + return w.wallet.IssueCreateChainTx( subnetID, genesis, vmID, @@ -118,7 +125,7 @@ func (w *walletWithOptions) IssueCreateSubnetTx( owner *secp256k1fx.OutputOwners, options 
...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueCreateSubnetTx( + return w.wallet.IssueCreateSubnetTx( owner, common.UnionOptions(w.options, options)..., ) @@ -129,7 +136,7 @@ func (w *walletWithOptions) IssueTransferSubnetOwnershipTx( owner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueTransferSubnetOwnershipTx( + return w.wallet.IssueTransferSubnetOwnershipTx( subnetID, owner, common.UnionOptions(w.options, options)..., @@ -141,7 +148,7 @@ func (w *walletWithOptions) IssueImportTx( to *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueImportTx( + return w.wallet.IssueImportTx( sourceChainID, to, common.UnionOptions(w.options, options)..., @@ -153,7 +160,7 @@ func (w *walletWithOptions) IssueExportTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueExportTx( + return w.wallet.IssueExportTx( chainID, outputs, common.UnionOptions(w.options, options)..., @@ -177,7 +184,7 @@ func (w *walletWithOptions) IssueTransformSubnetTx( uptimeRequirement uint32, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueTransformSubnetTx( + return w.wallet.IssueTransformSubnetTx( subnetID, assetID, initialSupply, @@ -198,14 +205,14 @@ func (w *walletWithOptions) IssueTransformSubnetTx( func (w *walletWithOptions) IssueAddPermissionlessValidatorTx( vdr *txs.SubnetValidator, - signer signer.Signer, + signer vmsigner.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, delegationRewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueAddPermissionlessValidatorTx( + return w.wallet.IssueAddPermissionlessValidatorTx( vdr, signer, assetID, @@ -222,7 +229,7 @@ func (w *walletWithOptions) IssueAddPermissionlessDelegatorTx( rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return 
w.Wallet.IssueAddPermissionlessDelegatorTx( + return w.wallet.IssueAddPermissionlessDelegatorTx( vdr, assetID, rewardsOwner, @@ -234,7 +241,7 @@ func (w *walletWithOptions) IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueUnsignedTx( + return w.wallet.IssueUnsignedTx( utx, common.UnionOptions(w.options, options)..., ) @@ -244,7 +251,7 @@ func (w *walletWithOptions) IssueTx( tx *txs.Tx, options ...common.Option, ) error { - return w.Wallet.IssueTx( + return w.wallet.IssueTx( tx, common.UnionOptions(w.options, options)..., ) diff --git a/wallet/chain/x/backend.go b/wallet/chain/x/backend.go index a87e799fa911..e73cb1620e31 100644 --- a/wallet/chain/x/backend.go +++ b/wallet/chain/x/backend.go @@ -4,10 +4,12 @@ package x import ( + "context" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - - stdcontext "context" ) var _ Backend = (*backend)(nil) @@ -15,25 +17,26 @@ var _ Backend = (*backend)(nil) // Backend defines the full interface required to support an X-chain wallet. 
type Backend interface { common.ChainUTXOs - BuilderBackend - SignerBackend + builder.Backend + signer.Backend - AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error + AcceptTx(ctx context.Context, tx *txs.Tx) error } type backend struct { - Context common.ChainUTXOs + + context *builder.Context } -func NewBackend(ctx Context, utxos common.ChainUTXOs) Backend { +func NewBackend(context *builder.Context, utxos common.ChainUTXOs) Backend { return &backend{ - Context: ctx, ChainUTXOs: utxos, + context: context, } } -func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { +func (b *backend) AcceptTx(ctx context.Context, tx *txs.Tx) error { err := tx.Unsigned.Visit(&backendVisitor{ b: b, ctx: ctx, @@ -43,7 +46,7 @@ func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { return err } - chainID := b.Context.BlockchainID() + chainID := b.context.BlockchainID inputUTXOs := tx.Unsigned.InputUTXOs() for _, utxoID := range inputUTXOs { if utxoID.Symbol { diff --git a/wallet/chain/x/backend_visitor.go b/wallet/chain/x/backend_visitor.go index 0bf9ac040a97..4809bf2e06de 100644 --- a/wallet/chain/x/backend_visitor.go +++ b/wallet/chain/x/backend_visitor.go @@ -4,11 +4,11 @@ package x import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" - - stdcontext "context" ) var _ txs.Visitor = (*backendVisitor)(nil) @@ -16,7 +16,7 @@ var _ txs.Visitor = (*backendVisitor)(nil) // backendVisitor handles accepting of transactions for the backend type backendVisitor struct { b *backend - ctx stdcontext.Context + ctx context.Context txID ids.ID } diff --git a/wallet/chain/x/builder.go b/wallet/chain/x/builder/builder.go similarity index 91% rename from wallet/chain/x/builder.go rename to wallet/chain/x/builder/builder.go index 330ed69a2504..20411e6e4ce7 100644 --- a/wallet/chain/x/builder.go +++ b/wallet/chain/x/builder/builder.go @@ -1,9 +1,10 @@ // Copyright 
(C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package x +package builder import ( + "context" "errors" "fmt" @@ -18,8 +19,6 @@ import ( "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" - - stdcontext "context" ) var ( @@ -27,9 +26,9 @@ var ( errInsufficientFunds = errors.New("insufficient funds") fxIndexToID = map[uint32]ids.ID{ - 0: secp256k1fx.ID, - 1: nftfx.ID, - 2: propertyfx.ID, + SECP256K1FxIndex: secp256k1fx.ID, + NFTFxIndex: nftfx.ID, + PropertyFxIndex: propertyfx.ID, } _ Builder = (*builder)(nil) @@ -38,6 +37,10 @@ var ( // Builder provides a convenient interface for building unsigned X-chain // transactions. type Builder interface { + // Context returns the configuration of the chain that this builder uses to + // create transactions. + Context() *Context + // GetFTBalance calculates the amount of each fungible asset that this // builder has control over. GetFTBalance( @@ -154,37 +157,43 @@ type Builder interface { ) (*txs.ExportTx, error) } -// BuilderBackend specifies the required information needed to build unsigned -// X-chain transactions. -type BuilderBackend interface { - Context - - UTXOs(ctx stdcontext.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) +type Backend interface { + UTXOs(ctx context.Context, sourceChainID ids.ID) ([]*avax.UTXO, error) } type builder struct { addrs set.Set[ids.ShortID] - backend BuilderBackend + context *Context + backend Backend } -// NewBuilder returns a new transaction builder. +// New returns a new transaction builder. // // - [addrs] is the set of addresses that the builder assumes can be used when // signing the transactions in the future. -// - [backend] provides the required access to the chain's context and state -// to build out the transactions. 
-func NewBuilder(addrs set.Set[ids.ShortID], backend BuilderBackend) Builder { +// - [context] provides the chain's configuration. +// - [backend] provides the chain's state. +func New( + addrs set.Set[ids.ShortID], + context *Context, + backend Backend, +) Builder { return &builder{ addrs: addrs, + context: context, backend: backend, } } +func (b *builder) Context() *Context { + return b.context +} + func (b *builder) GetFTBalance( options ...common.Option, ) (map[ids.ID]uint64, error) { ops := common.NewOptions(options) - return b.getBalance(b.backend.BlockchainID(), ops) + return b.getBalance(b.context.BlockchainID, ops) } func (b *builder) GetImportableBalance( @@ -200,7 +209,7 @@ func (b *builder) NewBaseTx( options ...common.Option, ) (*txs.BaseTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } for _, out := range outputs { assetID := out.AssetID() @@ -220,8 +229,8 @@ func (b *builder) NewBaseTx( avax.SortTransferableOutputs(outputs, Parser.Codec()) // sort the outputs tx := &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, Ins: inputs, Outs: outputs, Memo: ops.Memo(), @@ -237,7 +246,7 @@ func (b *builder) NewCreateAssetTx( options ...common.Option, ) (*txs.CreateAssetTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.CreateAssetTxFee(), + b.context.AVAXAssetID: b.context.CreateAssetTxFee, } ops := common.NewOptions(options) inputs, outputs, err := b.spend(toBurn, ops) @@ -260,8 +269,8 @@ func (b *builder) NewCreateAssetTx( utils.Sort(states) // sort the initial states tx := &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, Ins: inputs, Outs: outputs, 
Memo: ops.Memo(), @@ -279,7 +288,7 @@ func (b *builder) NewOperationTx( options ...common.Option, ) (*txs.OperationTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } ops := common.NewOptions(options) inputs, outputs, err := b.spend(toBurn, ops) @@ -290,8 +299,8 @@ func (b *builder) NewOperationTx( txs.SortOperations(operations, Parser.Codec()) tx := &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, Ins: inputs, Outs: outputs, Memo: ops.Memo(), @@ -366,8 +375,8 @@ func (b *builder) NewImportTx( var ( addrs = ops.Addresses(b.addrs) minIssuanceTime = ops.MinIssuanceTime() - avaxAssetID = b.backend.AVAXAssetID() - txFee = b.backend.BaseTxFee() + avaxAssetID = b.context.AVAXAssetID + txFee = b.context.BaseTxFee importedInputs = make([]*avax.TransferableInput, 0, len(utxos)) importedAmounts = make(map[ids.ID]uint64) @@ -449,8 +458,8 @@ func (b *builder) NewImportTx( avax.SortTransferableOutputs(outputs, Parser.Codec()) tx := &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, + BlockchainID: b.context.BlockchainID, Ins: inputs, Outs: outputs, Memo: ops.Memo(), @@ -467,7 +476,7 @@ func (b *builder) NewExportTx( options ...common.Option, ) (*txs.ExportTx, error) { toBurn := map[ids.ID]uint64{ - b.backend.AVAXAssetID(): b.backend.BaseTxFee(), + b.context.AVAXAssetID: b.context.BaseTxFee, } for _, out := range outputs { assetID := out.AssetID() @@ -487,8 +496,8 @@ func (b *builder) NewExportTx( avax.SortTransferableOutputs(outputs, Parser.Codec()) tx := &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: b.backend.NetworkID(), - BlockchainID: b.backend.BlockchainID(), + NetworkID: b.context.NetworkID, 
+ BlockchainID: b.context.BlockchainID, Ins: inputs, Outs: changeOutputs, Memo: ops.Memo(), @@ -547,7 +556,7 @@ func (b *builder) spend( outputs []*avax.TransferableOutput, err error, ) { - utxos, err := b.backend.UTXOs(options.Context(), b.backend.BlockchainID()) + utxos, err := b.backend.UTXOs(options.Context(), b.context.BlockchainID) if err != nil { return nil, nil, err } @@ -642,7 +651,7 @@ func (b *builder) mintFTs( operations []*txs.Operation, err error, ) { - utxos, err := b.backend.UTXOs(options.Context(), b.backend.BlockchainID()) + utxos, err := b.backend.UTXOs(options.Context(), b.context.BlockchainID) if err != nil { return nil, err } @@ -705,7 +714,7 @@ func (b *builder) mintNFTs( operations []*txs.Operation, err error, ) { - utxos, err := b.backend.UTXOs(options.Context(), b.backend.BlockchainID()) + utxos, err := b.backend.UTXOs(options.Context(), b.context.BlockchainID) if err != nil { return nil, err } @@ -762,7 +771,7 @@ func (b *builder) mintProperty( operations []*txs.Operation, err error, ) { - utxos, err := b.backend.UTXOs(options.Context(), b.backend.BlockchainID()) + utxos, err := b.backend.UTXOs(options.Context(), b.context.BlockchainID) if err != nil { return nil, err } @@ -819,7 +828,7 @@ func (b *builder) burnProperty( operations []*txs.Operation, err error, ) { - utxos, err := b.backend.UTXOs(options.Context(), b.backend.BlockchainID()) + utxos, err := b.backend.UTXOs(options.Context(), b.context.BlockchainID) if err != nil { return nil, err } @@ -868,7 +877,11 @@ func (b *builder) burnProperty( } func (b *builder) initCtx(tx txs.UnsignedTx) error { - ctx, err := newSnowContext(b.backend) + ctx, err := NewSnowContext( + b.context.NetworkID, + b.context.BlockchainID, + b.context.AVAXAssetID, + ) if err != nil { return err } diff --git a/wallet/chain/x/builder_with_options.go b/wallet/chain/x/builder/builder_with_options.go similarity index 81% rename from wallet/chain/x/builder_with_options.go rename to 
wallet/chain/x/builder/builder_with_options.go index c2b65b05a630..a788ccee33c6 100644 --- a/wallet/chain/x/builder_with_options.go +++ b/wallet/chain/x/builder/builder_with_options.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package x +package builder import ( "github.com/ava-labs/avalanchego/ids" @@ -15,28 +15,32 @@ import ( var _ Builder = (*builderWithOptions)(nil) type builderWithOptions struct { - Builder + builder Builder options []common.Option } -// NewBuilderWithOptions returns a new transaction builder that will use the -// given options by default. +// NewWithOptions returns a new transaction builder that will use the given +// options by default. // // - [builder] is the builder that will be called to perform the underlying // operations. // - [options] will be provided to the builder in addition to the options // provided in the method calls. -func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { +func NewWithOptions(builder Builder, options ...common.Option) Builder { return &builderWithOptions{ - Builder: builder, + builder: builder, options: options, } } +func (b *builderWithOptions) Context() *Context { + return b.builder.Context() +} + func (b *builderWithOptions) GetFTBalance( options ...common.Option, ) (map[ids.ID]uint64, error) { - return b.Builder.GetFTBalance( + return b.builder.GetFTBalance( common.UnionOptions(b.options, options)..., ) } @@ -45,7 +49,7 @@ func (b *builderWithOptions) GetImportableBalance( chainID ids.ID, options ...common.Option, ) (map[ids.ID]uint64, error) { - return b.Builder.GetImportableBalance( + return b.builder.GetImportableBalance( chainID, common.UnionOptions(b.options, options)..., ) @@ -55,7 +59,7 @@ func (b *builderWithOptions) NewBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.BaseTx, error) { - return b.Builder.NewBaseTx( + return b.builder.NewBaseTx( outputs, 
common.UnionOptions(b.options, options)..., ) @@ -68,7 +72,7 @@ func (b *builderWithOptions) NewCreateAssetTx( initialState map[uint32][]verify.State, options ...common.Option, ) (*txs.CreateAssetTx, error) { - return b.Builder.NewCreateAssetTx( + return b.builder.NewCreateAssetTx( name, symbol, denomination, @@ -81,7 +85,7 @@ func (b *builderWithOptions) NewOperationTx( operations []*txs.Operation, options ...common.Option, ) (*txs.OperationTx, error) { - return b.Builder.NewOperationTx( + return b.builder.NewOperationTx( operations, common.UnionOptions(b.options, options)..., ) @@ -91,7 +95,7 @@ func (b *builderWithOptions) NewOperationTxMintFT( outputs map[ids.ID]*secp256k1fx.TransferOutput, options ...common.Option, ) (*txs.OperationTx, error) { - return b.Builder.NewOperationTxMintFT( + return b.builder.NewOperationTxMintFT( outputs, common.UnionOptions(b.options, options)..., ) @@ -103,7 +107,7 @@ func (b *builderWithOptions) NewOperationTxMintNFT( owners []*secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.OperationTx, error) { - return b.Builder.NewOperationTxMintNFT( + return b.builder.NewOperationTxMintNFT( assetID, payload, owners, @@ -116,7 +120,7 @@ func (b *builderWithOptions) NewOperationTxMintProperty( owner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.OperationTx, error) { - return b.Builder.NewOperationTxMintProperty( + return b.builder.NewOperationTxMintProperty( assetID, owner, common.UnionOptions(b.options, options)..., @@ -127,7 +131,7 @@ func (b *builderWithOptions) NewOperationTxBurnProperty( assetID ids.ID, options ...common.Option, ) (*txs.OperationTx, error) { - return b.Builder.NewOperationTxBurnProperty( + return b.builder.NewOperationTxBurnProperty( assetID, common.UnionOptions(b.options, options)..., ) @@ -138,7 +142,7 @@ func (b *builderWithOptions) NewImportTx( to *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.ImportTx, error) { - return b.Builder.NewImportTx( + return b.builder.NewImportTx( 
chainID, to, common.UnionOptions(b.options, options)..., @@ -150,7 +154,7 @@ func (b *builderWithOptions) NewExportTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.ExportTx, error) { - return b.Builder.NewExportTx( + return b.builder.NewExportTx( chainID, outputs, common.UnionOptions(b.options, options)..., diff --git a/wallet/chain/x/constants.go b/wallet/chain/x/builder/constants.go similarity index 95% rename from wallet/chain/x/constants.go rename to wallet/chain/x/builder/constants.go index 47efbfc2ff6d..2fc1b1132b93 100644 --- a/wallet/chain/x/constants.go +++ b/wallet/chain/x/builder/constants.go @@ -1,11 +1,9 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package x +package builder import ( - "time" - "github.com/ava-labs/avalanchego/vms/avm/block" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -25,7 +23,6 @@ var Parser block.Parser func init() { var err error Parser, err = block.NewParser( - time.Time{}, []fxs.Fx{ &secp256k1fx.Fx{}, &nftfx.Fx{}, diff --git a/wallet/chain/x/builder/context.go b/wallet/chain/x/builder/context.go new file mode 100644 index 000000000000..6a072cf6230b --- /dev/null +++ b/wallet/chain/x/builder/context.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package builder + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" +) + +const Alias = "X" + +type Context struct { + NetworkID uint32 + BlockchainID ids.ID + AVAXAssetID ids.ID + BaseTxFee uint64 + CreateAssetTxFee uint64 +} + +func NewSnowContext( + networkID uint32, + blockchainID ids.ID, + avaxAssetID ids.ID, +) (*snow.Context, error) { + lookup := ids.NewAliaser() + return &snow.Context{ + NetworkID: networkID, + SubnetID: constants.PrimaryNetworkID, + ChainID: blockchainID, + XChainID: blockchainID, + AVAXAssetID: avaxAssetID, + Log: logging.NoLog{}, + BCLookup: lookup, + }, lookup.Alias(blockchainID, Alias) +} diff --git a/wallet/chain/x/builder_test.go b/wallet/chain/x/builder_test.go index f4eb916b693c..801046c185ca 100644 --- a/wallet/chain/x/builder_test.go +++ b/wallet/chain/x/builder_test.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -31,13 +32,13 @@ var ( nftAssetID = ids.Empty.Prefix(2022) propertyAssetID = ids.Empty.Prefix(2023) - testCtx = NewContext( - constants.UnitTestID, - xChainID, - avaxAssetID, - units.MicroAvax, // BaseTxFee - 99*units.MilliAvax, // CreateAssetTxFee - ) + testContext = &builder.Context{ + NetworkID: constants.UnitTestID, + BlockchainID: xChainID, + AVAXAssetID: avaxAssetID, + BaseTxFee: units.MicroAvax, + CreateAssetTxFee: 99 * units.MilliAvax, + } ) // These tests create and sign a tx, then verify that utxos included @@ -56,11 +57,11 @@ func TestBaseTx(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - 
builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction outputsToMove = []*avax.TransferableOutput{{ @@ -86,7 +87,7 @@ func TestBaseTx(t *testing.T) { require.Len(ins, 2) require.Len(outs, 2) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() - outs[1].Out.Amount() require.Equal(expectedConsumed, consumed) require.Equal(outputsToMove[0], outs[1]) @@ -105,11 +106,11 @@ func TestCreateAssetTx(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction assetName = "Team Rocket" @@ -176,7 +177,7 @@ func TestCreateAssetTx(t *testing.T) { require.Len(ins, 2) require.Len(outs, 1) - expectedConsumed := testCtx.CreateAssetTxFee() + expectedConsumed := testContext.CreateAssetTxFee consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -194,11 +195,11 @@ func TestMintNFTOperation(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction payload = []byte{'h', 'e', 'l', 'l', 'o'} @@ -221,7 +222,7 @@ func TestMintNFTOperation(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -239,11 +240,11 @@ func 
TestMintFTOperation(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction outputs = map[ids.ID]*secp256k1fx.TransferOutput{ @@ -268,7 +269,7 @@ func TestMintFTOperation(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -286,11 +287,11 @@ func TestMintPropertyOperation(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction propertyOwner = &secp256k1fx.OutputOwners{ @@ -311,7 +312,7 @@ func TestMintPropertyOperation(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -329,11 +330,11 @@ func TestBurnPropertyOperation(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) ) utx, err := builder.NewOperationTxBurnProperty( @@ -347,7 +348,7 @@ func TestBurnPropertyOperation(t *testing.T) { require.Len(ins, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := 
ins[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -369,11 +370,11 @@ func TestImportTx(t *testing.T) { }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction importKey = testKeys[0] @@ -399,7 +400,7 @@ func TestImportTx(t *testing.T) { require.Len(importedIns, 1) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + expectedConsumed := testContext.BaseTxFee consumed := importedIns[0].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) } @@ -417,11 +418,11 @@ func TestExportTx(t *testing.T) { xChainID: utxos, }, ) - backend = NewBackend(testCtx, genericBackend) + backend = NewBackend(testContext, genericBackend) // builder utxoAddr = utxosKey.Address() - builder = NewBuilder(set.Of(utxoAddr), backend) + builder = builder.New(set.Of(utxoAddr), testContext, backend) // data to build the transaction subnetID = ids.GenerateTestID() @@ -449,7 +450,7 @@ func TestExportTx(t *testing.T) { require.Len(ins, 2) require.Len(outs, 1) - expectedConsumed := testCtx.BaseTxFee() + exportedOutputs[0].Out.Amount() + expectedConsumed := testContext.BaseTxFee + exportedOutputs[0].Out.Amount() consumed := ins[0].In.Amount() + ins[1].In.Amount() - outs[0].Out.Amount() require.Equal(expectedConsumed, consumed) require.Equal(utx.ExportedOuts, exportedOutputs) @@ -461,7 +462,7 @@ func makeTestUTXOs(utxosKey *secp256k1.PrivateKey) []*avax.UTXO { const utxosOffset uint64 = 2024 return []*avax.UTXO{ // currently, the wallet scans UTXOs in the order provided here - { // a small UTXO first, which should not be enough to pay fees + { // a small UTXO first, which should not be enough to pay fees UTXOID: avax.UTXOID{ TxID: ids.Empty.Prefix(utxosOffset), OutputIndex: uint32(utxosOffset), diff 
--git a/wallet/chain/x/context.go b/wallet/chain/x/context.go index 7218bc8c1436..7bd4e280a2cf 100644 --- a/wallet/chain/x/context.go +++ b/wallet/chain/x/context.go @@ -4,53 +4,30 @@ package x import ( + "context" + "github.com/ava-labs/avalanchego/api/info" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/avm" - - stdcontext "context" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" ) -const Alias = "X" - -var _ Context = (*context)(nil) - -type Context interface { - NetworkID() uint32 - BlockchainID() ids.ID - AVAXAssetID() ids.ID - BaseTxFee() uint64 - CreateAssetTxFee() uint64 -} - -type context struct { - networkID uint32 - blockchainID ids.ID - avaxAssetID ids.ID - baseTxFee uint64 - createAssetTxFee uint64 -} - -func NewContextFromURI(ctx stdcontext.Context, uri string) (Context, error) { +func NewContextFromURI(ctx context.Context, uri string) (*builder.Context, error) { infoClient := info.NewClient(uri) - xChainClient := avm.NewClient(uri, Alias) + xChainClient := avm.NewClient(uri, builder.Alias) return NewContextFromClients(ctx, infoClient, xChainClient) } func NewContextFromClients( - ctx stdcontext.Context, + ctx context.Context, infoClient info.Client, xChainClient avm.Client, -) (Context, error) { +) (*builder.Context, error) { networkID, err := infoClient.GetNetworkID(ctx) if err != nil { return nil, err } - chainID, err := infoClient.GetBlockchainID(ctx, Alias) + chainID, err := infoClient.GetBlockchainID(ctx, builder.Alias) if err != nil { return nil, err } @@ -65,61 +42,11 @@ func NewContextFromClients( return nil, err } - return NewContext( - networkID, - chainID, - asset.AssetID, - uint64(txFees.TxFee), - uint64(txFees.CreateAssetTxFee), - ), nil -} - -func NewContext( - networkID uint32, - blockchainID ids.ID, - avaxAssetID ids.ID, - baseTxFee uint64, - 
createAssetTxFee uint64, -) Context { - return &context{ - networkID: networkID, - blockchainID: blockchainID, - avaxAssetID: avaxAssetID, - baseTxFee: baseTxFee, - createAssetTxFee: createAssetTxFee, - } -} - -func (c *context) NetworkID() uint32 { - return c.networkID -} - -func (c *context) BlockchainID() ids.ID { - return c.blockchainID -} - -func (c *context) AVAXAssetID() ids.ID { - return c.avaxAssetID -} - -func (c *context) BaseTxFee() uint64 { - return c.baseTxFee -} - -func (c *context) CreateAssetTxFee() uint64 { - return c.createAssetTxFee -} - -func newSnowContext(c Context) (*snow.Context, error) { - chainID := c.BlockchainID() - lookup := ids.NewAliaser() - return &snow.Context{ - NetworkID: c.NetworkID(), - SubnetID: constants.PrimaryNetworkID, - ChainID: chainID, - XChainID: chainID, - AVAXAssetID: c.AVAXAssetID(), - Log: logging.NoLog{}, - BCLookup: lookup, - }, lookup.Alias(chainID, Alias) + return &builder.Context{ + NetworkID: networkID, + BlockchainID: chainID, + AVAXAssetID: asset.AssetID, + BaseTxFee: uint64(txFees.TxFee), + CreateAssetTxFee: uint64(txFees.CreateAssetTxFee), + }, nil } diff --git a/wallet/chain/x/signer.go b/wallet/chain/x/signer/signer.go similarity index 71% rename from wallet/chain/x/signer.go rename to wallet/chain/x/signer/signer.go index 9bc8734e46cd..1d3b943e6b87 100644 --- a/wallet/chain/x/signer.go +++ b/wallet/chain/x/signer/signer.go @@ -1,15 +1,15 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package x +package signer import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" - - stdcontext "context" ) var _ Signer = (*signer)(nil) @@ -23,27 +23,27 @@ type Signer interface { // // If the signer doesn't have the ability to provide a required signature, // the signature slot will be skipped without reporting an error. - Sign(ctx stdcontext.Context, tx *txs.Tx) error + Sign(ctx context.Context, tx *txs.Tx) error } -type SignerBackend interface { - GetUTXO(ctx stdcontext.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) +type Backend interface { + GetUTXO(ctx context.Context, chainID, utxoID ids.ID) (*avax.UTXO, error) } type signer struct { kc keychain.Keychain - backend SignerBackend + backend Backend } -func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { +func New(kc keychain.Keychain, backend Backend) Signer { return &signer{ kc: kc, backend: backend, } } -func (s *signer) Sign(ctx stdcontext.Context, tx *txs.Tx) error { - return tx.Unsigned.Visit(&signerVisitor{ +func (s *signer) Sign(ctx context.Context, tx *txs.Tx) error { + return tx.Unsigned.Visit(&visitor{ kc: s.kc, backend: s.backend, ctx: ctx, @@ -52,7 +52,7 @@ func (s *signer) Sign(ctx stdcontext.Context, tx *txs.Tx) error { } func SignUnsigned( - ctx stdcontext.Context, + ctx context.Context, signer Signer, utx txs.UnsignedTx, ) (*txs.Tx, error) { diff --git a/wallet/chain/x/signer_visitor.go b/wallet/chain/x/signer/visitor.go similarity index 82% rename from wallet/chain/x/signer_visitor.go rename to wallet/chain/x/signer/visitor.go index be442f5511f3..23a9940c6147 100644 --- a/wallet/chain/x/signer_visitor.go +++ b/wallet/chain/x/signer/visitor.go @@ -1,9 +1,10 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package x +package signer import ( + "context" "errors" "fmt" @@ -18,32 +19,31 @@ import ( "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - stdcontext "context" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" ) var ( - _ txs.Visitor = (*signerVisitor)(nil) + _ txs.Visitor = (*visitor)(nil) - errUnknownInputType = errors.New("unknown input type") - errUnknownOpType = errors.New("unknown operation type") - errInvalidNumUTXOsInOp = errors.New("invalid number of UTXOs in operation") - errUnknownCredentialType = errors.New("unknown credential type") - errUnknownOutputType = errors.New("unknown output type") - errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") + ErrUnknownInputType = errors.New("unknown input type") + ErrUnknownOpType = errors.New("unknown operation type") + ErrInvalidNumUTXOsInOp = errors.New("invalid number of UTXOs in operation") + ErrUnknownCredentialType = errors.New("unknown credential type") + ErrUnknownOutputType = errors.New("unknown output type") + ErrInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") emptySig [secp256k1.SignatureLen]byte ) -// signerVisitor handles signing transactions for the signer -type signerVisitor struct { +// visitor handles signing transactions for the signer +type visitor struct { kc keychain.Keychain - backend SignerBackend - ctx stdcontext.Context + backend Backend + ctx context.Context tx *txs.Tx } -func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { +func (s *visitor) BaseTx(tx *txs.BaseTx) error { txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) if err != nil { return err @@ -51,7 +51,7 @@ func (s *signerVisitor) BaseTx(tx *txs.BaseTx) error { return sign(s.tx, txCreds, txSigners) } -func (s *signerVisitor) CreateAssetTx(tx *txs.CreateAssetTx) error { +func (s *visitor) CreateAssetTx(tx *txs.CreateAssetTx) error { txCreds, txSigners, err := 
s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) if err != nil { return err @@ -59,7 +59,7 @@ func (s *signerVisitor) CreateAssetTx(tx *txs.CreateAssetTx) error { return sign(s.tx, txCreds, txSigners) } -func (s *signerVisitor) OperationTx(tx *txs.OperationTx) error { +func (s *visitor) OperationTx(tx *txs.OperationTx) error { txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) if err != nil { return err @@ -73,7 +73,7 @@ func (s *signerVisitor) OperationTx(tx *txs.OperationTx) error { return sign(s.tx, txCreds, txSigners) } -func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { +func (s *visitor) ImportTx(tx *txs.ImportTx) error { txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) if err != nil { return err @@ -87,7 +87,7 @@ func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { return sign(s.tx, txCreds, txSigners) } -func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { +func (s *visitor) ExportTx(tx *txs.ExportTx) error { txCreds, txSigners, err := s.getSigners(s.ctx, tx.BlockchainID, tx.Ins) if err != nil { return err @@ -95,14 +95,14 @@ func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { return sign(s.tx, txCreds, txSigners) } -func (s *signerVisitor) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([]verify.Verifiable, [][]keychain.Signer, error) { +func (s *visitor) getSigners(ctx context.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([]verify.Verifiable, [][]keychain.Signer, error) { txCreds := make([]verify.Verifiable, len(ins)) txSigners := make([][]keychain.Signer, len(ins)) for credIndex, transferInput := range ins { txCreds[credIndex] = &secp256k1fx.Credential{} input, ok := transferInput.In.(*secp256k1fx.TransferInput) if !ok { - return nil, nil, errUnknownInputType + return nil, nil, ErrUnknownInputType } inputSigners := make([]keychain.Signer, len(input.SigIndices)) @@ -121,12 +121,12 @@ func (s *signerVisitor) getSigners(ctx 
stdcontext.Context, sourceChainID ids.ID, out, ok := utxo.Out.(*secp256k1fx.TransferOutput) if !ok { - return nil, nil, errUnknownOutputType + return nil, nil, ErrUnknownOutputType } for sigIndex, addrIndex := range input.SigIndices { if addrIndex >= uint32(len(out.Addrs)) { - return nil, nil, errInvalidUTXOSigIndex + return nil, nil, ErrInvalidUTXOSigIndex } addr := out.Addrs[addrIndex] @@ -142,7 +142,7 @@ func (s *signerVisitor) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, return txCreds, txSigners, nil } -func (s *signerVisitor) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops []*txs.Operation) ([]verify.Verifiable, [][]keychain.Signer, error) { +func (s *visitor) getOpsSigners(ctx context.Context, sourceChainID ids.ID, ops []*txs.Operation) ([]verify.Verifiable, [][]keychain.Signer, error) { txCreds := make([]verify.Verifiable, len(ops)) txSigners := make([][]keychain.Signer, len(ops)) for credIndex, op := range ops { @@ -164,14 +164,14 @@ func (s *signerVisitor) getOpsSigners(ctx stdcontext.Context, sourceChainID ids. txCreds[credIndex] = &propertyfx.Credential{} input = &op.Input default: - return nil, nil, errUnknownOpType + return nil, nil, ErrUnknownOpType } inputSigners := make([]keychain.Signer, len(input.SigIndices)) txSigners[credIndex] = inputSigners if len(op.UTXOIDs) != 1 { - return nil, nil, errInvalidNumUTXOsInOp + return nil, nil, ErrInvalidNumUTXOsInOp } utxoID := op.UTXOIDs[0].InputID() utxo, err := s.backend.GetUTXO(ctx, sourceChainID, utxoID) @@ -197,12 +197,12 @@ func (s *signerVisitor) getOpsSigners(ctx stdcontext.Context, sourceChainID ids. 
case *propertyfx.OwnedOutput: addrs = out.Addrs default: - return nil, nil, errUnknownOutputType + return nil, nil, ErrUnknownOutputType } for sigIndex, addrIndex := range input.SigIndices { if addrIndex >= uint32(len(addrs)) { - return nil, nil, errInvalidUTXOSigIndex + return nil, nil, ErrInvalidUTXOSigIndex } addr := addrs[addrIndex] @@ -219,7 +219,7 @@ func (s *signerVisitor) getOpsSigners(ctx stdcontext.Context, sourceChainID ids. } func sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]keychain.Signer) error { - codec := Parser.Codec() + codec := builder.Parser.Codec() unsignedBytes, err := codec.Marshal(txs.CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal unsigned tx: %w", err) @@ -254,7 +254,7 @@ func sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]keychain.Signer) fxCred.FxID = propertyfx.ID cred = &credImpl.Credential default: - return errUnknownCredentialType + return ErrUnknownCredentialType } if expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { diff --git a/wallet/chain/x/wallet.go b/wallet/chain/x/wallet.go index 13491a241349..a5bac3e8b6ce 100644 --- a/wallet/chain/x/wallet.go +++ b/wallet/chain/x/wallet.go @@ -4,32 +4,25 @@ package x import ( - "errors" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var ( - errNotAccepted = errors.New("not accepted") - - _ Wallet = (*wallet)(nil) -) +var _ Wallet = (*wallet)(nil) type Wallet interface { - Context - // Builder returns the builder that will be used to create the 
transactions. - Builder() Builder + Builder() builder.Builder // Signer returns the signer that will be used to sign the transactions. - Signer() Signer + Signer() signer.Signer // IssueBaseTx creates, signs, and issues a new simple value transfer. // @@ -145,13 +138,13 @@ type Wallet interface { } func NewWallet( - builder Builder, - signer Signer, + builder builder.Builder, + signer signer.Signer, client avm.Client, backend Backend, ) Wallet { return &wallet{ - Backend: backend, + backend: backend, builder: builder, signer: signer, client: client, @@ -159,17 +152,17 @@ func NewWallet( } type wallet struct { - Backend - builder Builder - signer Signer + backend Backend + builder builder.Builder + signer signer.Signer client avm.Client } -func (w *wallet) Builder() Builder { +func (w *wallet) Builder() builder.Builder { return w.builder } -func (w *wallet) Signer() Signer { +func (w *wallet) Signer() signer.Signer { return w.signer } @@ -286,7 +279,7 @@ func (w *wallet) IssueUnsignedTx( ) (*txs.Tx, error) { ops := common.NewOptions(options) ctx := ops.Context() - tx, err := SignUnsigned(ctx, w.signer, utx) + tx, err := signer.SignUnsigned(ctx, w.signer, utx) if err != nil { return nil, err } @@ -310,20 +303,12 @@ func (w *wallet) IssueTx( } if ops.AssumeDecided() { - return w.Backend.AcceptTx(ctx, tx) + return w.backend.AcceptTx(ctx, tx) } - txStatus, err := w.client.ConfirmTx(ctx, txID, ops.PollFrequency()) - if err != nil { + if err := avm.AwaitTxAccepted(w.client, ctx, txID, ops.PollFrequency()); err != nil { return err } - if err := w.Backend.AcceptTx(ctx, tx); err != nil { - return err - } - - if txStatus != choices.Accepted { - return errNotAccepted - } - return nil + return w.backend.AcceptTx(ctx, tx) } diff --git a/wallet/chain/x/wallet_with_options.go b/wallet/chain/x/wallet_with_options.go index d62d02efdd40..33de453b7d61 100644 --- a/wallet/chain/x/wallet_with_options.go +++ b/wallet/chain/x/wallet_with_options.go @@ -9,6 +9,8 @@ import ( 
"github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + "github.com/ava-labs/avalanchego/wallet/chain/x/signer" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -19,28 +21,32 @@ func NewWalletWithOptions( options ...common.Option, ) Wallet { return &walletWithOptions{ - Wallet: wallet, + wallet: wallet, options: options, } } type walletWithOptions struct { - Wallet + wallet Wallet options []common.Option } -func (w *walletWithOptions) Builder() Builder { - return NewBuilderWithOptions( - w.Wallet.Builder(), +func (w *walletWithOptions) Builder() builder.Builder { + return builder.NewWithOptions( + w.wallet.Builder(), w.options..., ) } +func (w *walletWithOptions) Signer() signer.Signer { + return w.wallet.Signer() +} + func (w *walletWithOptions) IssueBaseTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueBaseTx( + return w.wallet.IssueBaseTx( outputs, common.UnionOptions(w.options, options)..., ) @@ -53,7 +59,7 @@ func (w *walletWithOptions) IssueCreateAssetTx( initialState map[uint32][]verify.State, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueCreateAssetTx( + return w.wallet.IssueCreateAssetTx( name, symbol, denomination, @@ -66,7 +72,7 @@ func (w *walletWithOptions) IssueOperationTx( operations []*txs.Operation, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueOperationTx( + return w.wallet.IssueOperationTx( operations, common.UnionOptions(w.options, options)..., ) @@ -76,7 +82,7 @@ func (w *walletWithOptions) IssueOperationTxMintFT( outputs map[ids.ID]*secp256k1fx.TransferOutput, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueOperationTxMintFT( + return w.wallet.IssueOperationTxMintFT( outputs, common.UnionOptions(w.options, options)..., ) @@ -88,7 +94,7 
@@ func (w *walletWithOptions) IssueOperationTxMintNFT( owners []*secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueOperationTxMintNFT( + return w.wallet.IssueOperationTxMintNFT( assetID, payload, owners, @@ -101,7 +107,7 @@ func (w *walletWithOptions) IssueOperationTxMintProperty( owner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueOperationTxMintProperty( + return w.wallet.IssueOperationTxMintProperty( assetID, owner, common.UnionOptions(w.options, options)..., @@ -112,7 +118,7 @@ func (w *walletWithOptions) IssueOperationTxBurnProperty( assetID ids.ID, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueOperationTxBurnProperty( + return w.wallet.IssueOperationTxBurnProperty( assetID, common.UnionOptions(w.options, options)..., ) @@ -123,7 +129,7 @@ func (w *walletWithOptions) IssueImportTx( to *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueImportTx( + return w.wallet.IssueImportTx( chainID, to, common.UnionOptions(w.options, options)..., @@ -135,7 +141,7 @@ func (w *walletWithOptions) IssueExportTx( outputs []*avax.TransferableOutput, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueExportTx( + return w.wallet.IssueExportTx( chainID, outputs, common.UnionOptions(w.options, options)..., @@ -146,7 +152,7 @@ func (w *walletWithOptions) IssueUnsignedTx( utx txs.UnsignedTx, options ...common.Option, ) (*txs.Tx, error) { - return w.Wallet.IssueUnsignedTx( + return w.wallet.IssueUnsignedTx( utx, common.UnionOptions(w.options, options)..., ) @@ -156,7 +162,7 @@ func (w *walletWithOptions) IssueTx( tx *txs.Tx, options ...common.Option, ) error { - return w.Wallet.IssueTx( + return w.wallet.IssueTx( tx, common.UnionOptions(w.options, options)..., ) diff --git a/wallet/subnet/primary/api.go b/wallet/subnet/primary/api.go index 3c30b60d81c2..2aedc5c476c9 100644 --- 
a/wallet/subnet/primary/api.go +++ b/wallet/subnet/primary/api.go @@ -21,9 +21,10 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/wallet/chain/c" - "github.com/ava-labs/avalanchego/wallet/chain/p" "github.com/ava-labs/avalanchego/wallet/chain/x" + pbuilder "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + xbuilder "github.com/ava-labs/avalanchego/wallet/chain/x/builder" walletcommon "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ethcommon "github.com/ethereum/go-ethereum/common" ) @@ -57,11 +58,11 @@ type UTXOClient interface { type AVAXState struct { PClient platformvm.Client - PCTX p.Context + PCTX *pbuilder.Context XClient avm.Client - XCTX x.Context + XCTX *xbuilder.Context CClient evm.Client - CCTX c.Context + CCTX *c.Context UTXOs walletcommon.UTXOs } @@ -78,7 +79,7 @@ func FetchState( xClient := avm.NewClient(uri, "X") cClient := evm.NewCChainClient(uri) - pCTX, err := p.NewContextFromClients(ctx, infoClient, xClient) + pCTX, err := pbuilder.NewContextFromClients(ctx, infoClient, xClient) if err != nil { return nil, err } @@ -106,12 +107,12 @@ func FetchState( codec: txs.Codec, }, { - id: xCTX.BlockchainID(), + id: xCTX.BlockchainID, client: xClient, - codec: x.Parser.Codec(), + codec: xbuilder.Parser.Codec(), }, { - id: cCTX.BlockchainID(), + id: cCTX.BlockchainID, client: cClient, codec: evm.Codec, }, diff --git a/wallet/subnet/primary/example_test.go b/wallet/subnet/primary/example_test.go index 2b8d8b8eeec8..6c2390e00be4 100644 --- a/wallet/subnet/primary/example_test.go +++ b/wallet/subnet/primary/example_test.go @@ -41,9 +41,11 @@ func ExampleWallet() { // Get the P-chain and the X-chain wallets pWallet := wallet.P() xWallet := wallet.X() + xBuilder := xWallet.Builder() + xContext := xBuilder.Context() // Pull out useful constants to use when issuing transactions. 
- xChainID := xWallet.BlockchainID() + xChainID := xContext.BlockchainID owner := &secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ diff --git a/wallet/subnet/primary/examples/add-primary-validator/main.go b/wallet/subnet/primary/examples/add-primary-validator/main.go index 987229d1ec22..7c2b6c055855 100644 --- a/wallet/subnet/primary/examples/add-primary-validator/main.go +++ b/wallet/subnet/primary/examples/add-primary-validator/main.go @@ -54,7 +54,9 @@ func main() { // Get the P-chain wallet pWallet := wallet.P() - avaxAssetID := pWallet.AVAXAssetID() + pBuilder := pWallet.Builder() + pContext := pBuilder.Context() + avaxAssetID := pContext.AVAXAssetID addValidatorStartTime := time.Now() addValidatorTx, err := pWallet.IssueAddPermissionlessValidatorTx( diff --git a/wallet/subnet/primary/examples/c-chain-export/main.go b/wallet/subnet/primary/examples/c-chain-export/main.go index 41ecb5ca814e..a9a4c61773b8 100644 --- a/wallet/subnet/primary/examples/c-chain-export/main.go +++ b/wallet/subnet/primary/examples/c-chain-export/main.go @@ -42,7 +42,7 @@ func main() { cWallet := wallet.C() // Pull out useful constants to use when issuing transactions. - cChainID := cWallet.BlockchainID() + cChainID := cWallet.Builder().Context().BlockchainID owner := secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ diff --git a/wallet/subnet/primary/examples/c-chain-import/main.go b/wallet/subnet/primary/examples/c-chain-import/main.go index 387d435db4df..2f257fe6fd11 100644 --- a/wallet/subnet/primary/examples/c-chain-import/main.go +++ b/wallet/subnet/primary/examples/c-chain-import/main.go @@ -46,8 +46,9 @@ func main() { cWallet := wallet.C() // Pull out useful constants to use when issuing transactions. 
- cChainID := cWallet.BlockchainID() - avaxAssetID := cWallet.AVAXAssetID() + cContext := cWallet.Builder().Context() + cChainID := cContext.BlockchainID + avaxAssetID := cContext.AVAXAssetID owner := secp256k1fx.OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ diff --git a/wallet/subnet/primary/examples/create-chain/main.go b/wallet/subnet/primary/examples/create-chain/main.go index ea98579f6f21..c626086bce89 100644 --- a/wallet/subnet/primary/examples/create-chain/main.go +++ b/wallet/subnet/primary/examples/create-chain/main.go @@ -5,15 +5,18 @@ package main import ( "context" - "encoding/hex" "log" + "math" "time" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" + + xsgenesis "github.com/ava-labs/avalanchego/vms/example/xsvm/genesis" ) func main() { @@ -21,8 +24,16 @@ func main() { uri := primary.LocalAPIURI kc := secp256k1fx.NewKeychain(key) subnetIDStr := "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" - genesisHex := "00000000000000000000000000017b5490493f8a2fff444ac8b54e27b3339d7c60dcffffffffffffffff" - vmID := ids.ID{'x', 's', 'v', 'm'} + genesis := &xsgenesis.Genesis{ + Timestamp: time.Now().Unix(), + Allocations: []xsgenesis.Allocation{ + { + Address: genesis.EWOQKey.Address(), + Balance: math.MaxUint64, + }, + }, + } + vmID := constants.XSVMID name := "let there" subnetID, err := ids.FromString(subnetIDStr) @@ -30,9 +41,9 @@ func main() { log.Fatalf("failed to parse subnet ID: %s\n", err) } - genesisBytes, err := hex.DecodeString(genesisHex) + genesisBytes, err := xsgenesis.Codec.Marshal(xsgenesis.CodecVersion, genesis) if err != nil { - log.Fatalf("failed to parse genesis bytes: %s\n", err) + log.Fatalf("failed to create genesis bytes: %s\n", err) } ctx := context.Background() diff --git 
a/wallet/subnet/primary/examples/create-locked-stakeable/main.go b/wallet/subnet/primary/examples/create-locked-stakeable/main.go index 32cdcf983ba0..a6cf5c78604e 100644 --- a/wallet/subnet/primary/examples/create-locked-stakeable/main.go +++ b/wallet/subnet/primary/examples/create-locked-stakeable/main.go @@ -48,7 +48,9 @@ func main() { // Get the P-chain wallet pWallet := wallet.P() - avaxAssetID := pWallet.AVAXAssetID() + pBuilder := pWallet.Builder() + pContext := pBuilder.Context() + avaxAssetID := pContext.AVAXAssetID issueTxStartTime := time.Now() tx, err := pWallet.IssueBaseTx([]*avax.TransferableOutput{ diff --git a/wallet/subnet/primary/examples/get-p-chain-balance/main.go b/wallet/subnet/primary/examples/get-p-chain-balance/main.go index 08f2cd538c29..e190247515bf 100644 --- a/wallet/subnet/primary/examples/get-p-chain-balance/main.go +++ b/wallet/subnet/primary/examples/get-p-chain-balance/main.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/wallet/chain/p" + "github.com/ava-labs/avalanchego/wallet/chain/p/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -38,14 +39,14 @@ func main() { pUTXOs := common.NewChainUTXOs(constants.PlatformChainID, state.UTXOs) pBackend := p.NewBackend(state.PCTX, pUTXOs, nil) - pBuilder := p.NewBuilder(addresses, pBackend) + pBuilder := builder.New(addresses, state.PCTX, pBackend) currentBalances, err := pBuilder.GetBalance() if err != nil { log.Fatalf("failed to get the balance: %s\n", err) } - avaxID := state.PCTX.AVAXAssetID() + avaxID := state.PCTX.AVAXAssetID avaxBalance := currentBalances[avaxID] log.Printf("current AVAX balance of %s is %d nAVAX\n", addrStr, avaxBalance) } diff --git a/wallet/subnet/primary/examples/get-x-chain-balance/main.go b/wallet/subnet/primary/examples/get-x-chain-balance/main.go index 
9895546879ee..1d45e1de609d 100644 --- a/wallet/subnet/primary/examples/get-x-chain-balance/main.go +++ b/wallet/subnet/primary/examples/get-x-chain-balance/main.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/wallet/chain/x" + "github.com/ava-labs/avalanchego/wallet/chain/x/builder" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -35,18 +36,18 @@ func main() { } log.Printf("fetched state of %s in %s\n", addrStr, time.Since(fetchStartTime)) - xChainID := state.XCTX.BlockchainID() + xChainID := state.XCTX.BlockchainID xUTXOs := common.NewChainUTXOs(xChainID, state.UTXOs) xBackend := x.NewBackend(state.XCTX, xUTXOs) - xBuilder := x.NewBuilder(addresses, xBackend) + xBuilder := builder.New(addresses, state.XCTX, xBackend) currentBalances, err := xBuilder.GetFTBalance() if err != nil { log.Fatalf("failed to get the balance: %s\n", err) } - avaxID := state.XCTX.AVAXAssetID() + avaxID := state.XCTX.AVAXAssetID avaxBalance := currentBalances[avaxID] log.Printf("current AVAX balance of %s is %d nAVAX\n", addrStr, avaxBalance) } diff --git a/wallet/subnet/primary/wallet.go b/wallet/subnet/primary/wallet.go index 9aabf651cff7..179e9351ccca 100644 --- a/wallet/subnet/primary/wallet.go +++ b/wallet/subnet/primary/wallet.go @@ -15,6 +15,11 @@ import ( "github.com/ava-labs/avalanchego/wallet/chain/p" "github.com/ava-labs/avalanchego/wallet/chain/x" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" + + pbuilder "github.com/ava-labs/avalanchego/wallet/chain/p/builder" + psigner "github.com/ava-labs/avalanchego/wallet/chain/p/signer" + xbuilder "github.com/ava-labs/avalanchego/wallet/chain/x/builder" + xsigner "github.com/ava-labs/avalanchego/wallet/chain/x/signer" ) var _ Wallet = (*wallet)(nil) @@ -118,19 +123,19 @@ func MakeWallet(ctx context.Context, config *WalletConfig) 
(Wallet, error) { pUTXOs := common.NewChainUTXOs(constants.PlatformChainID, avaxState.UTXOs) pBackend := p.NewBackend(avaxState.PCTX, pUTXOs, pChainTxs) - pBuilder := p.NewBuilder(avaxAddrs, pBackend) - pSigner := p.NewSigner(config.AVAXKeychain, pBackend) + pBuilder := pbuilder.New(avaxAddrs, avaxState.PCTX, pBackend) + pSigner := psigner.New(config.AVAXKeychain, pBackend) - xChainID := avaxState.XCTX.BlockchainID() + xChainID := avaxState.XCTX.BlockchainID xUTXOs := common.NewChainUTXOs(xChainID, avaxState.UTXOs) xBackend := x.NewBackend(avaxState.XCTX, xUTXOs) - xBuilder := x.NewBuilder(avaxAddrs, xBackend) - xSigner := x.NewSigner(config.AVAXKeychain, xBackend) + xBuilder := xbuilder.New(avaxAddrs, avaxState.XCTX, xBackend) + xSigner := xsigner.New(config.AVAXKeychain, xBackend) - cChainID := avaxState.CCTX.BlockchainID() + cChainID := avaxState.CCTX.BlockchainID cUTXOs := common.NewChainUTXOs(cChainID, avaxState.UTXOs) - cBackend := c.NewBackend(avaxState.CCTX, cUTXOs, ethState.Accounts) - cBuilder := c.NewBuilder(avaxAddrs, ethAddrs, cBackend) + cBackend := c.NewBackend(cUTXOs, ethState.Accounts) + cBuilder := c.NewBuilder(avaxAddrs, ethAddrs, avaxState.CCTX, cBackend) cSigner := c.NewSigner(config.AVAXKeychain, config.EthKeychain, cBackend) return NewWallet( diff --git a/x/merkledb/README.md b/x/merkledb/README.md index 29c9f0a73247..be2fb4e6b2aa 100644 --- a/x/merkledb/README.md +++ b/x/merkledb/README.md @@ -379,7 +379,7 @@ Specifically, we encode these values in the following way: +----------------------------------------------------+ | Value (variable length bytes) (optional) | +----------------------------------------------------+ -| Key length (varint) | +| Key bit length (varint) | +----------------------------------------------------+ | Key (variable length bytes) | +----------------------------------------------------+ @@ -392,7 +392,7 @@ Where: * `Value existence flag` is `1` if this node has a value, otherwise `0`. 
* `Value length` is the length of the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. * `Value` is the value, if it exists (i.e. if `Value existence flag` is `1`.) Otherwise not serialized. -* `Key length` is the number of nibbles in this node's key. +* `Key length` is the number of bits in this node's key. * `Key` is the node's key. Note that, as with the node serialization format, the `Child index` values aren't necessarily sequential, but they are unique and strictly increasing. diff --git a/x/merkledb/bytes_pool.go b/x/merkledb/bytes_pool.go new file mode 100644 index 000000000000..a01d85acee78 --- /dev/null +++ b/x/merkledb/bytes_pool.go @@ -0,0 +1,60 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import "sync" + +type bytesPool struct { + slots chan struct{} + bytesLock sync.Mutex + bytes [][]byte +} + +func newBytesPool(numSlots int) *bytesPool { + return &bytesPool{ + slots: make(chan struct{}, numSlots), + bytes: make([][]byte, 0, numSlots), + } +} + +func (p *bytesPool) Acquire() []byte { + p.slots <- struct{}{} + return p.pop() +} + +func (p *bytesPool) TryAcquire() ([]byte, bool) { + select { + case p.slots <- struct{}{}: + return p.pop(), true + default: + return nil, false + } +} + +func (p *bytesPool) pop() []byte { + p.bytesLock.Lock() + defer p.bytesLock.Unlock() + + numBytes := len(p.bytes) + if numBytes == 0 { + return nil + } + + b := p.bytes[numBytes-1] + p.bytes = p.bytes[:numBytes-1] + return b +} + +func (p *bytesPool) Release(b []byte) { + // Before waking anyone waiting on a slot, return the bytes. 
+ p.bytesLock.Lock() + p.bytes = append(p.bytes, b) + p.bytesLock.Unlock() + + select { + case <-p.slots: + default: + panic("release of unacquired semaphore") + } +} diff --git a/x/merkledb/bytes_pool_test.go b/x/merkledb/bytes_pool_test.go new file mode 100644 index 000000000000..cd96f2fc4011 --- /dev/null +++ b/x/merkledb/bytes_pool_test.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import "testing" + +func Benchmark_BytesPool_Acquire(b *testing.B) { + s := newBytesPool(b.N) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.Acquire() + } +} + +func Benchmark_BytesPool_Release(b *testing.B) { + s := newBytesPool(b.N) + for i := 0; i < b.N; i++ { + s.Acquire() + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.Release(nil) + } +} + +func Benchmark_BytesPool_TryAcquire_Success(b *testing.B) { + s := newBytesPool(b.N) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.TryAcquire() + } +} + +func Benchmark_BytesPool_TryAcquire_Failure(b *testing.B) { + s := newBytesPool(1) + s.Acquire() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.TryAcquire() + } +} diff --git a/x/merkledb/cache.go b/x/merkledb/cache.go index ee2e7f0b2713..92e8c8739c5e 100644 --- a/x/merkledb/cache.go +++ b/x/merkledb/cache.go @@ -7,7 +7,7 @@ import ( "errors" "sync" - "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/linked" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -18,7 +18,7 @@ type onEvictCache[K comparable, V any] struct { lock sync.RWMutex maxSize int currentSize int - fifo linkedhashmap.LinkedHashmap[K, V] + fifo *linked.Hashmap[K, V] size func(K, V) int // Must not call any method that grabs [c.lock] // because this would cause a deadlock. 
@@ -33,7 +33,7 @@ func newOnEvictCache[K comparable, V any]( ) onEvictCache[K, V] { return onEvictCache[K, V]{ maxSize: maxSize, - fifo: linkedhashmap.New[K, V](), + fifo: linked.NewHashmap[K, V](), size: size, onEviction: onEviction, } @@ -65,15 +65,14 @@ func (c *onEvictCache[K, V]) Put(key K, value V) error { } // Flush removes all elements from the cache. -// Returns the last non-nil error during [c.onEviction], if any. -// If [c.onEviction] errors, it will still be called for any -// subsequent elements and the cache will still be emptied. +// +// Returns the first non-nil error returned by [c.onEviction], if any. +// +// If [c.onEviction] errors, it will still be called for any subsequent elements +// and the cache will still be emptied. func (c *onEvictCache[K, V]) Flush() error { c.lock.Lock() - defer func() { - c.fifo = linkedhashmap.New[K, V]() - c.lock.Unlock() - }() + defer c.lock.Unlock() return c.resize(0) } diff --git a/x/merkledb/codec.go b/x/merkledb/codec.go index a5d4a922b0d9..bfdfeff9615c 100644 --- a/x/merkledb/codec.go +++ b/x/merkledb/codec.go @@ -11,34 +11,18 @@ import ( "math" "math/bits" "slices" - "sync" - - "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/maybe" ) const ( - boolLen = 1 - trueByte = 1 - falseByte = 0 - minVarIntLen = 1 - minMaybeByteSliceLen = boolLen - minKeyLen = minVarIntLen - minByteSliceLen = minVarIntLen - minDBNodeLen = minMaybeByteSliceLen + minVarIntLen - minChildLen = minVarIntLen + minKeyLen + ids.IDLen + boolLen - - estimatedKeyLen = 64 - estimatedValueLen = 64 - // Child index, child ID - hashValuesChildLen = minVarIntLen + ids.IDLen + boolLen = 1 + trueByte = 1 + falseByte = 0 ) var ( - _ encoderDecoder = (*codecImpl)(nil) - trueBytes = []byte{trueByte} falseBytes = []byte{falseByte} @@ -48,154 +32,159 @@ var ( errNonZeroKeyPadding = errors.New("key partial byte should be padded with 0s") errExtraSpace = errors.New("trailing buffer space") errIntOverflow = 
errors.New("value overflows int") + errTooManyChildren = errors.New("too many children") ) -// encoderDecoder defines the interface needed by merkleDB to marshal -// and unmarshal relevant types. -type encoderDecoder interface { - encoder - decoder -} - -type encoder interface { - // Assumes [n] is non-nil. - encodeDBNode(n *dbNode) []byte - encodedDBNodeSize(n *dbNode) int - - // Returns the bytes that will be hashed to generate [n]'s ID. - // Assumes [n] is non-nil. - encodeHashValues(n *node) []byte - encodeKey(key Key) []byte -} - -type decoder interface { - // Assumes [n] is non-nil. - decodeDBNode(bytes []byte, n *dbNode) error - decodeKey(bytes []byte) (Key, error) -} - -func newCodec() encoderDecoder { - return &codecImpl{ - varIntPool: sync.Pool{ - New: func() interface{} { - return make([]byte, binary.MaxVarintLen64) - }, - }, - } -} - -// Note that bytes.Buffer.Write always returns nil, so we -// can ignore its return values in [codecImpl] methods. -type codecImpl struct { - // Invariant: Every byte slice returned by [varIntPool] has - // length [binary.MaxVarintLen64]. 
- varIntPool sync.Pool -} - -func (c *codecImpl) childSize(index byte, childEntry *child) int { +func childSize(index byte, childEntry *child) int { // * index // * child ID // * child key // * bool indicating whether the child has a value - return c.uintSize(uint64(index)) + ids.IDLen + c.keySize(childEntry.compressedKey) + boolLen + return uintSize(uint64(index)) + ids.IDLen + keySize(childEntry.compressedKey) + boolLen } -// based on the current implementation of codecImpl.encodeUint which uses binary.PutUvarint -func (*codecImpl) uintSize(value uint64) int { +// based on the implementation of encodeUint which uses binary.PutUvarint +func uintSize(value uint64) int { if value == 0 { return 1 } return (bits.Len64(value) + 6) / 7 } -func (c *codecImpl) keySize(p Key) int { - return c.uintSize(uint64(p.length)) + bytesNeeded(p.length) +func keySize(p Key) int { + return uintSize(uint64(p.length)) + bytesNeeded(p.length) } -func (c *codecImpl) encodedDBNodeSize(n *dbNode) int { +// Assumes [n] is non-nil. +func encodedDBNodeSize(n *dbNode) int { // * number of children // * bool indicating whether [n] has a value // * the value (optional) // * children - size := c.uintSize(uint64(len(n.children))) + boolLen + size := uintSize(uint64(len(n.children))) + boolLen if n.value.HasValue() { valueLen := len(n.value.Value()) - size += c.uintSize(uint64(valueLen)) + valueLen + size += uintSize(uint64(valueLen)) + valueLen } // for each non-nil entry, we add the additional size of the child entry for index, entry := range n.children { - size += c.childSize(index, entry) + size += childSize(index, entry) } return size } -func (c *codecImpl) encodeDBNode(n *dbNode) []byte { - buf := bytes.NewBuffer(make([]byte, 0, c.encodedDBNodeSize(n))) - c.encodeMaybeByteSlice(buf, n.value) - c.encodeUint(buf, uint64(len(n.children))) - // Note we insert children in order of increasing index - // for determinism. - keys := maps.Keys(n.children) +// Assumes [n] is non-nil. 
+func encodeDBNode(n *dbNode) []byte { + length := encodedDBNodeSize(n) + w := codecWriter{ + b: make([]byte, 0, length), + } + + w.MaybeBytes(n.value) + + numChildren := len(n.children) + w.Uvarint(uint64(numChildren)) + + // Avoid allocating keys entirely if the node doesn't have any children. + if numChildren == 0 { + return w.b + } + + // By allocating BranchFactorLargest rather than [numChildren], this slice + // is allocated on the stack rather than the heap. BranchFactorLargest is + // at least [numChildren] which avoids memory allocations. + keys := make([]byte, numChildren, BranchFactorLargest) + i := 0 + for k := range n.children { + keys[i] = k + i++ + } + + // Ensure that the order of entries is correct. slices.Sort(keys) for _, index := range keys { entry := n.children[index] - c.encodeUint(buf, uint64(index)) - c.encodeKeyToBuffer(buf, entry.compressedKey) - _, _ = buf.Write(entry.id[:]) - c.encodeBool(buf, entry.hasValue) + w.Uvarint(uint64(index)) + w.Key(entry.compressedKey) + w.ID(entry.id) + w.Bool(entry.hasValue) } - return buf.Bytes() + + return w.b } -func (c *codecImpl) encodeHashValues(n *node) []byte { - var ( - numChildren = len(n.children) - // Estimate size [hv] to prevent memory allocations - estimatedLen = minVarIntLen + numChildren*hashValuesChildLen + estimatedValueLen + estimatedKeyLen - buf = bytes.NewBuffer(make([]byte, 0, estimatedLen)) - ) +func encodeKey(key Key) []byte { + length := uintSize(uint64(key.length)) + len(key.Bytes()) + w := codecWriter{ + b: make([]byte, 0, length), + } + w.Key(key) + return w.b +} - c.encodeUint(buf, uint64(numChildren)) +type codecWriter struct { + b []byte +} - // ensure that the order of entries is consistent - keys := maps.Keys(n.children) - slices.Sort(keys) - for _, index := range keys { - entry := n.children[index] - c.encodeUint(buf, uint64(index)) - _, _ = buf.Write(entry.id[:]) +func (w *codecWriter) Bool(v bool) { + if v { + w.b = append(w.b, trueByte) + } else { + w.b = append(w.b, 
falseByte) } - c.encodeMaybeByteSlice(buf, n.valueDigest) - c.encodeKeyToBuffer(buf, n.key) +} + +func (w *codecWriter) Uvarint(v uint64) { + w.b = binary.AppendUvarint(w.b, v) +} + +func (w *codecWriter) ID(v ids.ID) { + w.b = append(w.b, v[:]...) +} - return buf.Bytes() +func (w *codecWriter) Bytes(v []byte) { + w.Uvarint(uint64(len(v))) + w.b = append(w.b, v...) } -func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) error { - if minDBNodeLen > len(b) { - return io.ErrUnexpectedEOF +func (w *codecWriter) MaybeBytes(v maybe.Maybe[[]byte]) { + hasValue := v.HasValue() + w.Bool(hasValue) + if hasValue { + w.Bytes(v.Value()) } +} + +func (w *codecWriter) Key(v Key) { + w.Uvarint(uint64(v.length)) + w.b = append(w.b, v.Bytes()...) +} - src := bytes.NewReader(b) +// Assumes [n] is non-nil. +func decodeDBNode(b []byte, n *dbNode) error { + r := codecReader{ + b: b, + copy: true, + } - value, err := c.decodeMaybeByteSlice(src) + var err error + n.value, err = r.MaybeBytes() if err != nil { return err } - n.value = value - numChildren, err := c.decodeUint(src) - switch { - case err != nil: + numChildren, err := r.Uvarint() + if err != nil { return err - case numChildren > uint64(src.Len()/minChildLen): - return io.ErrUnexpectedEOF + } + if numChildren > uint64(BranchFactorLargest) { + return errTooManyChildren } n.children = make(map[byte]*child, numChildren) var previousChild uint64 for i := uint64(0); i < numChildren; i++ { - index, err := c.decodeUint(src) + index, err := r.Uvarint() if err != nil { return err } @@ -204,15 +193,15 @@ func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) error { } previousChild = index - compressedKey, err := c.decodeKeyFromReader(src) + compressedKey, err := r.Key() if err != nil { return err } - childID, err := c.decodeID(src) + childID, err := r.ID() if err != nil { return err } - hasValue, err := c.decodeBool(src) + hasValue, err := r.Bool() if err != nil { return err } @@ -222,205 +211,131 @@ func (c *codecImpl) decodeDBNode(b 
[]byte, n *dbNode) error { hasValue: hasValue, } } - if src.Len() != 0 { + if len(r.b) != 0 { return errExtraSpace } return nil } -func (*codecImpl) encodeBool(dst *bytes.Buffer, value bool) { - bytesValue := falseBytes - if value { - bytesValue = trueBytes - } - _, _ = dst.Write(bytesValue) -} - -func (*codecImpl) decodeBool(src *bytes.Reader) (bool, error) { - boolByte, err := src.ReadByte() - switch { - case err == io.EOF: - return false, io.ErrUnexpectedEOF - case err != nil: - return false, err - case boolByte == trueByte: - return true, nil - case boolByte == falseByte: - return false, nil - default: - return false, errInvalidBool +func decodeKey(b []byte) (Key, error) { + r := codecReader{ + b: b, + copy: true, } -} - -func (*codecImpl) decodeUint(src *bytes.Reader) (uint64, error) { - // To ensure encoding/decoding is canonical, we need to check for leading - // zeroes in the varint. - // The last byte of the varint we read is the most significant byte. - // If it's 0, then it's a leading zero, which is considered invalid in the - // canonical encoding. 
- startLen := src.Len() - val64, err := binary.ReadUvarint(src) + key, err := r.Key() if err != nil { - if err == io.EOF { - return 0, io.ErrUnexpectedEOF - } - return 0, err + return Key{}, err } - endLen := src.Len() - - // Just 0x00 is a valid value so don't check if the varint is 1 byte - if startLen-endLen > 1 { - if err := src.UnreadByte(); err != nil { - return 0, err - } - lastByte, err := src.ReadByte() - if err != nil { - return 0, err - } - if lastByte == 0x00 { - return 0, errLeadingZeroes - } + if len(r.b) != 0 { + return Key{}, errExtraSpace } - - return val64, nil -} - -func (c *codecImpl) encodeUint(dst *bytes.Buffer, value uint64) { - buf := c.varIntPool.Get().([]byte) - size := binary.PutUvarint(buf, value) - _, _ = dst.Write(buf[:size]) - c.varIntPool.Put(buf) + return key, nil } -func (c *codecImpl) encodeMaybeByteSlice(dst *bytes.Buffer, maybeValue maybe.Maybe[[]byte]) { - hasValue := maybeValue.HasValue() - c.encodeBool(dst, hasValue) - if hasValue { - c.encodeByteSlice(dst, maybeValue.Value()) - } +type codecReader struct { + b []byte + // copy is used to flag to the reader if it is required to copy references + // to [b]. 
+ copy bool } -func (c *codecImpl) decodeMaybeByteSlice(src *bytes.Reader) (maybe.Maybe[[]byte], error) { - if minMaybeByteSliceLen > src.Len() { - return maybe.Nothing[[]byte](), io.ErrUnexpectedEOF - } - - if hasValue, err := c.decodeBool(src); err != nil || !hasValue { - return maybe.Nothing[[]byte](), err +func (r *codecReader) Bool() (bool, error) { + if len(r.b) < boolLen { + return false, io.ErrUnexpectedEOF } - - rawBytes, err := c.decodeByteSlice(src) - if err != nil { - return maybe.Nothing[[]byte](), err + boolByte := r.b[0] + if boolByte > trueByte { + return false, errInvalidBool } - return maybe.Some(rawBytes), nil + r.b = r.b[boolLen:] + return boolByte == trueByte, nil } -func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { - if minByteSliceLen > src.Len() { - return nil, io.ErrUnexpectedEOF +func (r *codecReader) Uvarint() (uint64, error) { + length, bytesRead := binary.Uvarint(r.b) + if bytesRead <= 0 { + return 0, io.ErrUnexpectedEOF } - length, err := c.decodeUint(src) - switch { - case err == io.EOF: - return nil, io.ErrUnexpectedEOF - case err != nil: - return nil, err - case length == 0: - return nil, nil - case length > uint64(src.Len()): - return nil, io.ErrUnexpectedEOF + // To ensure decoding is canonical, we check for leading zeroes in the + // varint. + // The last byte of the varint includes the most significant bits. + // If the last byte is 0, then the number should have been encoded more + // efficiently by removing this leading zero. 
+ if bytesRead > 1 && r.b[bytesRead-1] == 0x00 { + return 0, errLeadingZeroes } - result := make([]byte, length) - _, err = io.ReadFull(src, result) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return result, err + r.b = r.b[bytesRead:] + return length, nil } -func (c *codecImpl) encodeByteSlice(dst *bytes.Buffer, value []byte) { - c.encodeUint(dst, uint64(len(value))) - if value != nil { - _, _ = dst.Write(value) +func (r *codecReader) ID() (ids.ID, error) { + if len(r.b) < ids.IDLen { + return ids.Empty, io.ErrUnexpectedEOF } + id := ids.ID(r.b[:ids.IDLen]) + + r.b = r.b[ids.IDLen:] + return id, nil } -func (*codecImpl) decodeID(src *bytes.Reader) (ids.ID, error) { - if ids.IDLen > src.Len() { - return ids.ID{}, io.ErrUnexpectedEOF +func (r *codecReader) Bytes() ([]byte, error) { + length, err := r.Uvarint() + if err != nil { + return nil, err } - var id ids.ID - _, err := io.ReadFull(src, id[:]) - if err == io.EOF { - err = io.ErrUnexpectedEOF + if length > uint64(len(r.b)) { + return nil, io.ErrUnexpectedEOF + } + result := r.b[:length] + if r.copy { + result = bytes.Clone(result) } - return id, err -} - -func (c *codecImpl) encodeKey(key Key) []byte { - estimatedLen := binary.MaxVarintLen64 + len(key.Bytes()) - dst := bytes.NewBuffer(make([]byte, 0, estimatedLen)) - c.encodeKeyToBuffer(dst, key) - return dst.Bytes() -} -func (c *codecImpl) encodeKeyToBuffer(dst *bytes.Buffer, key Key) { - c.encodeUint(dst, uint64(key.length)) - _, _ = dst.Write(key.Bytes()) + r.b = r.b[length:] + return result, nil } -func (c *codecImpl) decodeKey(b []byte) (Key, error) { - src := bytes.NewReader(b) - key, err := c.decodeKeyFromReader(src) - if err != nil { - return Key{}, err - } - if src.Len() != 0 { - return Key{}, errExtraSpace +func (r *codecReader) MaybeBytes() (maybe.Maybe[[]byte], error) { + if hasValue, err := r.Bool(); err != nil || !hasValue { + return maybe.Nothing[[]byte](), err } - return key, err -} -func (c *codecImpl) decodeKeyFromReader(src 
*bytes.Reader) (Key, error) { - if minKeyLen > src.Len() { - return Key{}, io.ErrUnexpectedEOF - } + bytes, err := r.Bytes() + return maybe.Some(bytes), err +} - length, err := c.decodeUint(src) +func (r *codecReader) Key() (Key, error) { + bitLen, err := r.Uvarint() if err != nil { return Key{}, err } - if length > math.MaxInt { + if bitLen > math.MaxInt { return Key{}, errIntOverflow } + result := Key{ - length: int(length), + length: int(bitLen), } - keyBytesLen := bytesNeeded(result.length) - if keyBytesLen > src.Len() { + byteLen := bytesNeeded(result.length) + if byteLen > len(r.b) { return Key{}, io.ErrUnexpectedEOF } - buffer := make([]byte, keyBytesLen) - if _, err := io.ReadFull(src, buffer); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return Key{}, err - } if result.hasPartialByte() { // Confirm that the padding bits in the partial byte are 0. - // We want to only look at the bits to the right of the last token, which is at index length-1. + // We want to only look at the bits to the right of the last token, + // which is at index length-1. // Generate a mask where the (result.length % 8) left bits are 0. 
paddingMask := byte(0xFF >> (result.length % 8)) - if buffer[keyBytesLen-1]&paddingMask != 0 { + if r.b[byteLen-1]&paddingMask != 0 { return Key{}, errNonZeroKeyPadding } } - result.value = string(buffer) + result.value = string(r.b[:byteLen]) + + r.b = r.b[byteLen:] return result, nil } diff --git a/x/merkledb/codec_test.go b/x/merkledb/codec_test.go index 455b75e1bed1..5bef4f14910c 100644 --- a/x/merkledb/codec_test.go +++ b/x/merkledb/codec_test.go @@ -4,11 +4,11 @@ package merkledb import ( - "bytes" "encoding/binary" "io" "math" "math/rand" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -17,6 +17,374 @@ import ( "github.com/ava-labs/avalanchego/utils/maybe" ) +var ( + encodeDBNodeTests = []struct { + name string + n *dbNode + expectedBytes []byte + }{ + { + name: "empty node", + n: &dbNode{ + children: make(map[byte]*child), + }, + expectedBytes: []byte{ + 0x00, // value.HasValue() + 0x00, // len(children) + }, + }, + { + name: "has value", + n: &dbNode{ + value: maybe.Some([]byte("value")), + children: make(map[byte]*child), + }, + expectedBytes: []byte{ + 0x01, // value.HasValue() + 0x05, // len(value.Value()) + 'v', 'a', 'l', 'u', 'e', // value.Value() + 0x00, // len(children) + }, + }, + { + name: "1 child", + n: &dbNode{ + value: maybe.Some([]byte("value")), + children: map[byte]*child{ + 0: { + compressedKey: ToKey([]byte{0}), + id: ids.ID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + hasValue: true, + }, + }, + }, + expectedBytes: []byte{ + 0x01, // value.HasValue() + 0x05, // len(value.Value()) + 'v', 'a', 'l', 'u', 'e', // value.Value() + 0x01, // len(children) + 0x00, // children[0].index + 0x08, // len(children[0].compressedKey) + 0x00, // children[0].compressedKey + // children[0].id + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 
0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x01, // children[0].hasValue + }, + }, + { + name: "2 children", + n: &dbNode{ + value: maybe.Some([]byte("value")), + children: map[byte]*child{ + 0: { + compressedKey: ToKey([]byte{0}), + id: ids.ID{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + hasValue: true, + }, + 1: { + compressedKey: ToKey([]byte{1, 2, 3}), + id: ids.ID{ + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + }, + hasValue: false, + }, + }, + }, + expectedBytes: []byte{ + 0x01, // value.HasValue() + 0x05, // len(value.Value()) + 'v', 'a', 'l', 'u', 'e', // value.Value() + 0x02, // len(children) + 0x00, // children[0].index + 0x08, // len(children[0].compressedKey) + 0x00, // children[0].compressedKey + // children[0].id + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x01, // children[0].hasValue + 0x01, // children[1].index + 0x18, // len(children[1].compressedKey) + 0x01, 0x02, 0x03, // children[1].compressedKey + // children[1].id + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x00, // children[1].hasValue + }, + }, + { + name: "16 children", + n: func() *dbNode { + n := &dbNode{ + value: maybe.Some([]byte("value")), + children: make(map[byte]*child), + } + for i := byte(0); i < 16; i++ { + n.children[i] = &child{ + compressedKey: ToKey([]byte{i}), + id: ids.ID{ + 0x00 + 
i, 0x01 + i, 0x02 + i, 0x03 + i, + 0x04 + i, 0x05 + i, 0x06 + i, 0x07 + i, + 0x08 + i, 0x09 + i, 0x0a + i, 0x0b + i, + 0x0c + i, 0x0d + i, 0x0e + i, 0x0f + i, + 0x10 + i, 0x11 + i, 0x12 + i, 0x13 + i, + 0x14 + i, 0x15 + i, 0x16 + i, 0x17 + i, + 0x18 + i, 0x19 + i, 0x1a + i, 0x1b + i, + 0x1c + i, 0x1d + i, 0x1e + i, 0x1f + i, + }, + hasValue: i%2 == 0, + } + } + return n + }(), + expectedBytes: []byte{ + 0x01, // value.HasValue() + 0x05, // len(value.Value()) + 'v', 'a', 'l', 'u', 'e', // value.Value() + 0x10, // len(children) + 0x00, // children[0].index + 0x08, // len(children[0].compressedKey) + 0x00, // children[0].compressedKey + // children[0].id + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x01, // children[0].hasValue + 0x01, // children[1].index + 0x08, // len(children[1].compressedKey) + 0x01, // children[1].compressedKey + // children[1].id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x00, // children[1].hasValue + 0x02, // children[2].index + 0x08, // len(children[2].compressedKey) + 0x02, // children[2].compressedKey + // children[2].id + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, + 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, + 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, + 0x01, // children[2].hasValue + 0x03, // children[3].index + 0x08, // len(children[3].compressedKey) + 0x03, // children[3].compressedKey + // children[3].id + 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, + 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, + 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, + 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, + 0x00, // children[3].hasValue + 0x04, // 
children[4].index + 0x08, // len(children[4].compressedKey) + 0x04, // children[4].compressedKey + // children[4].id + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, + 0x01, // children[4].hasValue + 0x05, // children[5].index + 0x08, // len(children[5].compressedKey) + 0x05, // children[5].compressedKey + // children[5].id + 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, + 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, + 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, + 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, + 0x00, // children[5].hasValue + 0x06, // children[6].index + 0x08, // len(children[6].compressedKey) + 0x06, // children[6].compressedKey + // children[6].id + 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, + 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, + 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, + 0x01, // children[6].hasValue + 0x07, // children[7].index + 0x08, // len(children[7].compressedKey) + 0x07, // children[7].compressedKey + // children[7].id + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, + 0x00, // children[7].hasValue + 0x08, // children[8].index + 0x08, // len(children[8].compressedKey) + 0x08, // children[8].compressedKey + // children[8].id + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x01, // children[8].hasValue + 0x09, // children[9].index + 0x08, // len(children[9].compressedKey) + 0x09, // children[9].compressedKey + // children[9].id + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 
0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x00, // children[9].hasValue + 0x0a, // children[10].index + 0x08, // len(children[10].compressedKey) + 0x0a, // children[10].compressedKey + // children[10].id + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, + 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, + 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, + 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, + 0x01, // children[10].hasValue + 0x0b, // children[11].index + 0x08, // len(children[11].compressedKey) + 0x0b, // children[11].compressedKey + // children[11].id + 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, + 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, + 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, + 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, + 0x00, // children[11].hasValue + 0x0c, // children[12].index + 0x08, // len(children[12].compressedKey) + 0x0c, // children[12].compressedKey + // children[12].id + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, + 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, + 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, + 0x01, // children[12].hasValue + 0x0d, // children[13].index + 0x08, // len(children[13].compressedKey) + 0x0d, // children[13].compressedKey + // children[13].id + 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, + 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, + 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, + 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, + 0x00, // children[13].hasValue + 0x0e, // children[14].index + 0x08, // len(children[14].compressedKey) + 0x0e, // children[14].compressedKey + // children[14].id + 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, + 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, + 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, + 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, + 0x01, // children[14].hasValue + 0x0f, // 
children[15].index + 0x08, // len(children[15].compressedKey) + 0x0f, // children[15].compressedKey + // children[15].id + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, + 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, + 0x00, // children[15].hasValue + }, + }, + } + encodeKeyTests = []struct { + name string + key Key + expectedBytes []byte + }{ + { + name: "empty", + key: ToKey([]byte{}), + expectedBytes: []byte{ + 0x00, // length + }, + }, + { + name: "1 byte", + key: ToKey([]byte{0}), + expectedBytes: []byte{ + 0x08, // length + 0x00, // key + }, + }, + { + name: "2 bytes", + key: ToKey([]byte{0, 1}), + expectedBytes: []byte{ + 0x10, // length + 0x00, 0x01, // key + }, + }, + { + name: "4 bytes", + key: ToKey([]byte{0, 1, 2, 3}), + expectedBytes: []byte{ + 0x20, // length + 0x00, 0x01, 0x02, 0x03, // key + }, + }, + { + name: "8 bytes", + key: ToKey([]byte{0, 1, 2, 3, 4, 5, 6, 7}), + expectedBytes: []byte{ + 0x40, // length + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, // key + }, + }, + { + name: "32 bytes", + key: ToKey(make([]byte, 32)), + expectedBytes: append( + []byte{ + 0x80, 0x02, // length + }, + make([]byte, 32)..., // key + ), + }, + { + name: "64 bytes", + key: ToKey(make([]byte, 64)), + expectedBytes: append( + []byte{ + 0x80, 0x04, // length + }, + make([]byte, 64)..., // key + ), + }, + { + name: "1024 bytes", + key: ToKey(make([]byte, 1024)), + expectedBytes: append( + []byte{ + 0x80, 0x40, // length + }, + make([]byte, 1024)..., // key + ), + }, + } +) + func FuzzCodecBool(f *testing.F) { f.Fuzz( func( @@ -25,22 +393,22 @@ func FuzzCodecBool(f *testing.F) { ) { require := require.New(t) - codec := codec.(*codecImpl) - reader := bytes.NewReader(b) - startLen := reader.Len() - got, err := codec.decodeBool(reader) + r := codecReader{ + b: b, + } + startLen := len(r.b) + got, err := r.Bool() if err != nil { t.SkipNow() } - endLen := reader.Len() 
+ endLen := len(r.b) numRead := startLen - endLen // Encoding [got] should be the same as [b]. - var buf bytes.Buffer - codec.encodeBool(&buf, got) - bufBytes := buf.Bytes() - require.Len(bufBytes, numRead) - require.Equal(b[:numRead], bufBytes) + w := codecWriter{} + w.Bool(got) + require.Len(w.b, numRead) + require.Equal(b[:numRead], w.b) }, ) } @@ -53,22 +421,22 @@ func FuzzCodecInt(f *testing.F) { ) { require := require.New(t) - codec := codec.(*codecImpl) - reader := bytes.NewReader(b) - startLen := reader.Len() - got, err := codec.decodeUint(reader) + c := codecReader{ + b: b, + } + startLen := len(c.b) + got, err := c.Uvarint() if err != nil { t.SkipNow() } - endLen := reader.Len() + endLen := len(c.b) numRead := startLen - endLen // Encoding [got] should be the same as [b]. - var buf bytes.Buffer - codec.encodeUint(&buf, got) - bufBytes := buf.Bytes() - require.Len(bufBytes, numRead) - require.Equal(b[:numRead], bufBytes) + w := codecWriter{} + w.Uvarint(got) + require.Len(w.b, numRead) + require.Equal(b[:numRead], w.b) }, ) } @@ -80,14 +448,13 @@ func FuzzCodecKey(f *testing.F) { b []byte, ) { require := require.New(t) - codec := codec.(*codecImpl) - got, err := codec.decodeKey(b) + got, err := decodeKey(b) if err != nil { t.SkipNow() } // Encoding [got] should be the same as [b]. - gotBytes := codec.encodeKey(got) + gotBytes := encodeKey(got) require.Equal(b, gotBytes) }, ) @@ -100,14 +467,13 @@ func FuzzCodecDBNodeCanonical(f *testing.F) { b []byte, ) { require := require.New(t) - codec := codec.(*codecImpl) node := &dbNode{} - if err := codec.decodeDBNode(b, node); err != nil { + if err := decodeDBNode(b, node); err != nil { t.SkipNow() } // Encoding [node] should be the same as [b]. 
- buf := codec.encodeDBNode(node) + buf := encodeDBNode(node) require.Equal(b, buf) }, ) @@ -127,13 +493,6 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { value := maybe.Nothing[[]byte]() if hasValue { - if len(valueBytes) == 0 { - // We do this because when we encode a value of []byte{} - // we will later decode it as nil. - // Doing this prevents inconsistency when comparing the - // encoded and decoded values below. - valueBytes = nil - } value = maybe.Some(valueBytes) } @@ -157,14 +516,19 @@ func FuzzCodecDBNodeDeterministic(f *testing.F) { children: children, } - nodeBytes := codec.encodeDBNode(&node) - require.Len(nodeBytes, codec.encodedDBNodeSize(&node)) + nodeBytes := encodeDBNode(&node) + require.Len(nodeBytes, encodedDBNodeSize(&node)) var gotNode dbNode - require.NoError(codec.decodeDBNode(nodeBytes, &gotNode)) + require.NoError(decodeDBNode(nodeBytes, &gotNode)) require.Equal(node, gotNode) - nodeBytes2 := codec.encodeDBNode(&gotNode) + nodeBytes2 := encodeDBNode(&gotNode) require.Equal(nodeBytes, nodeBytes2) + + // Enforce that modifying bytes after decodeDBNode doesn't + // modify the populated struct. 
+ clear(nodeBytes) + require.Equal(node, gotNode) } }, ) @@ -175,94 +539,133 @@ func TestCodecDecodeDBNode_TooShort(t *testing.T) { var ( parsedDBNode dbNode - tooShortBytes = make([]byte, minDBNodeLen-1) + tooShortBytes = make([]byte, 1) ) - err := codec.decodeDBNode(tooShortBytes, &parsedDBNode) + err := decodeDBNode(tooShortBytes, &parsedDBNode) require.ErrorIs(err, io.ErrUnexpectedEOF) } -// Ensure that encodeHashValues is deterministic -func FuzzEncodeHashValues(f *testing.F) { - codec1 := newCodec() - codec2 := newCodec() +func TestEncodeDBNode(t *testing.T) { + for _, test := range encodeDBNodeTests { + t.Run(test.name, func(t *testing.T) { + bytes := encodeDBNode(test.n) + require.Equal(t, test.expectedBytes, bytes) + }) + } +} - f.Fuzz( - func( - t *testing.T, - randSeed int, - ) { +func TestDecodeDBNode(t *testing.T) { + for _, test := range encodeDBNodeTests { + t.Run(test.name, func(t *testing.T) { require := require.New(t) - for _, bf := range validBranchFactors { // Create a random node - r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 - - children := map[byte]*child{} - numChildren := r.Intn(int(bf)) // #nosec G404 - for i := 0; i < numChildren; i++ { - compressedKeyLen := r.Intn(32) // #nosec G404 - compressedKeyBytes := make([]byte, compressedKeyLen) - _, _ = r.Read(compressedKeyBytes) // #nosec G404 - children[byte(i)] = &child{ - compressedKey: ToKey(compressedKeyBytes), - id: ids.GenerateTestID(), - hasValue: r.Intn(2) == 1, // #nosec G404 - } - } - - hasValue := r.Intn(2) == 1 // #nosec G404 - value := maybe.Nothing[[]byte]() - if hasValue { - valueBytes := make([]byte, r.Intn(64)) // #nosec G404 - _, _ = r.Read(valueBytes) // #nosec G404 - value = maybe.Some(valueBytes) - } - - key := make([]byte, r.Intn(32)) // #nosec G404 - _, _ = r.Read(key) // #nosec G404 + var n dbNode + require.NoError(decodeDBNode(test.expectedBytes, &n)) + require.Equal(test.n, &n) + }) + } +} - hv := &node{ - key: ToKey(key), - dbNode: dbNode{ - 
children: children, - value: value, - }, - } +func TestEncodeKey(t *testing.T) { + for _, test := range encodeKeyTests { + t.Run(test.name, func(t *testing.T) { + bytes := encodeKey(test.key) + require.Equal(t, test.expectedBytes, bytes) + }) + } +} - // Serialize hv with both codecs - hvBytes1 := codec1.encodeHashValues(hv) - hvBytes2 := codec2.encodeHashValues(hv) +func TestDecodeKey(t *testing.T) { + for _, test := range encodeKeyTests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) - // Make sure they're the same - require.Equal(hvBytes1, hvBytes2) - } - }, - ) + key, err := decodeKey(test.expectedBytes) + require.NoError(err) + require.Equal(test.key, key) + }) + } } func TestCodecDecodeKeyLengthOverflowRegression(t *testing.T) { - codec := codec.(*codecImpl) - _, err := codec.decodeKey(binary.AppendUvarint(nil, math.MaxInt)) + _, err := decodeKey(binary.AppendUvarint(nil, math.MaxInt)) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } func TestUintSize(t *testing.T) { - c := codec.(*codecImpl) - // Test lower bound - expectedSize := c.uintSize(0) + expectedSize := uintSize(0) actualSize := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), 0) require.Equal(t, expectedSize, actualSize) // Test upper bound - expectedSize = c.uintSize(math.MaxUint64) + expectedSize = uintSize(math.MaxUint64) actualSize = binary.PutUvarint(make([]byte, binary.MaxVarintLen64), math.MaxUint64) require.Equal(t, expectedSize, actualSize) // Test powers of 2 for power := 0; power < 64; power++ { n := uint64(1) << uint(power) - expectedSize := c.uintSize(n) + expectedSize := uintSize(n) actualSize := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), n) require.Equal(t, expectedSize, actualSize, power) } } + +func Benchmark_EncodeDBNode(b *testing.B) { + for _, benchmark := range encodeDBNodeTests { + b.Run(benchmark.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + encodeDBNode(benchmark.n) + } + }) + } +} + +func Benchmark_DecodeDBNode(b *testing.B) 
{ + for _, benchmark := range encodeDBNodeTests { + b.Run(benchmark.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + var n dbNode + err := decodeDBNode(benchmark.expectedBytes, &n) + require.NoError(b, err) + } + }) + } +} + +func Benchmark_EncodeKey(b *testing.B) { + for _, benchmark := range encodeKeyTests { + b.Run(benchmark.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + encodeKey(benchmark.key) + } + }) + } +} + +func Benchmark_DecodeKey(b *testing.B) { + for _, benchmark := range encodeKeyTests { + b.Run(benchmark.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, err := decodeKey(benchmark.expectedBytes) + require.NoError(b, err) + } + }) + } +} + +func Benchmark_EncodeUint(b *testing.B) { + w := codecWriter{ + b: make([]byte, 0, binary.MaxVarintLen64), + } + + for _, v := range []uint64{0, 1, 2, 32, 1024, 32768} { + b.Run(strconv.FormatUint(v, 10), func(b *testing.B) { + for i := 0; i < b.N; i++ { + w.Uvarint(v) + w.b = w.b[:0] + } + }) + } +} diff --git a/x/merkledb/db.go b/x/merkledb/db.go index cc81a1bed6ee..3096cbabd262 100644 --- a/x/merkledb/db.go +++ b/x/merkledb/db.go @@ -15,7 +15,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/attribute" "golang.org/x/exp/maps" - "golang.org/x/sync/semaphore" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" @@ -41,12 +40,23 @@ const ( var ( _ MerkleDB = (*merkleDB)(nil) - codec = newCodec() - metadataPrefix = []byte{0} valueNodePrefix = []byte{1} intermediateNodePrefix = []byte{2} + // cleanShutdownKey is used to flag that the database did (or did not) + // previously shutdown correctly. + // + // If this key has value [hadCleanShutdown] it must be true that all + // intermediate nodes of the trie are correctly populated on disk and that + // the [rootDBKey] has the correct key for the root node. 
+ // + // If this key has value [didNotHaveCleanShutdown] the intermediate nodes of + // the trie may not be correct and the [rootDBKey] may not exist or point to + // a node that node longer exists. + // + // Regardless of the value of [cleanShutdownKey], the value nodes must + // always be persisted correctly. cleanShutdownKey = []byte(string(metadataPrefix) + "cleanShutdown") rootDBKey = []byte(string(metadataPrefix) + "root") hadCleanShutdown = []byte{1} @@ -153,6 +163,11 @@ type Config struct { // BranchFactor determines the number of children each node can have. BranchFactor BranchFactor + // Hasher defines the hash function to use when hashing the trie. + // + // If not specified, [DefaultHasher] will be used. + Hasher Hasher + // RootGenConcurrency is the number of goroutines to use when // generating a new state root. // @@ -204,7 +219,7 @@ type merkleDB struct { // True iff the db has been closed. closed bool - metrics merkleMetrics + metrics metrics debugTracer trace.Tracer infoTracer trace.Tracer @@ -218,16 +233,19 @@ type merkleDB struct { // Valid children of this trie. childViews []*view - // calculateNodeIDsSema controls the number of goroutines inside - // [calculateNodeIDsHelper] at any given time. - calculateNodeIDsSema *semaphore.Weighted + // hashNodesKeyPool controls the number of goroutines that are created + // inside [hashChangedNode] at any given time and provides slices for the + // keys needed while hashing. + hashNodesKeyPool *bytesPool tokenSize int + + hasher Hasher } // New returns a new merkle database. 
func New(ctx context.Context, db database.Database, config Config) (MerkleDB, error) { - metrics, err := newMetrics("merkleDB", config.Reg) + metrics, err := newMetrics("merkledb", config.Reg) if err != nil { return nil, err } @@ -238,25 +256,26 @@ func newDatabase( ctx context.Context, db database.Database, config Config, - metrics merkleMetrics, + metrics metrics, ) (*merkleDB, error) { if err := config.BranchFactor.Valid(); err != nil { return nil, err } - rootGenConcurrency := uint(runtime.NumCPU()) - if config.RootGenConcurrency != 0 { - rootGenConcurrency = config.RootGenConcurrency + hasher := config.Hasher + if hasher == nil { + hasher = DefaultHasher } - // Share a sync.Pool of []byte between the intermediateNodeDB and valueNodeDB - // to reduce memory allocations. - bufferPool := &sync.Pool{ - New: func() interface{} { - return make([]byte, 0, defaultBufferLength) - }, + rootGenConcurrency := runtime.NumCPU() + if config.RootGenConcurrency != 0 { + rootGenConcurrency = int(config.RootGenConcurrency) } + // Share a bytes pool between the intermediateNodeDB and valueNodeDB to + // reduce memory allocations. 
+ bufferPool := utils.NewBytesPool() + trieDB := &merkleDB{ metrics: metrics, baseDB: db, @@ -267,22 +286,44 @@ func newDatabase( int(config.IntermediateNodeCacheSize), int(config.IntermediateWriteBufferSize), int(config.IntermediateWriteBatchSize), - BranchFactorToTokenSize[config.BranchFactor]), - valueNodeDB: newValueNodeDB(db, + BranchFactorToTokenSize[config.BranchFactor], + hasher, + ), + valueNodeDB: newValueNodeDB( + db, bufferPool, metrics, - int(config.ValueNodeCacheSize)), - history: newTrieHistory(int(config.HistoryLength)), - debugTracer: getTracerIfEnabled(config.TraceLevel, DebugTrace, config.Tracer), - infoTracer: getTracerIfEnabled(config.TraceLevel, InfoTrace, config.Tracer), - childViews: make([]*view, 0, defaultPreallocationSize), - calculateNodeIDsSema: semaphore.NewWeighted(int64(rootGenConcurrency)), - tokenSize: BranchFactorToTokenSize[config.BranchFactor], + int(config.ValueNodeCacheSize), + hasher, + ), + history: newTrieHistory(int(config.HistoryLength)), + debugTracer: getTracerIfEnabled(config.TraceLevel, DebugTrace, config.Tracer), + infoTracer: getTracerIfEnabled(config.TraceLevel, InfoTrace, config.Tracer), + childViews: make([]*view, 0, defaultPreallocationSize), + hashNodesKeyPool: newBytesPool(rootGenConcurrency), + tokenSize: BranchFactorToTokenSize[config.BranchFactor], + hasher: hasher, } - if err := trieDB.initializeRoot(); err != nil { + shutdownType, err := trieDB.baseDB.Get(cleanShutdownKey) + switch err { + case nil: + case database.ErrNotFound: + // If the marker wasn't found then the DB is being created for the first + // time and there is nothing to do. 
+ shutdownType = hadCleanShutdown + default: return nil, err } + if bytes.Equal(shutdownType, didNotHaveCleanShutdown) { + if err := trieDB.rebuild(ctx, int(config.ValueNodeCacheSize)); err != nil { + return nil, err + } + } else { + if err := trieDB.initializeRoot(); err != nil { + return nil, err + } + } // add current root to history (has no changes) trieDB.history.record(&changeSummary{ @@ -294,21 +335,6 @@ func newDatabase( nodes: map[Key]*change[*node]{}, }) - shutdownType, err := trieDB.baseDB.Get(cleanShutdownKey) - switch err { - case nil: - if bytes.Equal(shutdownType, didNotHaveCleanShutdown) { - if err := trieDB.rebuild(ctx, int(config.ValueNodeCacheSize)); err != nil { - return nil, err - } - } - case database.ErrNotFound: - // If the marker wasn't found then the DB is being created for the first - // time and there is nothing to do. - default: - return nil, err - } - // mark that the db has not yet been cleanly closed err = trieDB.baseDB.Put(cleanShutdownKey, didNotHaveCleanShutdown) return trieDB, err @@ -462,8 +488,26 @@ func (db *merkleDB) Close() error { return err } - // Successfully wrote intermediate nodes. 
- return db.baseDB.Put(cleanShutdownKey, hadCleanShutdown) + var ( + batch = db.baseDB.NewBatch() + err error + ) + // Write the root key + if db.root.IsNothing() { + err = batch.Delete(rootDBKey) + } else { + rootKey := encodeKey(db.root.Value().key) + err = batch.Put(rootDBKey, rootKey) + } + if err != nil { + return err + } + + // Write the clean shutdown marker + if err := batch.Put(cleanShutdownKey, hadCleanShutdown); err != nil { + return err + } + return batch.Write() } func (db *merkleDB) PrefetchPaths(keys [][]byte) error { @@ -779,7 +823,7 @@ func (db *merkleDB) NewView( return nil, database.ErrClosed } - newView, err := newView(db, db, changes) + view, err := newView(db, db, changes) if err != nil { return nil, err } @@ -788,8 +832,8 @@ func (db *merkleDB) NewView( db.lock.Lock() defer db.lock.Unlock() - db.childViews = append(db.childViews, newView) - return newView, nil + db.childViews = append(db.childViews, view) + return view, nil } func (db *merkleDB) Has(k []byte) (bool, error) { @@ -902,10 +946,10 @@ func (db *merkleDB) commitBatch(ops []database.BatchOp) error { return view.commitToDB(context.Background()) } -// commitChanges commits the changes in [trieToCommit] to [db]. +// commitView commits the changes in [trieToCommit] to [db]. // Assumes [trieToCommit]'s node IDs have been calculated. // Assumes [db.commitLock] is held. 
-func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *view) error { +func (db *merkleDB) commitView(ctx context.Context, trieToCommit *view) error { db.lock.Lock() defer db.lock.Unlock() @@ -923,7 +967,7 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *view) error } changes := trieToCommit.changes - _, span := db.infoTracer.Start(ctx, "MerkleDB.commitChanges", oteltrace.WithAttributes( + _, span := db.infoTracer.Start(ctx, "MerkleDB.commitView", oteltrace.WithAttributes( attribute.Int("nodesChanged", len(changes.nodes)), attribute.Int("valuesChanged", len(changes.values)), )) @@ -939,8 +983,46 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *view) error return nil } - currentValueNodeBatch := db.valueNodeDB.NewBatch() - _, nodesSpan := db.infoTracer.Start(ctx, "MerkleDB.commitChanges.writeNodes") + valueNodeBatch := db.baseDB.NewBatch() + if err := db.applyChanges(ctx, valueNodeBatch, changes); err != nil { + return err + } + + if err := db.commitValueChanges(ctx, valueNodeBatch); err != nil { + return err + } + + db.history.record(changes) + + // Update root in database. + db.root = changes.rootChange.after + db.rootID = changes.rootID + return nil +} + +// moveChildViewsToDB removes any child views from the trieToCommit and moves +// them to the db. +// +// assumes [db.lock] is held +func (db *merkleDB) moveChildViewsToDB(trieToCommit *view) { + trieToCommit.validityTrackingLock.Lock() + defer trieToCommit.validityTrackingLock.Unlock() + + for _, childView := range trieToCommit.childViews { + childView.updateParent(db) + db.childViews = append(db.childViews, childView) + } + trieToCommit.childViews = make([]*view, 0, defaultPreallocationSize) +} + +// applyChanges takes the [changes] and applies them to [db.intermediateNodeDB] +// and [valueNodeBatch]. 
+// +// assumes [db.lock] is held +func (db *merkleDB) applyChanges(ctx context.Context, valueNodeBatch database.KeyValueWriterDeleter, changes *changeSummary) error { + _, span := db.infoTracer.Start(ctx, "MerkleDB.applyChanges") + defer span.End() + for key, nodeChange := range changes.nodes { shouldAddIntermediate := nodeChange.after != nil && !nodeChange.after.hasValue() shouldDeleteIntermediate := !shouldAddIntermediate && nodeChange.before != nil && !nodeChange.before.hasValue() @@ -950,56 +1032,34 @@ func (db *merkleDB) commitChanges(ctx context.Context, trieToCommit *view) error if shouldAddIntermediate { if err := db.intermediateNodeDB.Put(key, nodeChange.after); err != nil { - nodesSpan.End() return err } } else if shouldDeleteIntermediate { if err := db.intermediateNodeDB.Delete(key); err != nil { - nodesSpan.End() return err } } if shouldAddValue { - currentValueNodeBatch.Put(key, nodeChange.after) + if err := db.valueNodeDB.Write(valueNodeBatch, key, nodeChange.after); err != nil { + return err + } } else if shouldDeleteValue { - currentValueNodeBatch.Delete(key) + if err := db.valueNodeDB.Write(valueNodeBatch, key, nil); err != nil { + return err + } } } - nodesSpan.End() - - _, commitSpan := db.infoTracer.Start(ctx, "MerkleDB.commitChanges.valueNodeDBCommit") - err := currentValueNodeBatch.Write() - commitSpan.End() - if err != nil { - return err - } - - db.history.record(changes) - - // Update root in database. 
- db.root = changes.rootChange.after - db.rootID = changes.rootID - - if db.root.IsNothing() { - return db.baseDB.Delete(rootDBKey) - } - - rootKey := codec.encodeKey(db.root.Value().key) - return db.baseDB.Put(rootDBKey, rootKey) + return nil } -// moveChildViewsToDB removes any child views from the trieToCommit and moves them to the db -// assumes [db.lock] is held -func (db *merkleDB) moveChildViewsToDB(trieToCommit *view) { - trieToCommit.validityTrackingLock.Lock() - defer trieToCommit.validityTrackingLock.Unlock() +// commitValueChanges is a thin wrapper around [valueNodeBatch.Write()] to +// provide tracing. +func (db *merkleDB) commitValueChanges(ctx context.Context, valueNodeBatch database.Batch) error { + _, span := db.infoTracer.Start(ctx, "MerkleDB.commitValueChanges") + defer span.End() - for _, childView := range trieToCommit.childViews { - childView.updateParent(db) - db.childViews = append(db.childViews, childView) - } - trieToCommit.childViews = make([]*view, 0, defaultPreallocationSize) + return valueNodeBatch.Write() } // CommitToDB is a no-op for db since it is already in sync with itself. @@ -1168,23 +1228,21 @@ func (db *merkleDB) invalidateChildrenExcept(exception *view) { // Otherwise leave [db.root] as Nothing. func (db *merkleDB) initializeRoot() error { rootKeyBytes, err := db.baseDB.Get(rootDBKey) + if errors.Is(err, database.ErrNotFound) { + return nil // Root isn't on disk. + } if err != nil { - if !errors.Is(err, database.ErrNotFound) { - return err - } - // Root isn't on disk. - return nil + return err } // Root is on disk. - rootKey, err := codec.decodeKey(rootKeyBytes) + rootKey, err := decodeKey(rootKeyBytes) if err != nil { return err } // First, see if root is an intermediate node. 
- var root *node - root, err = db.getEditableNode(rootKey, false /* hasValue */) + root, err := db.getEditableNode(rootKey, false /* hasValue */) if err != nil { if !errors.Is(err, database.ErrNotFound) { return err @@ -1197,7 +1255,9 @@ func (db *merkleDB) initializeRoot() error { } } - db.rootID = root.calculateID(db.metrics) + db.rootID = db.hasher.HashNode(root) + db.metrics.HashCalculated() + db.root = maybe.Some(root) return nil } @@ -1319,37 +1379,21 @@ func (db *merkleDB) getTokenSize() int { } // Returns [key] prefixed by [prefix]. -// The returned []byte is taken from [bufferPool] and -// should be returned to it when the caller is done with it. -func addPrefixToKey(bufferPool *sync.Pool, prefix []byte, key []byte) []byte { +// The returned *[]byte is taken from [bufferPool] and should be returned to it +// when the caller is done with it. +func addPrefixToKey(bufferPool *utils.BytesPool, prefix []byte, key []byte) *[]byte { prefixLen := len(prefix) keyLen := prefixLen + len(key) - prefixedKey := getBufferFromPool(bufferPool, keyLen) - copy(prefixedKey, prefix) - copy(prefixedKey[prefixLen:], key) + prefixedKey := bufferPool.Get(keyLen) + copy(*prefixedKey, prefix) + copy((*prefixedKey)[prefixLen:], key) return prefixedKey } -// Returns a []byte from [bufferPool] with length exactly [size]. -// The []byte is not guaranteed to be zeroed. -func getBufferFromPool(bufferPool *sync.Pool, size int) []byte { - buffer := bufferPool.Get().([]byte) - if cap(buffer) >= size { - // The [] byte we got from the pool is big enough to hold the prefixed key - buffer = buffer[:size] - } else { - // The []byte from the pool wasn't big enough. - // Put it back and allocate a new, bigger one - bufferPool.Put(buffer) - buffer = make([]byte, size) - } - return buffer -} - // cacheEntrySize returns a rough approximation of the memory consumed by storing the key and node. 
func cacheEntrySize(key Key, n *node) int { if n == nil { return cacheEntryOverHead + len(key.Bytes()) } - return cacheEntryOverHead + len(key.Bytes()) + codec.encodedDBNodeSize(&n.dbNode) + return cacheEntryOverHead + len(key.Bytes()) + encodedDBNodeSize(&n.dbNode) } diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index ed8dc568b40f..f18e5053854b 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -6,6 +6,7 @@ package merkledb import ( "bytes" "context" + "encoding/binary" "fmt" "math/rand" "slices" @@ -39,14 +40,17 @@ func newDB(ctx context.Context, db database.Database, config Config) (*merkleDB, func newDefaultConfig() Config { return Config{ - IntermediateWriteBatchSize: 10, + BranchFactor: BranchFactor16, + Hasher: DefaultHasher, + RootGenConcurrency: 0, HistoryLength: defaultHistoryLength, ValueNodeCacheSize: units.MiB, IntermediateNodeCacheSize: units.MiB, IntermediateWriteBufferSize: units.KiB, + IntermediateWriteBatchSize: 256 * units.KiB, Reg: prometheus.NewRegistry(), + TraceLevel: InfoTrace, Tracer: trace.Noop, - BranchFactor: BranchFactor16, } } @@ -961,6 +965,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, token end, root, tokenSize, + db.hasher, )) case opGenerateChangeProof: root, err := db.GetMerkleRoot(context.Background()) @@ -1061,11 +1066,11 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest, token }) } - newView, err := newDB.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view, err := newDB.NewView(context.Background(), ViewChanges{BatchOps: ops}) require.NoError(err) // Check that the root of the view is the same as the root of [db] - newRoot, err := newView.GetMerkleRoot(context.Background()) + newRoot, err := view.GetMerkleRoot(context.Background()) require.NoError(err) dbRoot, err := db.GetMerkleRoot(context.Background()) @@ -1287,3 +1292,115 @@ func TestGetChangeProofEmptyRootID(t *testing.T) { ) require.ErrorIs(err, ErrEmptyProof) } + +func 
TestCrashRecovery(t *testing.T) { + require := require.New(t) + + baseDB := memdb.New() + merkleDB, err := newDatabase( + context.Background(), + baseDB, + newDefaultConfig(), + &mockMetrics{}, + ) + require.NoError(err) + + merkleDBBatch := merkleDB.NewBatch() + require.NoError(merkleDBBatch.Put([]byte("is this"), []byte("hope"))) + require.NoError(merkleDBBatch.Put([]byte("expected?"), []byte("so"))) + require.NoError(merkleDBBatch.Write()) + + expectedRoot, err := merkleDB.GetMerkleRoot(context.Background()) + require.NoError(err) + + // Do not `.Close()` the database to simulate a process crash. + + newMerkleDB, err := newDatabase( + context.Background(), + baseDB, + newDefaultConfig(), + &mockMetrics{}, + ) + require.NoError(err) + + value, err := newMerkleDB.Get([]byte("is this")) + require.NoError(err) + require.Equal([]byte("hope"), value) + + value, err = newMerkleDB.Get([]byte("expected?")) + require.NoError(err) + require.Equal([]byte("so"), value) + + rootAfterRecovery, err := newMerkleDB.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(expectedRoot, rootAfterRecovery) +} + +func BenchmarkCommitView(b *testing.B) { + db, err := getBasicDB() + require.NoError(b, err) + + ops := make([]database.BatchOp, 1_000) + for i := range ops { + k := binary.AppendUvarint(nil, uint64(i)) + ops[i] = database.BatchOp{ + Key: k, + Value: hashing.ComputeHash256(k), + } + } + + ctx := context.Background() + viewIntf, err := db.NewView(ctx, ViewChanges{BatchOps: ops}) + require.NoError(b, err) + + view := viewIntf.(*view) + require.NoError(b, view.applyValueChanges(ctx)) + + b.Run("apply and commit changes", func(b *testing.B) { + require := require.New(b) + + for i := 0; i < b.N; i++ { + db.baseDB = memdb.New() // Keep each iteration independent + + valueNodeBatch := db.baseDB.NewBatch() + require.NoError(db.applyChanges(ctx, valueNodeBatch, view.changes)) + require.NoError(db.commitValueChanges(ctx, valueNodeBatch)) + } + }) +} + +func 
BenchmarkIteration(b *testing.B) { + db, err := getBasicDB() + require.NoError(b, err) + + ops := make([]database.BatchOp, 1_000) + for i := range ops { + k := binary.AppendUvarint(nil, uint64(i)) + ops[i] = database.BatchOp{ + Key: k, + Value: hashing.ComputeHash256(k), + } + } + + ctx := context.Background() + view, err := db.NewView(ctx, ViewChanges{BatchOps: ops}) + require.NoError(b, err) + + require.NoError(b, view.CommitToDB(ctx)) + + b.Run("create iterator", func(b *testing.B) { + for i := 0; i < b.N; i++ { + it := db.NewIterator() + it.Release() + } + }) + + b.Run("iterate", func(b *testing.B) { + for i := 0; i < b.N; i++ { + it := db.NewIterator() + for it.Next() { + } + it.Release() + } + }) +} diff --git a/x/merkledb/hashing.go b/x/merkledb/hashing.go new file mode 100644 index 000000000000..a2a3fe90da34 --- /dev/null +++ b/x/merkledb/hashing.go @@ -0,0 +1,99 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "crypto/sha256" + "encoding/binary" + "slices" + + "github.com/ava-labs/avalanchego/ids" +) + +// TODO: Support configurable hash lengths +const HashLength = 32 + +var ( + SHA256Hasher Hasher = &sha256Hasher{} + + // If a Hasher isn't specified, this package defaults to using the + // [SHA256Hasher]. + DefaultHasher = SHA256Hasher +) + +type Hasher interface { + // Returns the canonical hash of the non-nil [node]. + HashNode(node *node) ids.ID + // Returns the canonical hash of [value]. + HashValue(value []byte) ids.ID +} + +type sha256Hasher struct{} + +// This method is performance critical. It is not expected to perform any memory +// allocations. +func (*sha256Hasher) HashNode(n *node) ids.ID { + var ( + // sha.Write always returns nil, so we ignore its return values. + sha = sha256.New() + hash ids.ID + // The hash length is larger than the maximum Uvarint length. This + // ensures binary.AppendUvarint doesn't perform any memory allocations. 
+ emptyHashBuffer = hash[:0] + ) + + // By directly calling sha.Write rather than passing sha around as an + // io.Writer, the compiler can perform sufficient escape analysis to avoid + // allocating buffers on the heap. + numChildren := len(n.children) + _, _ = sha.Write(binary.AppendUvarint(emptyHashBuffer, uint64(numChildren))) + + // Avoid allocating keys entirely if the node doesn't have any children. + if numChildren != 0 { + // By allocating BranchFactorLargest rather than [numChildren], this + // slice is allocated on the stack rather than the heap. + // BranchFactorLargest is at least [numChildren] which avoids memory + // allocations. + keys := make([]byte, numChildren, BranchFactorLargest) + i := 0 + for k := range n.children { + keys[i] = k + i++ + } + + // Ensure that the order of entries is correct. + slices.Sort(keys) + for _, index := range keys { + entry := n.children[index] + _, _ = sha.Write(binary.AppendUvarint(emptyHashBuffer, uint64(index))) + _, _ = sha.Write(entry.id[:]) + } + } + + if n.valueDigest.HasValue() { + _, _ = sha.Write(trueBytes) + value := n.valueDigest.Value() + _, _ = sha.Write(binary.AppendUvarint(emptyHashBuffer, uint64(len(value)))) + _, _ = sha.Write(value) + } else { + _, _ = sha.Write(falseBytes) + } + + _, _ = sha.Write(binary.AppendUvarint(emptyHashBuffer, uint64(n.key.length))) + _, _ = sha.Write(n.key.Bytes()) + sha.Sum(emptyHashBuffer) + return hash +} + +// This method is performance critical. It is not expected to perform any memory +// allocations. +func (*sha256Hasher) HashValue(value []byte) ids.ID { + sha := sha256.New() + // sha.Write always returns nil, so we ignore its return values. + _, _ = sha.Write(value) + + var hash ids.ID + sha.Sum(hash[:0]) + return hash +} diff --git a/x/merkledb/hashing_test.go b/x/merkledb/hashing_test.go new file mode 100644 index 000000000000..82f6d11f2e56 --- /dev/null +++ b/x/merkledb/hashing_test.go @@ -0,0 +1,157 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/maybe" +) + +var sha256HashNodeTests = []struct { + name string + n *node + expectedHash string +}{ + { + name: "empty node", + n: newNode(Key{}), + expectedHash: "rbhtxoQ1DqWHvb6w66BZdVyjmPAneZUSwQq9uKj594qvFSdav", + }, + { + name: "has value", + n: func() *node { + n := newNode(Key{}) + n.setValue(SHA256Hasher, maybe.Some([]byte("value1"))) + return n + }(), + expectedHash: "2vx2xueNdWoH2uB4e8hbMU5jirtZkZ1c3ePCWDhXYaFRHpCbnQ", + }, + { + name: "has key", + n: newNode(ToKey([]byte{0, 1, 2, 3, 4, 5, 6, 7})), + expectedHash: "2vA8ggXajhFEcgiF8zHTXgo8T2ALBFgffp1xfn48JEni1Uj5uK", + }, + { + name: "1 child", + n: func() *node { + n := newNode(Key{}) + childNode := newNode(ToKey([]byte{255})) + childNode.setValue(SHA256Hasher, maybe.Some([]byte("value1"))) + n.addChildWithID(childNode, 4, SHA256Hasher.HashNode(childNode)) + return n + }(), + expectedHash: "YfJRufqUKBv9ez6xZx6ogpnfDnw9fDsyebhYDaoaH57D3vRu3", + }, + { + name: "2 children", + n: func() *node { + n := newNode(Key{}) + + childNode1 := newNode(ToKey([]byte{255})) + childNode1.setValue(SHA256Hasher, maybe.Some([]byte("value1"))) + + childNode2 := newNode(ToKey([]byte{237})) + childNode2.setValue(SHA256Hasher, maybe.Some([]byte("value2"))) + + n.addChildWithID(childNode1, 4, SHA256Hasher.HashNode(childNode1)) + n.addChildWithID(childNode2, 4, SHA256Hasher.HashNode(childNode2)) + return n + }(), + expectedHash: "YVmbx5MZtSKuYhzvHnCqGrswQcxmozAkv7xE1vTA2EiGpWUkv", + }, + { + name: "16 children", + n: func() *node { + n := newNode(Key{}) + + for i := byte(0); i < 16; i++ { + childNode := newNode(ToKey([]byte{i << 4})) + childNode.setValue(SHA256Hasher, maybe.Some([]byte("some value"))) + + n.addChildWithID(childNode, 4, SHA256Hasher.HashNode(childNode)) + } + return n 
+ }(), + expectedHash: "5YiFLL7QV3f441See9uWePi3wVKsx9fgvX5VPhU8PRxtLqhwY", + }, +} + +// Ensure that SHA256.HashNode is deterministic +func Fuzz_SHA256_HashNode(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + randSeed int, + ) { + require := require.New(t) + for _, bf := range validBranchFactors { // Create a random node + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + children := map[byte]*child{} + numChildren := r.Intn(int(bf)) // #nosec G404 + for i := 0; i < numChildren; i++ { + compressedKeyLen := r.Intn(32) // #nosec G404 + compressedKeyBytes := make([]byte, compressedKeyLen) + _, _ = r.Read(compressedKeyBytes) // #nosec G404 + + children[byte(i)] = &child{ + compressedKey: ToKey(compressedKeyBytes), + id: ids.GenerateTestID(), + hasValue: r.Intn(2) == 1, // #nosec G404 + } + } + + hasValue := r.Intn(2) == 1 // #nosec G404 + value := maybe.Nothing[[]byte]() + if hasValue { + valueBytes := make([]byte, r.Intn(64)) // #nosec G404 + _, _ = r.Read(valueBytes) // #nosec G404 + value = maybe.Some(valueBytes) + } + + key := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(key) // #nosec G404 + + hv := &node{ + key: ToKey(key), + dbNode: dbNode{ + children: children, + value: value, + }, + } + + // Hash hv multiple times + hash1 := SHA256Hasher.HashNode(hv) + hash2 := SHA256Hasher.HashNode(hv) + + // Make sure they're the same + require.Equal(hash1, hash2) + } + }, + ) +} + +func Test_SHA256_HashNode(t *testing.T) { + for _, test := range sha256HashNodeTests { + t.Run(test.name, func(t *testing.T) { + hash := SHA256Hasher.HashNode(test.n) + require.Equal(t, test.expectedHash, hash.String()) + }) + } +} + +func Benchmark_SHA256_HashNode(b *testing.B) { + for _, benchmark := range sha256HashNodeTests { + b.Run(benchmark.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + SHA256Hasher.HashNode(benchmark.n) + } + }) + } +} diff --git a/x/merkledb/history.go b/x/merkledb/history.go index 22d87cd1cb48..bd41d29268ed 100644 --- 
a/x/merkledb/history.go +++ b/x/merkledb/history.go @@ -57,7 +57,7 @@ type changeSummary struct { // The ID of the trie after these changes. rootID ids.ID // The root before/after this change. - // Set in [calculateNodeIDs]. + // Set in [applyValueChanges]. rootChange change[maybe.Maybe[*node]] nodes map[Key]*change[*node] values map[Key]*change[maybe.Maybe[[]byte]] diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 09c84321f50c..b15995f409e3 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -37,7 +37,7 @@ func Test_History_Simple(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key"), []byte("value0"))) @@ -45,7 +45,7 @@ func Test_History_Simple(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("value1"))) @@ -54,7 +54,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, 
db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) @@ -62,7 +62,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Delete([]byte("k"))) @@ -78,7 +78,7 @@ func Test_History_Simple(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) } func Test_History_Large(t *testing.T) { @@ -141,7 +141,7 @@ func Test_History_Large(t *testing.T) { require.NoError(err) require.NotNil(proof) - require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], BranchFactorToTokenSize[config.BranchFactor])) + require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], BranchFactorToTokenSize[config.BranchFactor], config.Hasher)) } } } @@ -240,6 +240,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { maybe.Some([]byte("key3")), origRootID, db.tokenSize, + db.hasher, )) // write a new value 
into the db, now there should be 2 roots in the history @@ -257,6 +258,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { maybe.Some([]byte("key3")), origRootID, db.tokenSize, + db.hasher, )) // trigger a new root to be added to the history, which should cause rollover since there can only be 2 @@ -338,7 +340,7 @@ func Test_History_RepeatedRoot(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("other"))) @@ -348,7 +350,7 @@ func Test_History_RepeatedRoot(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) // revert state to be the same as in orig proof batch = db.NewBatch() @@ -360,7 +362,7 @@ func Test_History_RepeatedRoot(t *testing.T) { newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) } func Test_History_ExcessDeletes(t *testing.T) { @@ 
-380,7 +382,7 @@ func Test_History_ExcessDeletes(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Delete([]byte("key1"))) @@ -392,7 +394,7 @@ func Test_History_ExcessDeletes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) } func Test_History_DontIncludeAllNodes(t *testing.T) { @@ -412,7 +414,7 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("z"), []byte("z"))) @@ -420,7 +422,7 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + 
require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) } func Test_History_Branching2Nodes(t *testing.T) { @@ -440,7 +442,7 @@ func Test_History_Branching2Nodes(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) @@ -448,7 +450,7 @@ func Test_History_Branching2Nodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) } func Test_History_Branching3Nodes(t *testing.T) { @@ -468,7 +470,7 @@ func Test_History_Branching3Nodes(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) batch = db.NewBatch() require.NoError(batch.Put([]byte("key321"), []byte("value321"))) @@ -476,7 +478,7 @@ func Test_History_Branching3Nodes(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) 
require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize)) + require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher)) } func Test_History_MaxLength(t *testing.T) { diff --git a/x/merkledb/intermediate_node_db.go b/x/merkledb/intermediate_node_db.go index e57dcb31834b..111d2007ed07 100644 --- a/x/merkledb/intermediate_node_db.go +++ b/x/merkledb/intermediate_node_db.go @@ -4,20 +4,16 @@ package merkledb import ( - "sync" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils" ) -const defaultBufferLength = 256 - // Holds intermediate nodes. That is, those without values. // Changes to this database aren't written to [baseDB] until // they're evicted from the [nodeCache] or Flush is called. type intermediateNodeDB struct { - // Holds unused []byte - bufferPool *sync.Pool + bufferPool *utils.BytesPool // The underlying storage. // Keys written to [baseDB] are prefixed with [intermediateNodePrefix]. 
@@ -35,18 +31,20 @@ type intermediateNodeDB struct { // the number of bytes to evict during an eviction batch evictionBatchSize int - metrics merkleMetrics + metrics metrics tokenSize int + hasher Hasher } func newIntermediateNodeDB( db database.Database, - bufferPool *sync.Pool, - metrics merkleMetrics, + bufferPool *utils.BytesPool, + metrics metrics, cacheSize int, writeBufferSize int, evictionBatchSize int, tokenSize int, + hasher Hasher, ) *intermediateNodeDB { result := &intermediateNodeDB{ metrics: metrics, @@ -54,6 +52,7 @@ func newIntermediateNodeDB( bufferPool: bufferPool, evictionBatchSize: evictionBatchSize, tokenSize: tokenSize, + hasher: hasher, nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), } result.writeBuffer = newOnEvictCache( @@ -98,14 +97,15 @@ func (db *intermediateNodeDB) onEviction(key Key, n *node) error { return nil } -func (db *intermediateNodeDB) addToBatch(b database.Batch, key Key, n *node) error { +func (db *intermediateNodeDB) addToBatch(b database.KeyValueWriterDeleter, key Key, n *node) error { dbKey := db.constructDBKey(key) defer db.bufferPool.Put(dbKey) + db.metrics.DatabaseNodeWrite() if n == nil { - return b.Delete(dbKey) + return b.Delete(*dbKey) } - return b.Put(dbKey, n.bytes()) + return b.Put(*dbKey, n.bytes()) } func (db *intermediateNodeDB) Get(key Key) (*node, error) { @@ -116,8 +116,6 @@ func (db *intermediateNodeDB) Get(key Key) (*node, error) { } return cachedValue, nil } - db.metrics.IntermediateNodeCacheMiss() - if cachedValue, isCached := db.writeBuffer.Get(key); isCached { db.metrics.IntermediateNodeCacheHit() if cachedValue == nil { @@ -128,27 +126,45 @@ func (db *intermediateNodeDB) Get(key Key) (*node, error) { db.metrics.IntermediateNodeCacheMiss() dbKey := db.constructDBKey(key) + defer db.bufferPool.Put(dbKey) + db.metrics.DatabaseNodeRead() - nodeBytes, err := db.baseDB.Get(dbKey) + nodeBytes, err := db.baseDB.Get(*dbKey) if err != nil { return nil, err } - db.bufferPool.Put(dbKey) - return 
parseNode(key, nodeBytes) + return parseNode(db.hasher, key, nodeBytes) } // constructDBKey returns a key that can be used in [db.baseDB]. // We need to be able to differentiate between two keys of equal // byte length but different bit length, so we add padding to differentiate. // Additionally, we add a prefix indicating it is part of the intermediateNodeDB. -func (db *intermediateNodeDB) constructDBKey(key Key) []byte { +func (db *intermediateNodeDB) constructDBKey(key Key) *[]byte { if db.tokenSize == 8 { - // For tokens of size byte, no padding is needed since byte length == token length + // For tokens of size byte, no padding is needed since byte + // length == token length return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Bytes()) } - return addPrefixToKey(db.bufferPool, intermediateNodePrefix, key.Extend(ToToken(1, db.tokenSize)).Bytes()) + var ( + prefixLen = len(intermediateNodePrefix) + prefixBitLen = 8 * prefixLen + dualIndex = dualBitIndex(db.tokenSize) + paddingByteValue byte = 1 << dualIndex + paddingSliceValue = []byte{paddingByteValue} + paddingKey = Key{ + value: byteSliceToString(paddingSliceValue), + length: db.tokenSize, + } + ) + + bufferPtr := db.bufferPool.Get(bytesNeeded(prefixBitLen + key.length + db.tokenSize)) + copy(*bufferPtr, intermediateNodePrefix) // add prefix + copy((*bufferPtr)[prefixLen:], key.Bytes()) // add key + extendIntoBuffer(*bufferPtr, paddingKey, prefixBitLen+key.length) // add padding + return bufferPtr } func (db *intermediateNodeDB) Put(key Key, n *node) error { diff --git a/x/merkledb/intermediate_node_db_test.go b/x/merkledb/intermediate_node_db_test.go index 26ad722ffa45..bd97279cc8f0 100644 --- a/x/merkledb/intermediate_node_db_test.go +++ b/x/merkledb/intermediate_node_db_test.go @@ -4,14 +4,16 @@ package merkledb import ( - "sync" + "fmt" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" + 
"github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/maybe" + "github.com/ava-labs/avalanchego/utils/units" ) // Tests: @@ -24,7 +26,7 @@ func Test_IntermediateNodeDB(t *testing.T) { require := require.New(t) n := newNode(ToKey([]byte{0x00})) - n.setValue(maybe.Some([]byte{byte(0x02)})) + n.setValue(DefaultHasher, maybe.Some([]byte{byte(0x02)})) nodeSize := cacheEntrySize(n.key, n) // use exact multiple of node size so require.Equal(1, db.nodeCache.fifo.Len()) is correct later @@ -35,20 +37,19 @@ func Test_IntermediateNodeDB(t *testing.T) { baseDB := memdb.New() db := newIntermediateNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, bufferSize, evictionBatchSize, 4, + DefaultHasher, ) // Put a key-node pair node1Key := ToKey([]byte{0x01}) node1 := newNode(node1Key) - node1.setValue(maybe.Some([]byte{byte(0x01)})) + node1.setValue(DefaultHasher, maybe.Some([]byte{byte(0x01)})) require.NoError(db.Put(node1Key, node1)) // Get the key-node pair from cache @@ -58,7 +59,7 @@ func Test_IntermediateNodeDB(t *testing.T) { // Overwrite the key-node pair node1Updated := newNode(node1Key) - node1Updated.setValue(maybe.Some([]byte{byte(0x02)})) + node1Updated.setValue(DefaultHasher, maybe.Some([]byte{byte(0x02)})) require.NoError(db.Put(node1Key, node1Updated)) // Assert the key-node pair was overwritten @@ -79,7 +80,7 @@ func Test_IntermediateNodeDB(t *testing.T) { for { key := ToKey([]byte{byte(added)}) node := newNode(Key{}) - node.setValue(maybe.Some([]byte{byte(added)})) + node.setValue(DefaultHasher, maybe.Some([]byte{byte(added)})) newExpectedSize := expectedSize + cacheEntrySize(key, node) if newExpectedSize > bufferSize { // Don't trigger eviction. @@ -99,7 +100,7 @@ func Test_IntermediateNodeDB(t *testing.T) { // the added key prefix increasing the size tracked by the batch. 
key := ToKey([]byte{byte(added)}) node := newNode(Key{}) - node.setValue(maybe.Some([]byte{byte(added)})) + node.setValue(DefaultHasher, maybe.Some([]byte{byte(added)})) require.NoError(db.Put(key, node)) // Assert cache has expected number of elements @@ -149,14 +150,13 @@ func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) { for _, tokenSize := range validTokenSizes { db := newIntermediateNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, bufferSize, evictionBatchSize, tokenSize, + DefaultHasher, ) p := ToKey(key) @@ -167,18 +167,18 @@ func FuzzIntermediateNodeDBConstructDBKey(f *testing.F) { p = p.Take(int(uBitLength)) constructedKey := db.constructDBKey(p) baseLength := len(p.value) + len(intermediateNodePrefix) - require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) + require.Equal(intermediateNodePrefix, (*constructedKey)[:len(intermediateNodePrefix)]) switch { case tokenSize == 8: // for keys with tokens of size byte, no padding is added - require.Equal(p.Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Equal(p.Bytes(), (*constructedKey)[len(intermediateNodePrefix):]) case p.hasPartialByte(): - require.Len(constructedKey, baseLength) - require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Len(*constructedKey, baseLength) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):]) default: // when a whole number of bytes, there is an extra padding byte - require.Len(constructedKey, baseLength+1) - require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Len(*constructedKey, baseLength+1) + require.Equal(p.Extend(ToToken(1, tokenSize)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):]) } } }) @@ -192,33 +192,28 @@ func 
Test_IntermediateNodeDB_ConstructDBKey_DirtyBuffer(t *testing.T) { baseDB := memdb.New() db := newIntermediateNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, bufferSize, evictionBatchSize, 4, + DefaultHasher, ) - db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) + db.bufferPool.Put(&[]byte{0xFF, 0xFF, 0xFF}) constructedKey := db.constructDBKey(ToKey([]byte{})) - require.Len(constructedKey, 2) - require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) - require.Equal(byte(16), constructedKey[len(constructedKey)-1]) - - db.bufferPool = &sync.Pool{ - New: func() interface{} { - return make([]byte, 0, defaultBufferLength) - }, - } - db.bufferPool.Put([]byte{0xFF, 0xFF, 0xFF}) + require.Len(*constructedKey, 2) + require.Equal(intermediateNodePrefix, (*constructedKey)[:len(intermediateNodePrefix)]) + require.Equal(byte(16), (*constructedKey)[len(*constructedKey)-1]) + + db.bufferPool = utils.NewBytesPool() + db.bufferPool.Put(&[]byte{0xFF, 0xFF, 0xFF}) p := ToKey([]byte{0xF0}).Take(4) constructedKey = db.constructDBKey(p) - require.Len(constructedKey, 2) - require.Equal(intermediateNodePrefix, constructedKey[:len(intermediateNodePrefix)]) - require.Equal(p.Extend(ToToken(1, 4)).Bytes(), constructedKey[len(intermediateNodePrefix):]) + require.Len(*constructedKey, 2) + require.Equal(intermediateNodePrefix, (*constructedKey)[:len(intermediateNodePrefix)]) + require.Equal(p.Extend(ToToken(1, 4)).Bytes(), (*constructedKey)[len(intermediateNodePrefix):]) } func TestIntermediateNodeDBClear(t *testing.T) { @@ -229,14 +224,13 @@ func TestIntermediateNodeDBClear(t *testing.T) { baseDB := memdb.New() db := newIntermediateNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, bufferSize, evictionBatchSize, 4, + DefaultHasher, ) for _, b := range [][]byte{{1}, {2}, {3}} { @@ -265,14 
+259,13 @@ func TestIntermediateNodeDBDeleteEmptyKey(t *testing.T) { baseDB := memdb.New() db := newIntermediateNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, bufferSize, evictionBatchSize, 4, + DefaultHasher, ) emptyKey := ToKey([]byte{}) @@ -280,7 +273,7 @@ func TestIntermediateNodeDBDeleteEmptyKey(t *testing.T) { require.NoError(db.Flush()) emptyDBKey := db.constructDBKey(emptyKey) - has, err := baseDB.Has(emptyDBKey) + has, err := baseDB.Has(*emptyDBKey) require.NoError(err) require.True(has) @@ -288,7 +281,37 @@ func TestIntermediateNodeDBDeleteEmptyKey(t *testing.T) { require.NoError(db.Flush()) emptyDBKey = db.constructDBKey(emptyKey) - has, err = baseDB.Has(emptyDBKey) + has, err = baseDB.Has(*emptyDBKey) require.NoError(err) require.False(has) } + +func Benchmark_IntermediateNodeDB_ConstructDBKey(b *testing.B) { + keyTokenSizes := []int{0, 1, 4, 16, 64, 256} + for _, tokenSize := range validTokenSizes { + db := newIntermediateNodeDB( + memdb.New(), + utils.NewBytesPool(), + &mockMetrics{}, + units.MiB, + units.MiB, + units.MiB, + tokenSize, + DefaultHasher, + ) + + for _, keyTokenSize := range keyTokenSizes { + keyBitSize := keyTokenSize * tokenSize + keyBytes := make([]byte, bytesNeeded(keyBitSize)) + key := Key{ + length: keyBitSize, + value: string(keyBytes), + } + b.Run(fmt.Sprintf("%d/%d", tokenSize, keyTokenSize), func(b *testing.B) { + for i := 0; i < b.N; i++ { + db.bufferPool.Put(db.constructDBKey(key)) + } + }) + } + } +} diff --git a/x/merkledb/key.go b/x/merkledb/key.go index 524c95bb2dae..9febe3313875 100644 --- a/x/merkledb/key.go +++ b/x/merkledb/key.go @@ -48,6 +48,8 @@ const ( BranchFactor4 = BranchFactor(4) BranchFactor16 = BranchFactor(16) BranchFactor256 = BranchFactor(256) + + BranchFactorLargest = BranchFactor256 ) // Valid checks if BranchFactor [b] is one of the predefined valid options for BranchFactor diff --git a/x/merkledb/metrics.go 
b/x/merkledb/metrics.go index 058b4869904a..b9517ce9cd59 100644 --- a/x/merkledb/metrics.go +++ b/x/merkledb/metrics.go @@ -11,87 +11,197 @@ import ( "github.com/ava-labs/avalanchego/utils" ) +const ( + ioType = "type" + readType = "read" + writeType = "write" + + lookupType = "type" + valueNodeCacheType = "valueNodeCache" + intermediateNodeCacheType = "intermediateNodeCache" + viewChangesValueType = "viewChangesValue" + viewChangesNodeType = "viewChangesNode" + + lookupResult = "result" + hitResult = "hit" + missResult = "miss" +) + var ( - _ merkleMetrics = (*mockMetrics)(nil) - _ merkleMetrics = (*metrics)(nil) + _ metrics = (*prometheusMetrics)(nil) + _ metrics = (*mockMetrics)(nil) + + ioLabels = []string{ioType} + ioReadLabels = prometheus.Labels{ + ioType: readType, + } + ioWriteLabels = prometheus.Labels{ + ioType: writeType, + } + + lookupLabels = []string{lookupType, lookupResult} + valueNodeCacheHitLabels = prometheus.Labels{ + lookupType: valueNodeCacheType, + lookupResult: hitResult, + } + valueNodeCacheMissLabels = prometheus.Labels{ + lookupType: valueNodeCacheType, + lookupResult: missResult, + } + intermediateNodeCacheHitLabels = prometheus.Labels{ + lookupType: intermediateNodeCacheType, + lookupResult: hitResult, + } + intermediateNodeCacheMissLabels = prometheus.Labels{ + lookupType: intermediateNodeCacheType, + lookupResult: missResult, + } + viewChangesValueHitLabels = prometheus.Labels{ + lookupType: viewChangesValueType, + lookupResult: hitResult, + } + viewChangesValueMissLabels = prometheus.Labels{ + lookupType: viewChangesValueType, + lookupResult: missResult, + } + viewChangesNodeHitLabels = prometheus.Labels{ + lookupType: viewChangesNodeType, + lookupResult: hitResult, + } + viewChangesNodeMissLabels = prometheus.Labels{ + lookupType: viewChangesNodeType, + lookupResult: missResult, + } ) -type merkleMetrics interface { +type metrics interface { + HashCalculated() DatabaseNodeRead() DatabaseNodeWrite() - HashCalculated() 
ValueNodeCacheHit() ValueNodeCacheMiss() IntermediateNodeCacheHit() IntermediateNodeCacheMiss() - ViewNodeCacheHit() - ViewNodeCacheMiss() - ViewValueCacheHit() - ViewValueCacheMiss() + ViewChangesValueHit() + ViewChangesValueMiss() + ViewChangesNodeHit() + ViewChangesNodeMiss() } -type mockMetrics struct { - lock sync.Mutex - keyReadCount int64 - keyWriteCount int64 - hashCount int64 - valueNodeCacheHit int64 - valueNodeCacheMiss int64 - intermediateNodeCacheHit int64 - intermediateNodeCacheMiss int64 - viewNodeCacheHit int64 - viewNodeCacheMiss int64 - viewValueCacheHit int64 - viewValueCacheMiss int64 +type prometheusMetrics struct { + hashes prometheus.Counter + io *prometheus.CounterVec + lookup *prometheus.CounterVec } -func (m *mockMetrics) HashCalculated() { - m.lock.Lock() - defer m.lock.Unlock() +func newMetrics(namespace string, reg prometheus.Registerer) (metrics, error) { + // TODO: Should we instead return an error if reg is nil? + if reg == nil { + return &mockMetrics{}, nil + } + m := prometheusMetrics{ + hashes: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "hashes", + Help: "cumulative number of nodes hashed", + }), + io: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "io", + Help: "cumulative number of operations performed to the db", + }, ioLabels), + lookup: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Name: "lookup", + Help: "cumulative number of in-memory lookups performed", + }, lookupLabels), + } + err := utils.Err( + reg.Register(m.hashes), + reg.Register(m.io), + reg.Register(m.lookup), + ) + return &m, err +} - m.hashCount++ +func (m *prometheusMetrics) HashCalculated() { + m.hashes.Inc() } -func (m *mockMetrics) DatabaseNodeRead() { - m.lock.Lock() - defer m.lock.Unlock() +func (m *prometheusMetrics) DatabaseNodeRead() { + m.io.With(ioReadLabels).Inc() +} - m.keyReadCount++ +func (m *prometheusMetrics) DatabaseNodeWrite() { + 
m.io.With(ioWriteLabels).Inc() } -func (m *mockMetrics) DatabaseNodeWrite() { - m.lock.Lock() - defer m.lock.Unlock() +func (m *prometheusMetrics) ValueNodeCacheHit() { + m.lookup.With(valueNodeCacheHitLabels).Inc() +} - m.keyWriteCount++ +func (m *prometheusMetrics) ValueNodeCacheMiss() { + m.lookup.With(valueNodeCacheMissLabels).Inc() } -func (m *mockMetrics) ViewNodeCacheHit() { - m.lock.Lock() - defer m.lock.Unlock() +func (m *prometheusMetrics) IntermediateNodeCacheHit() { + m.lookup.With(intermediateNodeCacheHitLabels).Inc() +} + +func (m *prometheusMetrics) IntermediateNodeCacheMiss() { + m.lookup.With(intermediateNodeCacheMissLabels).Inc() +} + +func (m *prometheusMetrics) ViewChangesValueHit() { + m.lookup.With(viewChangesValueHitLabels).Inc() +} - m.viewNodeCacheHit++ +func (m *prometheusMetrics) ViewChangesValueMiss() { + m.lookup.With(viewChangesValueMissLabels).Inc() +} + +func (m *prometheusMetrics) ViewChangesNodeHit() { + m.lookup.With(viewChangesNodeHitLabels).Inc() +} + +func (m *prometheusMetrics) ViewChangesNodeMiss() { + m.lookup.With(viewChangesNodeMissLabels).Inc() +} + +type mockMetrics struct { + lock sync.Mutex + hashCount int64 + nodeReadCount int64 + nodeWriteCount int64 + valueNodeCacheHit int64 + valueNodeCacheMiss int64 + intermediateNodeCacheHit int64 + intermediateNodeCacheMiss int64 + viewChangesValueHit int64 + viewChangesValueMiss int64 + viewChangesNodeHit int64 + viewChangesNodeMiss int64 } -func (m *mockMetrics) ViewValueCacheHit() { +func (m *mockMetrics) HashCalculated() { m.lock.Lock() defer m.lock.Unlock() - m.viewValueCacheHit++ + m.hashCount++ } -func (m *mockMetrics) ViewNodeCacheMiss() { +func (m *mockMetrics) DatabaseNodeRead() { m.lock.Lock() defer m.lock.Unlock() - m.viewNodeCacheMiss++ + m.nodeReadCount++ } -func (m *mockMetrics) ViewValueCacheMiss() { +func (m *mockMetrics) DatabaseNodeWrite() { m.lock.Lock() defer m.lock.Unlock() - m.viewValueCacheMiss++ + m.nodeWriteCount++ } func (m *mockMetrics) 
ValueNodeCacheHit() { @@ -122,138 +232,30 @@ func (m *mockMetrics) IntermediateNodeCacheMiss() { m.intermediateNodeCacheMiss++ } -type metrics struct { - ioKeyWrite prometheus.Counter - ioKeyRead prometheus.Counter - hashCount prometheus.Counter - intermediateNodeCacheHit prometheus.Counter - intermediateNodeCacheMiss prometheus.Counter - valueNodeCacheHit prometheus.Counter - valueNodeCacheMiss prometheus.Counter - viewNodeCacheHit prometheus.Counter - viewNodeCacheMiss prometheus.Counter - viewValueCacheHit prometheus.Counter - viewValueCacheMiss prometheus.Counter -} - -func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, error) { - // TODO: Should we instead return an error if reg is nil? - if reg == nil { - return &mockMetrics{}, nil - } - m := metrics{ - ioKeyWrite: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "io_key_write", - Help: "cumulative amount of io write to the key db", - }), - ioKeyRead: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "io_key_read", - Help: "cumulative amount of io read to the key db", - }), - hashCount: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "hashes_calculated", - Help: "cumulative number of node hashes done", - }), - valueNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "value_node_cache_hit", - Help: "cumulative amount of hits on the value node db cache", - }), - valueNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "value_node_cache_miss", - Help: "cumulative amount of misses on the value node db cache", - }), - intermediateNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "intermediate_node_cache_hit", - Help: "cumulative amount of hits on the intermediate node db cache", - }), - intermediateNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - 
Name: "intermediate_node_cache_miss", - Help: "cumulative amount of misses on the intermediate node db cache", - }), - viewNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "view_node_cache_hit", - Help: "cumulative amount of hits on the view node cache", - }), - viewNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "view_node_cache_miss", - Help: "cumulative amount of misses on the view node cache", - }), - viewValueCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "view_value_cache_hit", - Help: "cumulative amount of hits on the view value cache", - }), - viewValueCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "view_value_cache_miss", - Help: "cumulative amount of misses on the view value cache", - }), - } - err := utils.Err( - reg.Register(m.ioKeyWrite), - reg.Register(m.ioKeyRead), - reg.Register(m.hashCount), - reg.Register(m.valueNodeCacheHit), - reg.Register(m.valueNodeCacheMiss), - reg.Register(m.intermediateNodeCacheHit), - reg.Register(m.intermediateNodeCacheMiss), - reg.Register(m.viewNodeCacheHit), - reg.Register(m.viewNodeCacheMiss), - reg.Register(m.viewValueCacheHit), - reg.Register(m.viewValueCacheMiss), - ) - return &m, err -} - -func (m *metrics) DatabaseNodeRead() { - m.ioKeyRead.Inc() -} - -func (m *metrics) DatabaseNodeWrite() { - m.ioKeyWrite.Inc() -} - -func (m *metrics) HashCalculated() { - m.hashCount.Inc() -} - -func (m *metrics) ViewNodeCacheHit() { - m.viewNodeCacheHit.Inc() -} +func (m *mockMetrics) ViewChangesValueHit() { + m.lock.Lock() + defer m.lock.Unlock() -func (m *metrics) ViewNodeCacheMiss() { - m.viewNodeCacheMiss.Inc() + m.viewChangesValueHit++ } -func (m *metrics) ViewValueCacheHit() { - m.viewValueCacheHit.Inc() -} +func (m *mockMetrics) ViewChangesValueMiss() { + m.lock.Lock() + defer m.lock.Unlock() -func (m *metrics) ViewValueCacheMiss() { - 
m.viewValueCacheMiss.Inc() + m.viewChangesValueMiss++ } -func (m *metrics) IntermediateNodeCacheHit() { - m.intermediateNodeCacheHit.Inc() -} +func (m *mockMetrics) ViewChangesNodeHit() { + m.lock.Lock() + defer m.lock.Unlock() -func (m *metrics) IntermediateNodeCacheMiss() { - m.intermediateNodeCacheMiss.Inc() + m.viewChangesNodeHit++ } -func (m *metrics) ValueNodeCacheHit() { - m.valueNodeCacheHit.Inc() -} +func (m *mockMetrics) ViewChangesNodeMiss() { + m.lock.Lock() + defer m.lock.Unlock() -func (m *metrics) ValueNodeCacheMiss() { - m.valueNodeCacheMiss.Inc() + m.viewChangesNodeMiss++ } diff --git a/x/merkledb/metrics_test.go b/x/merkledb/metrics_test.go index 20c4accbd13c..35f63a504a39 100644 --- a/x/merkledb/metrics_test.go +++ b/x/merkledb/metrics_test.go @@ -26,27 +26,27 @@ func Test_Metrics_Basic_Usage(t *testing.T) { ) require.NoError(t, err) - db.metrics.(*mockMetrics).keyReadCount = 0 - db.metrics.(*mockMetrics).keyWriteCount = 0 + db.metrics.(*mockMetrics).nodeReadCount = 0 + db.metrics.(*mockMetrics).nodeWriteCount = 0 db.metrics.(*mockMetrics).hashCount = 0 require.NoError(t, db.Put([]byte("key"), []byte("value"))) - require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) - require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyWriteCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).nodeReadCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).nodeWriteCount) require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) require.NoError(t, db.Delete([]byte("key"))) - require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) - require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).nodeReadCount) + require.Equal(t, int64(2), db.metrics.(*mockMetrics).nodeWriteCount) require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) _, err = db.Get([]byte("key2")) require.ErrorIs(t, err, database.ErrNotFound) - require.Equal(t, int64(2), 
db.metrics.(*mockMetrics).keyReadCount) - require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) + require.Equal(t, int64(2), db.metrics.(*mockMetrics).nodeReadCount) + require.Equal(t, int64(2), db.metrics.(*mockMetrics).nodeWriteCount) require.Equal(t, int64(1), db.metrics.(*mockMetrics).hashCount) } diff --git a/x/merkledb/node.go b/x/merkledb/node.go index dd1f2ed65cd2..67273bd6f587 100644 --- a/x/merkledb/node.go +++ b/x/merkledb/node.go @@ -7,12 +7,9 @@ import ( "slices" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/maybe" ) -const HashLength = 32 - // Representation of a node stored in the database. type dbNode struct { value maybe.Maybe[[]byte] @@ -43,9 +40,9 @@ func newNode(key Key) *node { } // Parse [nodeBytes] to a node and set its key to [key]. -func parseNode(key Key, nodeBytes []byte) (*node, error) { +func parseNode(hasher Hasher, key Key, nodeBytes []byte) (*node, error) { n := dbNode{} - if err := codec.decodeDBNode(nodeBytes, &n); err != nil { + if err := decodeDBNode(nodeBytes, &n); err != nil { return nil, err } result := &node{ @@ -53,7 +50,7 @@ func parseNode(key Key, nodeBytes []byte) (*node, error) { key: key, } - result.setValueDigest() + result.setValueDigest(hasher) return result, nil } @@ -64,27 +61,21 @@ func (n *node) hasValue() bool { // Returns the byte representation of this node. func (n *node) bytes() []byte { - return codec.encodeDBNode(&n.dbNode) -} - -// Returns and caches the ID of this node. -func (n *node) calculateID(metrics merkleMetrics) ids.ID { - metrics.HashCalculated() - bytes := codec.encodeHashValues(n) - return hashing.ComputeHash256Array(bytes) + return encodeDBNode(&n.dbNode) } // Set [n]'s value to [val]. 
-func (n *node) setValue(val maybe.Maybe[[]byte]) { +func (n *node) setValue(hasher Hasher, val maybe.Maybe[[]byte]) { n.value = val - n.setValueDigest() + n.setValueDigest(hasher) } -func (n *node) setValueDigest() { +func (n *node) setValueDigest(hasher Hasher) { if n.value.IsNothing() || len(n.value.Value()) < HashLength { n.valueDigest = n.value } else { - n.valueDigest = maybe.Some(hashing.ComputeHash256(n.value.Value())) + hash := hasher.HashValue(n.value.Value()) + n.valueDigest = maybe.Some(hash[:]) } } diff --git a/x/merkledb/node_test.go b/x/merkledb/node_test.go index 3c09679570f3..c99b83b3ec9e 100644 --- a/x/merkledb/node_test.go +++ b/x/merkledb/node_test.go @@ -19,14 +19,13 @@ func Test_Node_Marshal(t *testing.T) { fullKey := ToKey([]byte("key")) childNode := newNode(fullKey) root.addChild(childNode, 4) - childNode.setValue(maybe.Some([]byte("value"))) + childNode.setValue(DefaultHasher, maybe.Some([]byte("value"))) require.NotNil(t, childNode) - childNode.calculateID(&mockMetrics{}) root.addChild(childNode, 4) data := root.bytes() - rootParsed, err := parseNode(ToKey([]byte("")), data) + rootParsed, err := parseNode(DefaultHasher, ToKey([]byte("")), data) require.NoError(t, err) require.Len(t, rootParsed.children, 1) @@ -44,26 +43,24 @@ func Test_Node_Marshal_Errors(t *testing.T) { fullKey := ToKey([]byte{255}) childNode1 := newNode(fullKey) root.addChild(childNode1, 4) - childNode1.setValue(maybe.Some([]byte("value1"))) + childNode1.setValue(DefaultHasher, maybe.Some([]byte("value1"))) require.NotNil(t, childNode1) - childNode1.calculateID(&mockMetrics{}) root.addChild(childNode1, 4) fullKey = ToKey([]byte{237}) childNode2 := newNode(fullKey) root.addChild(childNode2, 4) - childNode2.setValue(maybe.Some([]byte("value2"))) + childNode2.setValue(DefaultHasher, maybe.Some([]byte("value2"))) require.NotNil(t, childNode2) - childNode2.calculateID(&mockMetrics{}) root.addChild(childNode2, 4) data := root.bytes() for i := 1; i < len(data); i++ { broken := 
data[:i] - _, err := parseNode(ToKey([]byte("")), broken) + _, err := parseNode(DefaultHasher, ToKey([]byte("")), broken) require.ErrorIs(t, err, io.ErrUnexpectedEOF) } } diff --git a/x/merkledb/proof.go b/x/merkledb/proof.go index 8ddd97ffa5f9..a938ee80faf0 100644 --- a/x/merkledb/proof.go +++ b/x/merkledb/proof.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/trace" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/maybe" pb "github.com/ava-labs/avalanchego/proto/pb/sync" @@ -133,7 +132,12 @@ type Proof struct { // Verify returns nil if the trie given in [proof] has root [expectedRootID]. // That is, this is a valid proof that [proof.Key] exists/doesn't exist // in the trie with root [expectedRootID]. -func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize int) error { +func (proof *Proof) Verify( + ctx context.Context, + expectedRootID ids.ID, + tokenSize int, + hasher Hasher, +) error { // Make sure the proof is well-formed. if len(proof.Path) == 0 { return ErrEmptyProof @@ -152,7 +156,7 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize // and thus has a whole number of bytes if !lastNode.Key.hasPartialByte() && proof.Key == lastNode.Key && - !valueOrHashMatches(proof.Value, lastNode.ValueOrHash) { + !valueOrHashMatches(hasher, proof.Value, lastNode.ValueOrHash) { return ErrProofValueDoesntMatch } @@ -174,7 +178,7 @@ func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID, tokenSize // Insert all proof nodes. // [provenKey] is the key that we are proving exists, or the key // that is the next key along the node path, proving that [proof.Key] doesn't exist in the trie. 
- provenKey := maybe.Some(proof.Path[len(proof.Path)-1].Key) + provenKey := maybe.Some(lastNode.Key) if err = addPathInfo(view, proof.Path, provenKey, provenKey); err != nil { return err @@ -278,6 +282,7 @@ func (proof *RangeProof) Verify( end maybe.Maybe[[]byte], expectedRootID ids.ID, tokenSize int, + hasher Hasher, ) error { switch { case start.HasValue() && end.HasValue() && bytes.Compare(start.Value(), end.Value()) > 0: @@ -325,6 +330,7 @@ func (proof *RangeProof) Verify( return err } if err := verifyAllRangeProofKeyValuesPresent( + hasher, proof.StartProof, smallestProvenKey, largestProvenKey, @@ -339,6 +345,7 @@ func (proof *RangeProof) Verify( return err } if err := verifyAllRangeProofKeyValuesPresent( + hasher, proof.EndProof, smallestProvenKey, largestProvenKey, @@ -453,7 +460,13 @@ func (proof *RangeProof) UnmarshalProto(pbProof *pb.RangeProof) error { // Verify that all non-intermediate nodes in [proof] which have keys // in [[start], [end]] have the value given for that key in [keysValues]. -func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start maybe.Maybe[Key], end maybe.Maybe[Key], keysValues map[Key][]byte) error { +func verifyAllRangeProofKeyValuesPresent( + hasher Hasher, + proof []ProofNode, + start maybe.Maybe[Key], + end maybe.Maybe[Key], + keysValues map[Key][]byte, +) error { for i := 0; i < len(proof); i++ { var ( node = proof[i] @@ -467,7 +480,7 @@ func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start maybe.Maybe[Ke // We didn't get a key-value pair for this key, but the proof node has a value. return ErrProofNodeHasUnincludedValue } - if ok && !valueOrHashMatches(maybe.Some(value), node.ValueOrHash) { + if ok && !valueOrHashMatches(hasher, maybe.Some(value), node.ValueOrHash) { // We got a key-value pair for this key, but the value in the proof // node doesn't match the value we got for this key. 
return ErrProofValueDoesntMatch @@ -620,7 +633,7 @@ func (proof *ChangeProof) UnmarshalProto(pbProof *pb.ChangeProof) error { // - if the node's key is within the key range, that has a value that matches the value passed in the change list or in the db func verifyAllChangeProofKeyValuesPresent( ctx context.Context, - db MerkleDB, + db *merkleDB, proof []ProofNode, start maybe.Maybe[Key], end maybe.Maybe[Key], @@ -650,7 +663,7 @@ func verifyAllChangeProofKeyValuesPresent( value = maybe.Some(dbValue) } } - if !valueOrHashMatches(value, node.ValueOrHash) { + if !valueOrHashMatches(db.hasher, value, node.ValueOrHash) { return ErrProofValueDoesntMatch } } @@ -734,8 +747,8 @@ func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error { currentProofNode := proof[i] nodeKey := currentProofNode.Key - // Because the interface only support []byte keys, - // a key with a partial byte should store a value + // Because the interface only supports []byte keys, + // a key with a partial byte may not store a value if nodeKey.hasPartialByte() && proof[i].ValueOrHash.HasValue() { return ErrPartialByteLengthWithValue } @@ -765,7 +778,11 @@ func verifyProofPath(proof []ProofNode, key maybe.Maybe[Key]) error { // Returns true if [value] and [valueDigest] match. // [valueOrHash] should be the [ValueOrHash] field of a [ProofNode]. 
-func valueOrHashMatches(value maybe.Maybe[[]byte], valueOrHash maybe.Maybe[[]byte]) bool { +func valueOrHashMatches( + hasher Hasher, + value maybe.Maybe[[]byte], + valueOrHash maybe.Maybe[[]byte], +) bool { var ( valueIsNothing = value.IsNothing() digestIsNothing = valueOrHash.IsNothing() @@ -781,8 +798,8 @@ func valueOrHashMatches(value maybe.Maybe[[]byte], valueOrHash maybe.Maybe[[]byt case len(value.Value()) < HashLength: return bytes.Equal(value.Value(), valueOrHash.Value()) default: - valueHash := hashing.ComputeHash256(value.Value()) - return bytes.Equal(valueHash, valueOrHash.Value()) + valueHash := hasher.HashValue(value.Value()) + return bytes.Equal(valueHash[:], valueOrHash.Value()) } } @@ -829,16 +846,17 @@ func addPathInfo( // Add [proofNode]'s children which are outside the range // [insertChildrenLessThan, insertChildrenGreaterThan]. - compressedKey := Key{} for index, childID := range proofNode.Children { + var compressedKey Key if existingChild, ok := n.children[index]; ok { compressedKey = existingChild.compressedKey } childKey := key.Extend(ToToken(index, v.tokenSize), compressedKey) if (shouldInsertLeftChildren && childKey.Less(insertChildrenLessThan.Value())) || (shouldInsertRightChildren && childKey.Greater(insertChildrenGreaterThan.Value())) { - // We didn't set the other values on the child entry, but it doesn't matter. - // We only need the IDs to be correct so that the calculated hash is correct. + // We don't set the [hasValue] field of the child but that's OK. + // We only need the compressed key and ID to be correct so that the + // calculated hash is correct. 
n.setChildEntry( index, &child{ diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index 3bc21962ac54..9378887f60d4 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -23,7 +23,7 @@ import ( func Test_Proof_Empty(t *testing.T) { proof := &Proof{} - err := proof.Verify(context.Background(), ids.Empty, 4) + err := proof.Verify(context.Background(), ids.Empty, 4, DefaultHasher) require.ErrorIs(t, err, ErrEmptyProof) } @@ -43,7 +43,7 @@ func Test_Proof_Simple(t *testing.T) { proof, err := db.GetProof(ctx, []byte{}) require.NoError(err) - require.NoError(proof.Verify(ctx, expectedRoot, 4)) + require.NoError(proof.Verify(ctx, expectedRoot, db.tokenSize, db.hasher)) } func Test_Proof_Verify_Bad_Data(t *testing.T) { @@ -119,7 +119,7 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot(), 4) + err = proof.Verify(context.Background(), db.getMerkleRoot(), db.tokenSize, db.hasher) require.ErrorIs(err, tt.expectedErr) }) } @@ -128,14 +128,14 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { func Test_Proof_ValueOrHashMatches(t *testing.T) { require := require.New(t) - require.True(valueOrHashMatches(maybe.Some([]byte{0}), maybe.Some([]byte{0}))) - require.False(valueOrHashMatches(maybe.Nothing[[]byte](), maybe.Some(hashing.ComputeHash256([]byte{0})))) - require.True(valueOrHashMatches(maybe.Nothing[[]byte](), maybe.Nothing[[]byte]())) + require.True(valueOrHashMatches(SHA256Hasher, maybe.Some([]byte{0}), maybe.Some([]byte{0}))) + require.False(valueOrHashMatches(SHA256Hasher, maybe.Nothing[[]byte](), maybe.Some(hashing.ComputeHash256([]byte{0})))) + require.True(valueOrHashMatches(SHA256Hasher, maybe.Nothing[[]byte](), maybe.Nothing[[]byte]())) - require.False(valueOrHashMatches(maybe.Some([]byte{0}), maybe.Nothing[[]byte]())) - require.False(valueOrHashMatches(maybe.Nothing[[]byte](), maybe.Some([]byte{0}))) - require.False(valueOrHashMatches(maybe.Nothing[[]byte](), 
maybe.Some(hashing.ComputeHash256([]byte{1})))) - require.False(valueOrHashMatches(maybe.Some(hashing.ComputeHash256([]byte{0})), maybe.Nothing[[]byte]())) + require.False(valueOrHashMatches(SHA256Hasher, maybe.Some([]byte{0}), maybe.Nothing[[]byte]())) + require.False(valueOrHashMatches(SHA256Hasher, maybe.Nothing[[]byte](), maybe.Some([]byte{0}))) + require.False(valueOrHashMatches(SHA256Hasher, maybe.Nothing[[]byte](), maybe.Some(hashing.ComputeHash256([]byte{1})))) + require.False(valueOrHashMatches(SHA256Hasher, maybe.Some(hashing.ComputeHash256([]byte{0})), maybe.Nothing[[]byte]())) } func Test_RangeProof_Extra_Value(t *testing.T) { @@ -159,6 +159,7 @@ func Test_RangeProof_Extra_Value(t *testing.T) { maybe.Some([]byte{5, 5}), db.rootID, db.tokenSize, + db.hasher, )) proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) @@ -169,6 +170,7 @@ func Test_RangeProof_Extra_Value(t *testing.T) { maybe.Some([]byte{5, 5}), db.rootID, db.tokenSize, + db.hasher, ) require.ErrorIs(err, ErrInvalidProof) } @@ -239,7 +241,7 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) - err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize) + err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize, db.hasher) require.ErrorIs(err, tt.expectedErr) }) } @@ -299,10 +301,10 @@ func Test_Proof(t *testing.T) { expectedRootID, err := trie.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize)) + require.NoError(proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize, dbTrie.hasher)) proof.Path[0].Key = ToKey([]byte("key1")) - err = proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize) + err = proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize, dbTrie.hasher) 
require.ErrorIs(err, ErrProofNodeNotForKey) } @@ -483,7 +485,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty, 4) + err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty, 4, DefaultHasher) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -523,6 +525,7 @@ func Test_RangeProof(t *testing.T) { maybe.Some([]byte{3, 5}), db.rootID, db.tokenSize, + db.hasher, )) } @@ -577,6 +580,7 @@ func Test_RangeProof_NilStart(t *testing.T) { maybe.Some([]byte("key35")), db.rootID, db.tokenSize, + db.hasher, )) } @@ -617,6 +621,7 @@ func Test_RangeProof_NilEnd(t *testing.T) { maybe.Nothing[[]byte](), db.rootID, db.tokenSize, + db.hasher, )) } @@ -660,6 +665,7 @@ func Test_RangeProof_EmptyValues(t *testing.T) { maybe.Some([]byte("key2")), db.rootID, db.tokenSize, + db.hasher, )) } @@ -1751,6 +1757,7 @@ func FuzzRangeProofInvariants(f *testing.F) { end, rootID, db.tokenSize, + db.hasher, )) // Make sure the start proof doesn't contain any nodes @@ -1796,7 +1803,7 @@ func FuzzRangeProofInvariants(f *testing.F) { rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize, db.hasher)) default: require.NotEmpty(rangeProof.EndProof) @@ -1811,7 +1818,7 @@ func FuzzRangeProofInvariants(f *testing.F) { rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) + require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize, db.hasher)) } }) } @@ -1852,7 +1859,7 @@ func FuzzProofVerification(f *testing.F) { rootID, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize)) + 
require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize, db.hasher)) // Insert a new key-value pair newKey := make([]byte, 32) diff --git a/x/merkledb/trie.go b/x/merkledb/trie.go index 891a90d34e09..26cd6a728983 100644 --- a/x/merkledb/trie.go +++ b/x/merkledb/trie.go @@ -92,11 +92,10 @@ type View interface { CommitToDB(ctx context.Context) error } -// Returns the nodes along the path to [key]. +// Calls [visitNode] on the nodes along the path to [key]. // The first node is the root, and the last node is either the node with the // given [key], if it's in the trie, or the node with the largest prefix of // the [key] if it isn't in the trie. -// Always returns at least the root node. // Assumes [t] doesn't change while this function is running. func visitPathToKey(t Trie, key Key, visitNode func(*node) error) error { maybeRoot := t.getRoot() @@ -137,7 +136,7 @@ func visitPathToKey(t Trie, key Key, visitNode func(*node) error) error { return nil } -// Returns a proof that [bytesPath] is in or not in trie [t]. +// Returns a proof that [key] is in or not in trie [t]. // Assumes [t] doesn't change while this function is running. 
func getProof(t Trie, key []byte) (*Proof, error) { root := t.getRoot() diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index f6dc0351f549..8ef5c9c1ddfc 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -21,7 +21,7 @@ import ( func getNodeValue(t Trie, key string) ([]byte, error) { path := ToKey([]byte(key)) if asView, ok := t.(*view); ok { - if err := asView.calculateNodeIDs(context.Background()); err != nil { + if err := asView.applyValueChanges(context.Background()); err != nil { return nil, err } } @@ -131,7 +131,7 @@ func TestVisitPathToKey(t *testing.T) { require.NoError(err) require.IsType(&view{}, trieIntf) trie = trieIntf.(*view) - require.NoError(trie.calculateNodeIDs(context.Background())) + require.NoError(trie.applyValueChanges(context.Background())) nodePath = make([]*node, 0, 1) require.NoError(visitPathToKey(trie, ToKey(key1), func(n *node) error { @@ -156,7 +156,7 @@ func TestVisitPathToKey(t *testing.T) { require.NoError(err) require.IsType(&view{}, trieIntf) trie = trieIntf.(*view) - require.NoError(trie.calculateNodeIDs(context.Background())) + require.NoError(trie.applyValueChanges(context.Background())) nodePath = make([]*node, 0, 2) require.NoError(visitPathToKey(trie, ToKey(key2), func(n *node) error { @@ -185,7 +185,7 @@ func TestVisitPathToKey(t *testing.T) { require.NoError(err) require.IsType(&view{}, trieIntf) trie = trieIntf.(*view) - require.NoError(trie.calculateNodeIDs(context.Background())) + require.NoError(trie.applyValueChanges(context.Background())) // Trie is: // [] @@ -258,7 +258,7 @@ func Test_Trie_ViewOnCommitedView(t *testing.T) { require.NoError(committedTrie.CommitToDB(context.Background())) - newView, err := committedTrie.NewView( + view, err := committedTrie.NewView( context.Background(), ViewChanges{ BatchOps: []database.BatchOp{ @@ -267,7 +267,7 @@ func Test_Trie_ViewOnCommitedView(t *testing.T) { }, ) require.NoError(err) - require.NoError(newView.CommitToDB(context.Background())) 
+ require.NoError(view.CommitToDB(context.Background())) val0, err := dbTrie.GetValue(context.Background(), []byte{0}) require.NoError(err) @@ -318,7 +318,7 @@ func Test_Trie_WriteToDB(t *testing.T) { rawBytes, err := dbTrie.baseDB.Get(prefixedKey) require.NoError(err) - node, err := parseNode(ToKey(key), rawBytes) + node, err := parseNode(dbTrie.hasher, ToKey(key), rawBytes) require.NoError(err) require.Equal([]byte("value"), node.value.Value()) } @@ -775,7 +775,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { ) require.NoError(err) - require.NoError(newTrie.(*view).calculateNodeIDs(context.Background())) + require.NoError(newTrie.(*view).applyValueChanges(context.Background())) maybeRoot := newTrie.getRoot() require.NoError(err) require.True(maybeRoot.HasValue()) @@ -794,7 +794,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { }, ) require.NoError(err) - require.NoError(newTrie.(*view).calculateNodeIDs(context.Background())) + require.NoError(newTrie.(*view).applyValueChanges(context.Background())) // trie should be empty root := newTrie.getRoot() @@ -861,7 +861,7 @@ func Test_Trie_NodeCollapse(t *testing.T) { ) require.NoError(err) - require.NoError(trie.(*view).calculateNodeIDs(context.Background())) + require.NoError(trie.(*view).applyValueChanges(context.Background())) for _, kv := range kvs { node, err := trie.getEditableNode(ToKey(kv.Key), true) @@ -888,7 +888,7 @@ func Test_Trie_NodeCollapse(t *testing.T) { ) require.NoError(err) - require.NoError(trie.(*view).calculateNodeIDs(context.Background())) + require.NoError(trie.(*view).applyValueChanges(context.Background())) for _, kv := range deletedKVs { _, err := trie.getEditableNode(ToKey(kv.Key), true) @@ -1235,9 +1235,9 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { require.NoError(newTrie.CommitToDB(context.Background())) }() - newView, err := newTrie.NewView(context.Background(), ViewChanges{}) + view, err := newTrie.NewView(context.Background(), ViewChanges{}) require.NoError(err) - 
require.NotNil(newView) + require.NotNil(view) } // Returns the path of the only child of this node. diff --git a/x/merkledb/value_node_db.go b/x/merkledb/value_node_db.go index 8ee7d7436fcc..55411f6b0353 100644 --- a/x/merkledb/value_node_db.go +++ b/x/merkledb/value_node_db.go @@ -4,18 +4,21 @@ package merkledb import ( - "sync" + "errors" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils" ) -var _ database.Iterator = (*iterator)(nil) +var ( + _ database.Iterator = (*iterator)(nil) + + errNodeMissingValue = errors.New("valueNodeDB contains node without a value") +) type valueNodeDB struct { - // Holds unused []byte - bufferPool *sync.Pool + bufferPool *utils.BytesPool // The underlying storage. // Keys written to [baseDB] are prefixed with [valueNodePrefix]. @@ -24,48 +27,58 @@ type valueNodeDB struct { // If a value is nil, the corresponding key isn't in the trie. // Paths in [nodeCache] aren't prefixed with [valueNodePrefix]. 
nodeCache cache.Cacher[Key, *node] - metrics merkleMetrics + metrics metrics + + hasher Hasher closed utils.Atomic[bool] } func newValueNodeDB( db database.Database, - bufferPool *sync.Pool, - metrics merkleMetrics, + bufferPool *utils.BytesPool, + metrics metrics, cacheSize int, + hasher Hasher, ) *valueNodeDB { return &valueNodeDB{ metrics: metrics, baseDB: db, bufferPool: bufferPool, nodeCache: cache.NewSizedLRU(cacheSize, cacheEntrySize), + hasher: hasher, + } +} + +func (db *valueNodeDB) Write(batch database.KeyValueWriterDeleter, key Key, n *node) error { + db.metrics.DatabaseNodeWrite() + db.nodeCache.Put(key, n) + prefixedKey := addPrefixToKey(db.bufferPool, valueNodePrefix, key.Bytes()) + defer db.bufferPool.Put(prefixedKey) + + if n == nil { + return batch.Delete(*prefixedKey) } + return batch.Put(*prefixedKey, n.bytes()) } func (db *valueNodeDB) newIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { prefixedStart := addPrefixToKey(db.bufferPool, valueNodePrefix, start) + defer db.bufferPool.Put(prefixedStart) + prefixedPrefix := addPrefixToKey(db.bufferPool, valueNodePrefix, prefix) - i := &iterator{ + defer db.bufferPool.Put(prefixedPrefix) + + return &iterator{ db: db, - nodeIter: db.baseDB.NewIteratorWithStartAndPrefix(prefixedStart, prefixedPrefix), + nodeIter: db.baseDB.NewIteratorWithStartAndPrefix(*prefixedStart, *prefixedPrefix), } - db.bufferPool.Put(prefixedStart) - db.bufferPool.Put(prefixedPrefix) - return i } func (db *valueNodeDB) Close() { db.closed.Set(true) } -func (db *valueNodeDB) NewBatch() *valueNodeBatch { - return &valueNodeBatch{ - db: db, - ops: make(map[Key]*node, defaultBufferLength), - } -} - func (db *valueNodeDB) Get(key Key) (*node, error) { if cachedValue, isCached := db.nodeCache.Get(key); isCached { db.metrics.ValueNodeCacheHit() @@ -80,12 +93,12 @@ func (db *valueNodeDB) Get(key Key) (*node, error) { defer db.bufferPool.Put(prefixedKey) db.metrics.DatabaseNodeRead() - nodeBytes, err := 
db.baseDB.Get(prefixedKey) + nodeBytes, err := db.baseDB.Get(*prefixedKey) if err != nil { return nil, err } - return parseNode(key, nodeBytes) + return parseNode(db.hasher, key, nodeBytes) } func (db *valueNodeDB) Clear() error { @@ -93,45 +106,11 @@ func (db *valueNodeDB) Clear() error { return database.AtomicClearPrefix(db.baseDB, db.baseDB, valueNodePrefix) } -// Batch of database operations -type valueNodeBatch struct { - db *valueNodeDB - ops map[Key]*node -} - -func (b *valueNodeBatch) Put(key Key, value *node) { - b.ops[key] = value -} - -func (b *valueNodeBatch) Delete(key Key) { - b.ops[key] = nil -} - -// Write flushes any accumulated data to the underlying database. -func (b *valueNodeBatch) Write() error { - dbBatch := b.db.baseDB.NewBatch() - for key, n := range b.ops { - b.db.metrics.DatabaseNodeWrite() - b.db.nodeCache.Put(key, n) - prefixedKey := addPrefixToKey(b.db.bufferPool, valueNodePrefix, key.Bytes()) - if n == nil { - if err := dbBatch.Delete(prefixedKey); err != nil { - return err - } - } else if err := dbBatch.Put(prefixedKey, n.bytes()); err != nil { - return err - } - - b.db.bufferPool.Put(prefixedKey) - } - - return dbBatch.Write() -} - type iterator struct { db *valueNodeDB nodeIter database.Iterator - current *node + key []byte + value []byte err error } @@ -146,21 +125,16 @@ func (i *iterator) Error() error { } func (i *iterator) Key() []byte { - if i.current == nil { - return nil - } - return i.current.key.Bytes() + return i.key } func (i *iterator) Value() []byte { - if i.current == nil { - return nil - } - return i.current.value.Value() + return i.value } func (i *iterator) Next() bool { - i.current = nil + i.key = nil + i.value = nil if i.Error() != nil || i.db.closed.Get() { return false } @@ -169,15 +143,25 @@ func (i *iterator) Next() bool { } i.db.metrics.DatabaseNodeRead() - key := i.nodeIter.Key() - key = key[valueNodePrefixLen:] - n, err := parseNode(ToKey(key), i.nodeIter.Value()) + + r := codecReader{ + b: 
i.nodeIter.Value(), + // We are discarding the other bytes from the node, so we avoid copying + // the value here. + copy: false, + } + maybeValue, err := r.MaybeBytes() if err != nil { i.err = err return false } + if maybeValue.IsNothing() { + i.err = errNodeMissingValue + return false + } - i.current = n + i.key = i.nodeIter.Key()[valueNodePrefixLen:] + i.value = maybeValue.Value() return true } diff --git a/x/merkledb/value_node_db_test.go b/x/merkledb/value_node_db_test.go index 224a4fe94ac1..a86b31ace3b9 100644 --- a/x/merkledb/value_node_db_test.go +++ b/x/merkledb/value_node_db_test.go @@ -4,13 +4,13 @@ package merkledb import ( - "sync" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/maybe" ) @@ -23,11 +23,10 @@ func TestValueNodeDB(t *testing.T) { cacheSize := 10_000 db := newValueNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, + DefaultHasher, ) // Getting a key that doesn't exist should return an error. @@ -42,8 +41,8 @@ func TestValueNodeDB(t *testing.T) { }, key: key, } - batch := db.NewBatch() - batch.Put(key, node1) + batch := db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, node1)) require.NoError(batch.Write()) // Get the key-node pair. @@ -52,8 +51,8 @@ func TestValueNodeDB(t *testing.T) { require.Equal(node1, node1Read) // Delete the key-node pair. - batch = db.NewBatch() - batch.Delete(key) + batch = db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, nil)) require.NoError(batch.Write()) // Key should be gone now. @@ -61,9 +60,9 @@ func TestValueNodeDB(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // Put a key-node pair and delete it in the same batch. 
- batch = db.NewBatch() - batch.Put(key, node1) - batch.Delete(key) + batch = db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, node1)) + require.NoError(db.Write(batch, key, nil)) require.NoError(batch.Write()) // Key should still be gone. @@ -77,9 +76,9 @@ func TestValueNodeDB(t *testing.T) { }, key: key, } - batch = db.NewBatch() - batch.Put(key, node1) - batch.Put(key, node2) + batch = db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, node1)) + require.NoError(db.Write(batch, key, node2)) require.NoError(batch.Write()) // Get the key-node pair. @@ -88,8 +87,8 @@ func TestValueNodeDB(t *testing.T) { require.Equal(node2, node2Read) // Overwrite the key-node pair in a subsequent batch. - batch = db.NewBatch() - batch.Put(key, node1) + batch = db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, node1)) require.NoError(batch.Write()) // Get the key-node pair. @@ -118,11 +117,10 @@ func TestValueNodeDBIterator(t *testing.T) { cacheSize := 10 db := newValueNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, + DefaultHasher, ) // Put key-node pairs. @@ -134,8 +132,8 @@ func TestValueNodeDBIterator(t *testing.T) { }, key: key, } - batch := db.NewBatch() - batch.Put(key, node) + batch := db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, node)) require.NoError(batch.Write()) } @@ -172,8 +170,8 @@ func TestValueNodeDBIterator(t *testing.T) { }, key: key, } - batch := db.NewBatch() - batch.Put(key, n) + batch := db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, n)) require.NoError(batch.Write()) key = ToKey([]byte{0xFF, 0x01}) @@ -183,8 +181,8 @@ func TestValueNodeDBIterator(t *testing.T) { }, key: key, } - batch = db.NewBatch() - batch.Put(key, n) + batch = db.baseDB.NewBatch() + require.NoError(db.Write(batch, key, n)) require.NoError(batch.Write()) // Iterate over the key-node pairs with a prefix. 
@@ -225,16 +223,15 @@ func TestValueNodeDBClear(t *testing.T) { baseDB := memdb.New() db := newValueNodeDB( baseDB, - &sync.Pool{ - New: func() interface{} { return make([]byte, 0) }, - }, + utils.NewBytesPool(), &mockMetrics{}, cacheSize, + DefaultHasher, ) - batch := db.NewBatch() + batch := db.baseDB.NewBatch() for _, b := range [][]byte{{1}, {2}, {3}} { - batch.Put(ToKey(b), newNode(ToKey(b))) + require.NoError(db.Write(batch, ToKey(b), newNode(ToKey(b)))) } require.NoError(batch.Write()) diff --git a/x/merkledb/view.go b/x/merkledb/view.go index 441cc37166d6..a947fe6bf49e 100644 --- a/x/merkledb/view.go +++ b/x/merkledb/view.go @@ -45,11 +45,13 @@ type view struct { committed bool commitLock sync.RWMutex - // tracking bool to enforce that no changes are made to the trie after the nodes have been calculated - nodesAlreadyCalculated utils.Atomic[bool] + // valueChangesApplied is used to enforce that no changes are made to the + // trie after the nodes have been calculated + valueChangesApplied utils.Atomic[bool] - // calculateNodesOnce is a once to ensure that node calculation only occurs a single time - calculateNodesOnce sync.Once + // applyValueChangesOnce prevents node calculation from occurring multiple + // times + applyValueChangesOnce sync.Once // Controls the view's validity related fields. // Must be held while reading/writing [childViews], [invalidated], and [parentTrie]. 
@@ -117,11 +119,11 @@ func (v *view) NewView( return v.getParentTrie().NewView(ctx, changes) } - if err := v.calculateNodeIDs(ctx); err != nil { + if err := v.applyValueChanges(ctx); err != nil { return nil, err } - newView, err := newView(v.db, v, changes) + childView, err := newView(v.db, v, changes) if err != nil { return nil, err } @@ -132,9 +134,9 @@ func (v *view) NewView( if v.invalidated { return nil, ErrInvalid } - v.childViews = append(v.childViews, newView) + v.childViews = append(v.childViews, childView) - return newView, nil + return childView, nil } // Creates a new view with the given [parentTrie]. @@ -143,7 +145,7 @@ func newView( parentTrie View, changes ViewChanges, ) (*view, error) { - newView := &view{ + v := &view{ root: maybe.Bind(parentTrie.getRoot(), (*node).clone), db: db, parentTrie: parentTrie, @@ -164,7 +166,7 @@ func newView( newVal = maybe.Some(slices.Clone(op.Value)) } } - if err := newView.recordValueChange(toKey(key), newVal); err != nil { + if err := v.recordValueChange(toKey(key), newVal); err != nil { return nil, err } } @@ -172,11 +174,11 @@ func newView( if !changes.ConsumeBytes { val = maybe.Bind(val, slices.Clone[[]byte]) } - if err := newView.recordValueChange(toKey(stringToByteSlice(key)), val); err != nil { + if err := v.recordValueChange(toKey(stringToByteSlice(key)), val); err != nil { return nil, err } } - return newView, nil + return v, nil } // Creates a view of the db at a historical root using the provided [changes]. 
@@ -189,7 +191,7 @@ func newViewWithChanges( return nil, ErrNoChanges } - newView := &view{ + v := &view{ root: changes.rootChange.after, db: db, parentTrie: db, @@ -198,9 +200,9 @@ func newViewWithChanges( } // since this is a set of historical changes, all nodes have already been calculated // since no new changes have occurred, no new calculations need to be done - newView.calculateNodesOnce.Do(func() {}) - newView.nodesAlreadyCalculated.Set(true) - return newView, nil + v.applyValueChangesOnce.Do(func() {}) + v.valueChangesApplied.Set(true) + return v, nil } func (v *view) getTokenSize() int { @@ -211,45 +213,32 @@ func (v *view) getRoot() maybe.Maybe[*node] { return v.root } -// Recalculates the node IDs for all changed nodes in the trie. -// Cancelling [ctx] doesn't cancel calculation. It's used only for tracing. -func (v *view) calculateNodeIDs(ctx context.Context) error { +// applyValueChanges generates the node changes from the value changes. It then +// hashes the changed nodes to calculate the new trie. +// +// Cancelling [ctx] doesn't cancel the operation. It's used only for tracing. +func (v *view) applyValueChanges(ctx context.Context) error { var err error - v.calculateNodesOnce.Do(func() { + v.applyValueChangesOnce.Do(func() { + // Create the span inside the once wrapper to make traces more useful. + // Otherwise, spans would be created during calls where the IDs are not + // re-calculated. + ctx, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.applyValueChanges") + defer span.End() + if v.isInvalid() { err = ErrInvalid return } - defer v.nodesAlreadyCalculated.Set(true) + defer v.valueChangesApplied.Set(true) oldRoot := maybe.Bind(v.root, (*node).clone) - // We wait to create the span until after checking that we need to actually - // calculateNodeIDs to make traces more useful (otherwise there may be a span - // per key modified even though IDs are not re-calculated). 
- _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.calculateNodeIDs") - defer span.End() - - // add all the changed key/values to the nodes of the trie - for key, change := range v.changes.values { - if change.after.IsNothing() { - // Note we're setting [err] defined outside this function. - if err = v.remove(key); err != nil { - return - } - // Note we're setting [err] defined outside this function. - } else if _, err = v.insert(key, change.after); err != nil { - return - } - } - - if !v.root.IsNothing() { - _ = v.db.calculateNodeIDsSema.Acquire(context.Background(), 1) - v.changes.rootID = v.calculateNodeIDsHelper(v.root.Value()) - v.db.calculateNodeIDsSema.Release(1) - } else { - v.changes.rootID = ids.Empty + // Note we're setting [err] defined outside this function. + if err = v.calculateNodeChanges(ctx); err != nil { + return } + v.hashChangedNodes(ctx) v.changes.rootChange = change[maybe.Maybe[*node]]{ before: oldRoot, @@ -265,33 +254,144 @@ func (v *view) calculateNodeIDs(ctx context.Context) error { return err } +func (v *view) calculateNodeChanges(ctx context.Context) error { + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.calculateNodeChanges") + defer span.End() + + // Add all the changed key/values to the nodes of the trie + for key, change := range v.changes.values { + if change.after.IsNothing() { + if err := v.remove(key); err != nil { + return err + } + } else if _, err := v.insert(key, change.after); err != nil { + return err + } + } + + return nil +} + +func (v *view) hashChangedNodes(ctx context.Context) { + _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.hashChangedNodes") + defer span.End() + + if v.root.IsNothing() { + v.changes.rootID = ids.Empty + return + } + + // If there are no children, we can avoid allocating [keyBuffer]. 
+ root := v.root.Value() + if len(root.children) == 0 { + v.changes.rootID = v.db.hasher.HashNode(root) + v.db.metrics.HashCalculated() + return + } + + // Allocate [keyBuffer] and populate it with the root node's key. + keyBuffer := v.db.hashNodesKeyPool.Acquire() + keyBuffer = v.setKeyBuffer(root, keyBuffer) + v.changes.rootID, keyBuffer = v.hashChangedNode(root, keyBuffer) + v.db.hashNodesKeyPool.Release(keyBuffer) +} + // Calculates the ID of all descendants of [n] which need to be recalculated, // and then calculates the ID of [n] itself. -func (v *view) calculateNodeIDsHelper(n *node) ids.ID { - // We use [wg] to wait until all descendants of [n] have been updated. - var wg sync.WaitGroup +// +// Returns a potentially expanded [keyBuffer]. By returning this value this +// function is able to have a maximum total number of allocations shared across +// multiple invocations. +// +// Invariant: [keyBuffer] must be populated with [n]'s key and have sufficient +// length to contain any of [n]'s child keys. +func (v *view) hashChangedNode(n *node, keyBuffer []byte) (ids.ID, []byte) { + var ( + // childBuffer is allocated on the stack. + childBuffer = make([]byte, 1) + dualIndex = dualBitIndex(v.tokenSize) + bytesForKey = bytesNeeded(n.key.length) + // We track the last byte of [n.key] so that we can reset the value for + // each key. This is needed because the child buffer may get ORed at + // this byte. + lastKeyByte byte + + // We use [wg] to wait until all descendants of [n] have been updated. + wg waitGroup + ) + if bytesForKey > 0 { + lastKeyByte = keyBuffer[bytesForKey-1] + } + + // This loop is optimized to avoid allocations when calculating the + // [childKey] by reusing [keyBuffer] and leaving the first [bytesForKey-1] + // bytes unmodified. 
+ for childIndex, childEntry := range n.children { + childBuffer[0] = childIndex << dualIndex + childIndexAsKey := Key{ + // It is safe to use byteSliceToString because [childBuffer] is not + // modified while [childIndexAsKey] is in use. + value: byteSliceToString(childBuffer), + length: v.tokenSize, + } + + totalBitLength := n.key.length + v.tokenSize + childEntry.compressedKey.length + // Because [keyBuffer] may have been modified in a prior iteration of + // this loop, it is not guaranteed that its length is at least + // [bytesNeeded(totalBitLength)]. However, that's fine. The below + // slicing would only panic if the buffer didn't have sufficient + // capacity. + keyBuffer = keyBuffer[:bytesNeeded(totalBitLength)] + // We don't need to copy this node's key. It's assumed to already be + // correct; except for the last byte. We must make sure the last byte of + // the key is set correctly because extendIntoBuffer may OR bits from + // the extension and overwrite the last byte. However, extendIntoBuffer + // does not modify the first [bytesForKey-1] bytes of [keyBuffer]. + if bytesForKey > 0 { + keyBuffer[bytesForKey-1] = lastKeyByte + } + extendIntoBuffer(keyBuffer, childIndexAsKey, n.key.length) + extendIntoBuffer(keyBuffer, childEntry.compressedKey, n.key.length+v.tokenSize) + childKey := Key{ + // It is safe to use byteSliceToString because [keyBuffer] is not + // modified while [childKey] is in use. + value: byteSliceToString(keyBuffer), + length: totalBitLength, + } - for childIndex := range n.children { - childEntry := n.children[childIndex] - childKey := n.key.Extend(ToToken(childIndex, v.tokenSize), childEntry.compressedKey) childNodeChange, ok := v.changes.nodes[childKey] if !ok { // This child wasn't changed. 
continue } - childEntry.hasValue = childNodeChange.after.hasValue() + + childNode := childNodeChange.after + childEntry.hasValue = childNode.hasValue() + + // If there are no children of the childNode, we can avoid constructing + // the buffer for the child keys. + if len(childNode.children) == 0 { + childEntry.id = v.db.hasher.HashNode(childNode) + v.db.metrics.HashCalculated() + continue + } // Try updating the child and its descendants in a goroutine. - if ok := v.db.calculateNodeIDsSema.TryAcquire(1); ok { + if childKeyBuffer, ok := v.db.hashNodesKeyPool.TryAcquire(); ok { wg.Add(1) - go func() { - childEntry.id = v.calculateNodeIDsHelper(childNodeChange.after) - v.db.calculateNodeIDsSema.Release(1) + go func(wg *sync.WaitGroup, childEntry *child, childNode *node, childKeyBuffer []byte) { + childKeyBuffer = v.setKeyBuffer(childNode, childKeyBuffer) + childEntry.id, childKeyBuffer = v.hashChangedNode(childNode, childKeyBuffer) + v.db.hashNodesKeyPool.Release(childKeyBuffer) wg.Done() - }() + }(wg.wg, childEntry, childNode, childKeyBuffer) } else { // We're at the goroutine limit; do the work in this goroutine. - childEntry.id = v.calculateNodeIDsHelper(childNodeChange.after) + // + // We can skip copying the key here because [keyBuffer] is already + // constructed to be childNode's key. + keyBuffer = v.setLengthForChildren(childNode, keyBuffer) + childEntry.id, keyBuffer = v.hashChangedNode(childNode, keyBuffer) } } @@ -299,7 +399,36 @@ func (v *view) calculateNodeIDsHelper(n *node) ids.ID { wg.Wait() // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. - return n.calculateID(v.db.metrics) + v.db.metrics.HashCalculated() + return v.db.hasher.HashNode(n), keyBuffer +} + +// setKeyBuffer expands [keyBuffer] to have sufficient size for any of [n]'s +// child keys and populates [n]'s key into [keyBuffer]. If [keyBuffer] already +// has sufficient size, this function will not perform any memory allocations. 
+func (v *view) setKeyBuffer(n *node, keyBuffer []byte) []byte { + keyBuffer = v.setLengthForChildren(n, keyBuffer) + copy(keyBuffer, n.key.value) + return keyBuffer +} + +// setLengthForChildren expands [keyBuffer] to have sufficient size for any of +// [n]'s child keys. +func (v *view) setLengthForChildren(n *node, keyBuffer []byte) []byte { + // Calculate the size of the largest child key of this node. + var maxBitLength int + for _, childEntry := range n.children { + maxBitLength = max(maxBitLength, childEntry.compressedKey.length) + } + maxBytesNeeded := bytesNeeded(n.key.length + v.tokenSize + maxBitLength) + return setBytesLength(keyBuffer, maxBytesNeeded) +} + +func setBytesLength(b []byte, size int) []byte { + if size <= cap(b) { + return b[:size] + } + return append(b[:cap(b)], make([]byte, size-cap(b))...) } // GetProof returns a proof that [bytesPath] is in or not in trie [t]. @@ -307,7 +436,7 @@ func (v *view) GetProof(ctx context.Context, key []byte) (*Proof, error) { _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.GetProof") defer span.End() - if err := v.calculateNodeIDs(ctx); err != nil { + if err := v.applyValueChanges(ctx); err != nil { return nil, err } @@ -333,7 +462,7 @@ func (v *view) GetRangeProof( _, span := v.db.infoTracer.Start(ctx, "MerkleDB.view.GetRangeProof") defer span.End() - if err := v.calculateNodeIDs(ctx); err != nil { + if err := v.applyValueChanges(ctx); err != nil { return nil, err } result, err := getRangeProof(v, start, end, maxLength) @@ -369,13 +498,13 @@ func (v *view) commitToDB(ctx context.Context) error { )) defer span.End() - // Call this here instead of in [v.db.commitChanges] - // because doing so there would be a deadlock. - if err := v.calculateNodeIDs(ctx); err != nil { + // Call this here instead of in [v.db.commitView] because doing so there + // would be a deadlock. 
+ if err := v.applyValueChanges(ctx); err != nil { return err } - if err := v.db.commitChanges(ctx, v); err != nil { + if err := v.db.commitView(ctx, v); err != nil { return err } @@ -417,7 +546,7 @@ func (v *view) updateParent(newParent View) { // GetMerkleRoot returns the ID of the root of this view. func (v *view) GetMerkleRoot(ctx context.Context) (ids.ID, error) { - if err := v.calculateNodeIDs(ctx); err != nil { + if err := v.applyValueChanges(ctx); err != nil { return ids.Empty, err } return v.changes.rootID, nil @@ -463,15 +592,15 @@ func (v *view) getValue(key Key) ([]byte, error) { } if change, ok := v.changes.values[key]; ok { - v.db.metrics.ViewValueCacheHit() + v.db.metrics.ViewChangesValueHit() if change.after.IsNothing() { return nil, database.ErrNotFound } return change.after.Value(), nil } - v.db.metrics.ViewValueCacheMiss() + v.db.metrics.ViewChangesValueMiss() - // if we don't have local copy of the key, then grab a copy from the parent trie + // if we don't have local copy of the value, then grab a copy from the parent trie value, err := v.getParentTrie().getValue(key) if err != nil { return nil, err @@ -485,9 +614,9 @@ func (v *view) getValue(key Key) ([]byte, error) { return value, nil } -// Must not be called after [calculateNodeIDs] has returned. +// Must not be called after [applyValueChanges] has returned. 
func (v *view) remove(key Key) error { - if v.nodesAlreadyCalculated.Get() { + if v.valueChangesApplied.Get() { return ErrNodesAlreadyCalculated } @@ -519,11 +648,12 @@ func (v *view) remove(key Key) error { return err } - nodeToDelete.setValue(maybe.Nothing[[]byte]()) + hadValue := nodeToDelete.hasValue() + nodeToDelete.setValue(v.db.hasher, maybe.Nothing[[]byte]()) // if the removed node has no children, the node can be removed from the trie if len(nodeToDelete.children) == 0 { - if err := v.recordNodeDeleted(nodeToDelete); err != nil { + if err := v.recordNodeDeleted(nodeToDelete, hadValue); err != nil { return err } @@ -541,21 +671,18 @@ func (v *view) remove(key Key) error { return v.compressNodePath(grandParent, parent) } - // merge this node and its descendants into a single node if possible + // merge this node and its parent into a single node if possible return v.compressNodePath(parent, nodeToDelete) } -// Merges together nodes in the inclusive descendants of [n] that -// have no value and a single child into one node with a compressed -// path until a node that doesn't meet those criteria is reached. -// [parent] is [n]'s parent. If [parent] is nil, [n] is the root -// node and [v.root] is updated to [n]. +// Merges [n] with its [parent] if [n] has only one child and no value. +// If [parent] is nil, [n] is the root node and [v.root] is updated to [n]. // Assumes at least one of the following is true: // * [n] has a value. // * [n] has children. -// Must not be called after [calculateNodeIDs] has returned. +// Must not be called after [applyValueChanges] has returned. func (v *view) compressNodePath(parent, n *node) error { - if v.nodesAlreadyCalculated.Get() { + if v.valueChangesApplied.Get() { return ErrNodesAlreadyCalculated } @@ -563,7 +690,8 @@ func (v *view) compressNodePath(parent, n *node) error { return nil } - if err := v.recordNodeDeleted(n); err != nil { + // We know from above that [n] has no value. 
+ if err := v.recordNodeDeleted(n, false /* hasValue */); err != nil { return err } @@ -620,19 +748,19 @@ func (v *view) getEditableNode(key Key, hadValue bool) (*node, error) { } // insert a key/value pair into the correct node of the trie. -// Must not be called after [calculateNodeIDs] has returned. +// Must not be called after [applyValueChanges] has returned. func (v *view) insert( key Key, value maybe.Maybe[[]byte], ) (*node, error) { - if v.nodesAlreadyCalculated.Get() { + if v.valueChangesApplied.Get() { return nil, ErrNodesAlreadyCalculated } if v.root.IsNothing() { // the trie is empty, so create a new root node. root := newNode(key) - root.setValue(value) + root.setValue(v.db.hasher, value) v.root = maybe.Some(root) return root, v.recordNewNode(root) } @@ -654,8 +782,9 @@ func (v *view) insert( commonPrefixLength = getLengthOfCommonPrefix(oldRoot.key, key, 0 /*offset*/, v.tokenSize) commonPrefix = oldRoot.key.Take(commonPrefixLength) newRoot = newNode(commonPrefix) - oldRootID = oldRoot.calculateID(v.db.metrics) + oldRootID = v.db.hasher.HashNode(oldRoot) ) + v.db.metrics.HashCalculated() // Call addChildWithID instead of addChild so the old root is added // to the new root with the correct ID. 
@@ -674,7 +803,7 @@ func (v *view) insert( // a node with that exact key already exists so update its value if closestNode.key == key { - closestNode.setValue(value) + closestNode.setValue(v.db.hasher, value) // closestNode was already marked as changed in the ancestry loop above return closestNode, nil } @@ -687,7 +816,7 @@ func (v *view) insert( if !hasChild { // there are no existing nodes along the key [key], so create a new node to insert [value] newNode := newNode(key) - newNode.setValue(value) + newNode.setValue(v.db.hasher, value) closestNode.addChild(newNode, v.tokenSize) return newNode, v.recordNewNode(newNode) } @@ -720,12 +849,12 @@ func (v *view) insert( if key.length == branchNode.key.length { // the branch node has exactly the key to be inserted as its key, so set the value on the branch node - branchNode.setValue(value) + branchNode.setValue(v.db.hasher, value) } else { // the key to be inserted is a child of the branch node // create a new node and add the value to it newNode := newNode(key) - newNode.setValue(value) + newNode.setValue(v.db.hasher, value) branchNode.addChild(newNode, v.tokenSize) if err := v.recordNewNode(newNode); err != nil { return nil, err @@ -755,28 +884,28 @@ func getLengthOfCommonPrefix(first, second Key, secondOffset int, tokenSize int) } // Records that a node has been created. -// Must not be called after [calculateNodeIDs] has returned. +// Must not be called after [applyValueChanges] has returned. func (v *view) recordNewNode(after *node) error { return v.recordKeyChange(after.key, after, after.hasValue(), true /* newNode */) } // Records that an existing node has been changed. -// Must not be called after [calculateNodeIDs] has returned. +// Must not be called after [applyValueChanges] has returned. func (v *view) recordNodeChange(after *node) error { return v.recordKeyChange(after.key, after, after.hasValue(), false /* newNode */) } // Records that the node associated with the given key has been deleted. 
-// Must not be called after [calculateNodeIDs] has returned. -func (v *view) recordNodeDeleted(after *node) error { - return v.recordKeyChange(after.key, nil, after.hasValue(), false /* newNode */) +// Must not be called after [applyValueChanges] has returned. +func (v *view) recordNodeDeleted(after *node, hadValue bool) error { + return v.recordKeyChange(after.key, nil, hadValue, false /* newNode */) } // Records that the node associated with the given key has been changed. // If it is an existing node, record what its value was before it was changed. -// Must not be called after [calculateNodeIDs] has returned. +// Must not be called after [applyValueChanges] has returned. func (v *view) recordKeyChange(key Key, after *node, hadValue bool, newNode bool) error { - if v.nodesAlreadyCalculated.Get() { + if v.valueChangesApplied.Get() { return ErrNodesAlreadyCalculated } @@ -805,10 +934,10 @@ func (v *view) recordKeyChange(key Key, after *node, hadValue bool, newNode bool // Records that a key's value has been added or updated. // Doesn't actually change the trie data structure. -// That's deferred until we call [calculateNodeIDs]. -// Must not be called after [calculateNodeIDs] has returned. +// That's deferred until we call [applyValueChanges]. +// Must not be called after [applyValueChanges] has returned. 
func (v *view) recordValueChange(key Key, value maybe.Maybe[[]byte]) error { - if v.nodesAlreadyCalculated.Get() { + if v.valueChangesApplied.Get() { return ErrNodesAlreadyCalculated } @@ -845,12 +974,13 @@ func (v *view) recordValueChange(key Key, value maybe.Maybe[[]byte]) error { func (v *view) getNode(key Key, hasValue bool) (*node, error) { // check for the key within the changed nodes if nodeChange, isChanged := v.changes.nodes[key]; isChanged { - v.db.metrics.ViewNodeCacheHit() + v.db.metrics.ViewChangesNodeHit() if nodeChange.after == nil { return nil, database.ErrNotFound } return nodeChange.after, nil } + v.db.metrics.ViewChangesNodeMiss() // get the node from the parent trie and store a local copy return v.getParentTrie().getEditableNode(key, hasValue) diff --git a/x/merkledb/view_test.go b/x/merkledb/view_test.go new file mode 100644 index 000000000000..f321dffd511b --- /dev/null +++ b/x/merkledb/view_test.go @@ -0,0 +1,105 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "context" + "encoding/binary" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +var hashChangedNodesTests = []struct { + name string + numKeys uint64 + expectedRootHash string +}{ + { + name: "1", + numKeys: 1, + expectedRootHash: "2A4DRkSWbTvSxgA1UMGp1Mpt1yzMFaeMMiDnrijVGJXPcRYiD4", + }, + { + name: "10", + numKeys: 10, + expectedRootHash: "2PGy7QvbYwVwn5QmLgj4KBgV2BisanZE8Nue2SxK9ffybb4mAn", + }, + { + name: "100", + numKeys: 100, + expectedRootHash: "LCeS4DWh6TpNKWH4ke9a2piSiwwLbmxGUj8XuaWx1XDGeCMAv", + }, + { + name: "1000", + numKeys: 1000, + expectedRootHash: "2S6f84wdRHmnx51mj35DF2owzf8wio5pzNJXfEWfFYFNxUB64T", + }, + { + name: "10000", + numKeys: 10000, + expectedRootHash: "wF6UnhaDoA9fAqiXAcx27xCYBK2aspDBEXkicmC7rs8EzLCD8", + }, + { + name: "100000", + numKeys: 100000, + expectedRootHash: "2Dy3RWZeNDUnUvzXpruB5xdp1V7xxb14M53ywdZVACDkdM66M1", + }, +} + +func makeViewForHashChangedNodes(t require.TestingT, numKeys uint64, parallelism uint) *view { + config := newDefaultConfig() + config.RootGenConcurrency = parallelism + db, err := newDatabase( + context.Background(), + memdb.New(), + config, + &mockMetrics{}, + ) + require.NoError(t, err) + + ops := make([]database.BatchOp, 0, numKeys) + for i := uint64(0); i < numKeys; i++ { + k := binary.AppendUvarint(nil, i) + ops = append(ops, database.BatchOp{ + Key: k, + Value: hashing.ComputeHash256(k), + }) + } + + ctx := context.Background() + viewIntf, err := db.NewView(ctx, ViewChanges{BatchOps: ops}) + require.NoError(t, err) + + view := viewIntf.(*view) + require.NoError(t, view.calculateNodeChanges(ctx)) + return view +} + +func Test_HashChangedNodes(t *testing.T) { + for _, test := range hashChangedNodesTests { + t.Run(test.name, func(t *testing.T) { + view := makeViewForHashChangedNodes(t, test.numKeys, 16) + ctx := 
context.Background() + view.hashChangedNodes(ctx) + require.Equal(t, test.expectedRootHash, view.changes.rootID.String()) + }) + } +} + +func Benchmark_HashChangedNodes(b *testing.B) { + for _, test := range hashChangedNodesTests { + view := makeViewForHashChangedNodes(b, test.numKeys, 1) + ctx := context.Background() + b.Run(test.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + view.hashChangedNodes(ctx) + } + }) + } +} diff --git a/x/merkledb/wait_group.go b/x/merkledb/wait_group.go new file mode 100644 index 000000000000..01f26403e90d --- /dev/null +++ b/x/merkledb/wait_group.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import "sync" + +// waitGroup is a small wrapper of a sync.WaitGroup that avoids performing a +// memory allocation when Add is never called. +type waitGroup struct { + wg *sync.WaitGroup +} + +func (wg *waitGroup) Add(delta int) { + if wg.wg == nil { + wg.wg = new(sync.WaitGroup) + } + wg.wg.Add(delta) +} + +func (wg *waitGroup) Wait() { + if wg.wg != nil { + wg.wg.Wait() + } +} diff --git a/x/merkledb/wait_group_test.go b/x/merkledb/wait_group_test.go new file mode 100644 index 000000000000..2993a9fb2a2c --- /dev/null +++ b/x/merkledb/wait_group_test.go @@ -0,0 +1,29 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import "testing" + +func Benchmark_WaitGroup_Wait(b *testing.B) { + for i := 0; i < b.N; i++ { + var wg waitGroup + wg.Wait() + } +} + +func Benchmark_WaitGroup_Add(b *testing.B) { + for i := 0; i < b.N; i++ { + var wg waitGroup + wg.Add(1) + } +} + +func Benchmark_WaitGroup_AddDoneWait(b *testing.B) { + for i := 0; i < b.N; i++ { + var wg waitGroup + wg.Add(1) + wg.wg.Done() + wg.Wait() + } +} diff --git a/x/sync/client.go b/x/sync/client.go index 7a71f1d4435e..cc983db3fcbe 100644 --- a/x/sync/client.go +++ b/x/sync/client.go @@ -72,6 +72,7 @@ type client struct { log logging.Logger metrics SyncMetrics tokenSize int + hasher merkledb.Hasher } type ClientConfig struct { @@ -80,18 +81,25 @@ type ClientConfig struct { Log logging.Logger Metrics SyncMetrics BranchFactor merkledb.BranchFactor + // If not specified, [merkledb.DefaultHasher] will be used. + Hasher merkledb.Hasher } func NewClient(config *ClientConfig) (Client, error) { if err := config.BranchFactor.Valid(); err != nil { return nil, err } + hasher := config.Hasher + if hasher == nil { + hasher = merkledb.DefaultHasher + } return &client{ networkClient: config.NetworkClient, stateSyncNodes: config.StateSyncNodeIDs, log: config.Log, metrics: config.Metrics, tokenSize: merkledb.BranchFactorToTokenSize[config.BranchFactor], + hasher: hasher, }, nil } @@ -168,6 +176,7 @@ func (c *client) GetChangeProof( endKey, req.EndRootHash, c.tokenSize, + c.hasher, ) if err != nil { return nil, err @@ -206,6 +215,7 @@ func verifyRangeProof( end maybe.Maybe[[]byte], rootBytes []byte, tokenSize int, + hasher merkledb.Hasher, ) error { root, err := ids.ToID(rootBytes) if err != nil { @@ -226,6 +236,7 @@ func verifyRangeProof( end, root, tokenSize, + hasher, ); err != nil { return fmt.Errorf("%w due to %w", errInvalidRangeProof, err) } @@ -265,6 +276,7 @@ func (c *client) GetRangeProof( maybeBytesToMaybe(req.EndKey), req.RootHash, c.tokenSize, + c.hasher, ); err != nil { return nil, err } diff --git 
a/x/sync/network_client.go b/x/sync/network_client.go index 15f59cc5885a..18530d1c4e7c 100644 --- a/x/sync/network_client.go +++ b/x/sync/network_client.go @@ -286,7 +286,14 @@ func (c *networkClient) sendRequestLocked( // Send an app request to the peer. nodeIDs := set.Of(nodeID) - if err := c.appSender.SendAppRequest(ctx, nodeIDs, requestID, request); err != nil { + // Cancellation is removed from this context to avoid erroring unexpectedly. + // SendAppRequest should be non-blocking and any error other than context + // cancellation is unexpected. + // + // This guarantees that the network should never receive an unexpected + // AppResponse. + ctxWithoutCancel := context.WithoutCancel(ctx) + if err := c.appSender.SendAppRequest(ctxWithoutCancel, nodeIDs, requestID, request); err != nil { c.lock.Unlock() c.log.Fatal("failed to send app request", zap.Stringer("nodeID", nodeID),