diff --git a/.gitignore b/.gitignore index 3d251ce..bf4d05a 100644 --- a/.gitignore +++ b/.gitignore @@ -27,9 +27,9 @@ block-signer-config.json /ton-node-se/pub-key /ton-node-se/log_cfg.yml /ton-node-se/tonos-se-tests -workchains docker/.DS_Store readme.html /dev/node_modules/ /dev/package-lock.json /node/target/ +/target/ diff --git a/CHANGELOG.md b/CHANGELOG.md index b5d3f1d..da8fa73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,19 @@ # Release Notes All notable changes to this project will be documented in this file. +## 0.37.0 Jan 11, 2023 + +### New + +- Produces masterchain block for each workchain block. +- Added ability to update blockchain config with node restart (without killing the database). After [changing the config](https://github.com/tonlabs/evernode-se#how-to-change-the-blockchain-configuration) stop and start the node to apply it. It will produce new key block from the new config. +- `CapSignatureWithId` capability is supported: `global_id` parameter is used as a `signature_id` if `CapSignatureWithId` cap is turned + on in the blockchain config. + +### Fixed +- `global_id` parameter in `ton-node.conf.json` is written into blocks. 
+ + ## 0.36.3 Jan 11, 2023 ### Fixed diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..c84c873 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,7 @@ +[workspace] +members = [ + "node", +] + +exclude = [ +] diff --git a/contracts/README.md b/contracts/README.md index 2111274..6f88f7d 100644 --- a/contracts/README.md +++ b/contracts/README.md @@ -1,5 +1,10 @@ # Smart Contracts, used in Evernode SE -* [Giver v2](giver_v2) -* [SafeMultisig Wallet](safe_multisig) + +|Giver type | Address | Comment +| --------------- | -------------------------------------------------------------------|------ +| [GiverV3](giver_v3) | 0:96137b99dcd65afce5a54a48dac83c0fd276432abbe3ba7f1bfb0fb795e69025 | Recommended Giver version +| [SafeMultisig](safe_multisig) | 0:d5f5cfc4b52d2eb1bd9d3a8e51707872c7ce0c174facddd0e06ae5ffd17d2fcd | Safe Multisig as Giver. +| [GiverV2](giver_v2) | 0:ece57bcc6c530283becbbd8a3b24d3c5987cdddc3c8b7b33be6e4a6312490415 | Deprecated. Recommended to use GiverV3 +| GiverV1 | 0:841288ed3b55d9cdafa806807f02a0ae0c169aa5edfe88a789a6482429756a94 | Deprecated. Recommended to use GiverV3 diff --git a/contracts/giver_v2/README.md b/contracts/giver_v2/README.md index 727f6a3..00dddab 100644 --- a/contracts/giver_v2/README.md +++ b/contracts/giver_v2/README.md @@ -1,11 +1,12 @@ # Giver v2 -This directory contains Giver v2 (ABI v2) contract. This giver is recommended over Giver v1 because ABI v1 is deprecated. +This directory contains Giver v2 (ABI v2) contract. In Evernode SE this giver is predeployed at `0:ece57bcc6c530283becbbd8a3b24d3c5987cdddc3c8b7b33be6e4a6312490415` address and its initial balance is about 5 billion tokens. -If you want to compile and deploy your own v2 giver you can find its fresh version in `giver_for_Sol_0.59.0_abi.2.2` folder that is compatible with the latest compiler version. 
We do not deploy this giver to SE because it has the same ABI, but will cause a breaking change because a lot of tools and scripts use the old `0:ece57bc...` address. +It is not recommended to use in production or recompile Giver V2 because it works on an old Solidity version. +If you want to make changes to the Giver contract or use it in production - use [Giver V3](../giver_v3) version which can be successfully compiled with the latest Solidity compiler. ## Keys: * Public: `2ada2e65ab8eeab09490e3521415f45b6e42df9c760a639bcf53957550b25a16` @@ -24,7 +25,7 @@ parameters: tonos-cli call 0:ece57bcc6c530283becbbd8a3b24d3c5987cdddc3c8b7b33be6e4a6312490415 \ sendTransaction '{"dest":"
","value":,"bounce":false}' \ --abi GiverV2.abi.json \ - --sign GiverV2.keys.json + --sign seGiver.keys.json ``` diff --git a/contracts/giver_v2/GiverV2.keys.json b/contracts/giver_v2/seGiver.keys.json similarity index 100% rename from contracts/giver_v2/GiverV2.keys.json rename to contracts/giver_v2/seGiver.keys.json diff --git a/contracts/giver_v3/GiverV3.abi.json b/contracts/giver_v3/GiverV3.abi.json index a75bcf7..7749f9f 100644 --- a/contracts/giver_v3/GiverV3.abi.json +++ b/contracts/giver_v3/GiverV3.abi.json @@ -1,49 +1,49 @@ { - "ABI version": 2, - "version": "2.2", - "header": ["time", "expire"], - "functions": [ - { - "name": "sendTransaction", - "inputs": [ - {"name":"dest","type":"address"}, - {"name":"value","type":"uint128"}, - {"name":"bounce","type":"bool"} - ], - "outputs": [ - ] - }, - { - "name": "getMessages", - "inputs": [ - ], - "outputs": [ - {"components":[{"name":"hash","type":"uint256"},{"name":"expireAt","type":"uint64"}],"name":"messages","type":"tuple[]"} - ] - }, - { - "name": "upgrade", - "inputs": [ - {"name":"newcode","type":"cell"} - ], - "outputs": [ - ] - }, - { - "name": "constructor", - "inputs": [ - ], - "outputs": [ - ] - } - ], - "data": [ - ], - "events": [ - ], - "fields": [ - {"name":"_pubkey","type":"uint256"}, - {"name":"_constructorFlag","type":"bool"}, - {"name":"m_messages","type":"map(uint256,uint64)"} - ] + "ABI version": 2, + "version": "2.3", + "header": ["time", "expire"], + "functions": [ + { + "name": "sendTransaction", + "inputs": [ + {"name":"dest","type":"address"}, + {"name":"value","type":"uint128"}, + {"name":"bounce","type":"bool"} + ], + "outputs": [ + ] + }, + { + "name": "getMessages", + "inputs": [ + ], + "outputs": [ + {"components":[{"name":"hash","type":"uint256"},{"name":"expireAt","type":"uint32"}],"name":"messages","type":"tuple[]"} + ] + }, + { + "name": "upgrade", + "inputs": [ + {"name":"newcode","type":"cell"} + ], + "outputs": [ + ] + }, + { + "name": "constructor", + "inputs": [ + ], + 
"outputs": [ + ] + } + ], + "data": [ + ], + "events": [ + ], + "fields": [ + {"name":"_pubkey","type":"uint256"}, + {"name":"_constructorFlag","type":"bool"}, + {"name":"m_messages","type":"map(uint256,uint32)"} + ] } diff --git a/contracts/giver_v3/GiverV3.sol b/contracts/giver_v3/GiverV3.sol index 5f08ed4..c1f69e1 100644 --- a/contracts/giver_v3/GiverV3.sol +++ b/contracts/giver_v3/GiverV3.sol @@ -1,4 +1,4 @@ -pragma ton-solidity >= 0.59.0; +pragma ever-solidity >= 0.61.2; pragma AbiHeader time; pragma AbiHeader expire; @@ -22,7 +22,7 @@ abstract contract Upgradable { contract GiverV3 is Upgradable { uint8 constant MAX_CLEANUP_MSGS = 30; - mapping(uint256 => uint64) m_messages; + mapping(uint256 => uint32) m_messages; modifier acceptOnlyOwner { require(msg.pubkey() == tvm.pubkey(), 101); @@ -49,19 +49,19 @@ contract GiverV3 is Upgradable { /// @notice Function with predefined name called after signature check. Used to /// implement custom replay protection with parallel access. - function afterSignatureCheck(TvmSlice body, TvmCell message) private inline + function afterSignatureCheck(TvmSlice body, TvmCell) private inline returns (TvmSlice) { // owner check require(msg.pubkey() == tvm.pubkey(), 101); + uint256 bodyHash = tvm.hash(body); // load and drop message timestamp (uint64) - (, uint64 expireAt) = body.decode(uint64, uint32); + (, uint32 expireAt) = body.decode(uint64, uint32); require(expireAt > now, 57); - uint256 msgHash = tvm.hash(message); - require(!m_messages.exists(msgHash), 102); + require(!m_messages.exists(bodyHash), 102); tvm.accept(); - m_messages[msgHash] = expireAt; + m_messages[bodyHash] = expireAt; return body; } @@ -69,13 +69,13 @@ contract GiverV3 is Upgradable { /// @notice Allows to delete expired messages from dict. 
function gc() private inline { uint counter = 0; - for ((uint256 msgHash, uint64 expireAt) : m_messages) { + for ((uint256 bodyHash, uint32 expireAt) : m_messages) { if (counter >= MAX_CLEANUP_MSGS) { break; } counter++; if (expireAt <= now) { - delete m_messages[msgHash]; + delete m_messages[bodyHash]; } } } @@ -85,10 +85,10 @@ contract GiverV3 is Upgradable { */ struct Message { uint256 hash; - uint64 expireAt; + uint32 expireAt; } function getMessages() public view returns (Message[] messages) { - for ((uint256 msgHash, uint64 expireAt) : m_messages) { + for ((uint256 msgHash, uint32 expireAt) : m_messages) { messages.push(Message(msgHash, expireAt)); } } diff --git a/contracts/giver_v3/GiverV3.tvc b/contracts/giver_v3/GiverV3.tvc index 8e31859..96a3c21 100644 Binary files a/contracts/giver_v3/GiverV3.tvc and b/contracts/giver_v3/GiverV3.tvc differ diff --git a/contracts/giver_v3/README.md b/contracts/giver_v3/README.md index 056cd5d..fdab8c1 100644 --- a/contracts/giver_v3/README.md +++ b/contracts/giver_v3/README.md @@ -1,14 +1,11 @@ # Giver v3 -This directory contains Giver v3 (ABI v2.2) contract. This giver is recommended over Giver v1 or v2. +This directory contains Giver v3 contract. This giver is recommended to use with solc version above 0.61.2 to deploy it on devnet or mainnet. -In Evernode SE this giver is predeployed (since version `TODO`) at `0:78fbd6980c10cf41401b32e9b51810415e7578b52403af80dae68ddf99714498` address +In Evernode SE this giver is predeployed at `0:96137b99dcd65afce5a54a48dac83c0fd276432abbe3ba7f1bfb0fb795e69025` address and its initial balance is about 5 billion tokens. ## Keys: - -> ⚠ Using only local and dev environment - * Public: `2ada2e65ab8eeab09490e3521415f45b6e42df9c760a639bcf53957550b25a16` * Secret: `172af540e43a524763dd53b26a066d472a97c4de37d5498170564510608250c3` @@ -21,46 +18,72 @@ parameters: * `bounce`: `bool` - bounce flag of the message. 
### Using tonos-cli: -```shell -npx tonos-cli call 0:78fbd6980c10cf41401b32e9b51810415e7578b52403af80dae68ddf99714498 \ +```commandline +tonos-cli call 0:96137b99dcd65afce5a54a48dac83c0fd276432abbe3ba7f1bfb0fb795e69025 \ sendTransaction '{"dest":"
","value":,"bounce":false}' \ - --abi GiverV2.abi.json \ - --sign GiverV2.keys.json + --abi GiverV3.abi.json \ + --sign seGiver.keys.json ``` -## Self deploy +## How to deploy GiverV3 on any network -### Compile +### Setup your signer +You can skip this step if you already have one. ```shell -npx everdev sol set --compiler 0.59.4 --linker 0.15.24 -npx everdev sol compile GiverV3.sol +npx everdev signer add devnet_giver_keys +npx everdev signer default devnet_giver_keys ``` -> ℹ️ For compiler v0.59.4 and linker v0.15.2 code hash is `726aec999006a2e036af36c46024237acb946c13b4d4b3e1ad3b4ad486d564b1` +### [Optional] Verify Giver contract bytecode +This contract is compiled with `0.66.0` Solidity and `0.19.3` Linker version. + +To check that the code hash of the compiled version from repository is equal to your freshly compiled version, run this command and check that the *Code Hash* is +`57a1e5e4304f4db2beb23117e4d85df9cb5caec127531350e73219a8b8dc8afd`. -### Setup yore signer ```shell -npx everdev signer add -npx everdev signer default +npx everdev sol set --compiler 0.66.0 --linker 0.19.3 +npx everdev sol compile GiverV3.sol +npx everdev contract info --signer devnet_giver_keys GiverV3 + +Configuration + + Network: se (http://localhost) + Signer: devnet_giver_keys (public 7fbbd813ac8358ed2d8598de156eb62bdddf5191d6ce4a0f307d4eac8d4c8e16) + +Address: 0:dd39b607834a23f7091d4d6d8982c6269c1d71f1b512757cf4d298325a550b6a (calculated from TVC and signer public) +Code Hash: 57a1e5e4304f4db2beb23117e4d85df9cb5caec127531350e73219a8b8dc8afd (from TVC file) ``` -### Get `` and topup it +### Get your Giver address and top it up +The address is calculated from the compiled contract codehash and your public key. +Run this command to see the *Address*: ```shell -npx everdev contract info GiverV3 +npx everdev contract info --signer devnet_giver_keys GiverV3 ``` +Now, you need to top up your giver. Transfer tokens from Surf wallet or Everwallet. 
-### Deploy + +### Deploy your Giver +After you topped up Giver address, you can deploy it. +Run this command: ```shell npx everdev contract deploy GiverV3 ``` -### Setup yore Giver +### Setup your Giver +Run this command to set up the giver for your network. + ```shell -everdev network giver --signer --type GiverV3 +npx everdev network giver --signer --type GiverV3 +``` + +### Using your Giver +This command under the hood will use predefined signer and configured giver on the default network. +``` +npx everdev contract topup -a `` -v `` ``` ## Files * ABI: [GiverV3.abi.json](GiverV3.abi.json) -* Keypair: [GiverV3.keys.json](GiverV3.keys.json) * Source code: [GiverV3.sol](GiverV3.sol) * TVC file: [GiverV3.tvc](GiverV3.tvc) diff --git a/contracts/giver_v3/GiverV3.keys.json b/contracts/giver_v3/seGiver.keys.json similarity index 100% rename from contracts/giver_v3/GiverV3.keys.json rename to contracts/giver_v3/seGiver.keys.json diff --git a/contracts/giver_v4/.gitignore b/contracts/giver_v4/.gitignore deleted file mode 100644 index a6c7c28..0000000 --- a/contracts/giver_v4/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.js diff --git a/contracts/giver_v4/GiverV4.abi.json b/contracts/giver_v4/GiverV4.abi.json deleted file mode 100644 index 3c1e19d..0000000 --- a/contracts/giver_v4/GiverV4.abi.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "ABI version": 2, - "version": "2.2", - "header": ["pubkey", "time", "expire"], - "functions": [ - { - "name": "sendTransaction", - "inputs": [ - {"name":"dest","type":"address"}, - {"name":"value","type":"uint128"}, - {"name":"bounce","type":"bool"} - ], - "outputs": [ - ] - }, - { - "name": "getMessages", - "inputs": [ - ], - "outputs": [ - {"components":[{"name":"hash","type":"uint256"},{"name":"expireAt","type":"uint32"}],"name":"messages","type":"tuple[]"} - ] - }, - { - "name": "upgrade", - "inputs": [ - {"name":"newcode","type":"cell"} - ], - "outputs": [ - ] - }, - { - "name": "constructor", - "inputs": [ - ], - "outputs": [ - ] - } - ], 
- "data": [ - ], - "events": [ - ], - "fields": [ - {"name":"_pubkey","type":"uint256"}, - {"name":"_constructorFlag","type":"bool"}, - {"name":"m_messages","type":"map(uint256,uint32)"} - ] -} diff --git a/contracts/giver_v4/GiverV4.sol b/contracts/giver_v4/GiverV4.sol deleted file mode 100644 index b882355..0000000 --- a/contracts/giver_v4/GiverV4.sol +++ /dev/null @@ -1,98 +0,0 @@ -pragma ever-solidity >= 0.61.2; -pragma AbiHeader time; -pragma AbiHeader expire; -pragma AbiHeader pubkey; - -abstract contract Upgradable { - /* - * Set code - */ - - function upgrade(TvmCell newcode) public virtual { - require(msg.pubkey() == tvm.pubkey(), 101); - tvm.accept(); - tvm.commit(); - tvm.setcode(newcode); - tvm.setCurrentCode(newcode); - onCodeUpgrade(); - } - - function onCodeUpgrade() internal virtual; -} - -contract GiverV3 is Upgradable { - - uint8 constant MAX_CLEANUP_MSGS = 30; - mapping(uint256 => uint32) m_messages; - - modifier acceptOnlyOwner { - require(msg.pubkey() == tvm.pubkey(), 101); - tvm.accept(); - _; - } - - /* - * Publics - */ - - /// @notice Allows to accept simple transfers. - receive() external {} - - /// @notice Transfers grams to other contracts. - function sendTransaction(address dest, uint128 value, bool bounce) public { - dest.transfer(value, bounce, 3); - gc(); - } - - /* - * Privates - */ - - /// @notice Function with predefined name called after signature check. Used to - /// implement custom replay protection with parallel access. - function afterSignatureCheck(TvmSlice body, TvmCell message) private inline - returns (TvmSlice) - { - // owner check - require(msg.pubkey() == tvm.pubkey(), 101); - // load and drop message timestamp (uint64) - (, uint32 expireAt) = body.decode(uint64, uint32); - require(expireAt > now, 57); - uint256 msgHash = tvm.hash(message); - require(!m_messages.exists(msgHash), 102); - - tvm.accept(); - m_messages[msgHash] = expireAt; - - return body; - } - - /// @notice Allows to delete expired messages from dict. 
- function gc() private inline { - uint counter = 0; - for ((uint256 msgHash, uint32 expireAt) : m_messages) { - if (counter >= MAX_CLEANUP_MSGS) { - break; - } - counter++; - if (expireAt <= now) { - delete m_messages[msgHash]; - } - } - } - - /* - * Get methods - */ - struct Message { - uint256 hash; - uint32 expireAt; - } - function getMessages() public view returns (Message[] messages) { - for ((uint256 msgHash, uint32 expireAt) : m_messages) { - messages.push(Message(msgHash, expireAt)); - } - } - - function onCodeUpgrade() internal override {} -} diff --git a/contracts/giver_v4/GiverV4.tvc b/contracts/giver_v4/GiverV4.tvc deleted file mode 100644 index 5c880dc..0000000 Binary files a/contracts/giver_v4/GiverV4.tvc and /dev/null differ diff --git a/contracts/giver_v4/README.md b/contracts/giver_v4/README.md deleted file mode 100644 index b6acb51..0000000 --- a/contracts/giver_v4/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# Giver v4 - -This directory contains Giver v4 (ABI v2.2) contract. This giver is recommended to use with solc version above 0.61.2 to deploy it on devnet or mainnet. - -## Self deploy contract on any network - -### Compile Giver contract -This contract is prepared for compilation by `solc_0_61_2` and `tvm_linker_0_17_3` or above. -```shell -npx everdev sol set --compiler 0.61.2 --linker 0.17.3 -npx everdev sol compile GiverV4.sol -``` - -### Setup your signer -You can skip this step if you already have one. But make sure the right signer linked to ``. -```shell -npx everdev signer add -npx everdev signer default -``` - -### Get your Giver `` -The address was calculated using the compiled contract codehash and your public key. -```shell -npx everdev contract info GiverV4 -``` - -### Deploy your Giver -Before deploy you need to transfer tokens to ``. 
-```shell -npx everdev contract deploy GiverV4 -``` - -### Setup your Giver -Before this step, make sure that you have configured the network with the correct endpoint, which contains your evercloud projectId. -```shell -npx everdev network giver --signer --type GiverV4 -``` - -### Using your Giver -This command under the hood will use predefined signer and configured giver on the default network. -``` -npx everdev contract topup -a `` -v `` -``` - -## Files -* ABI: [GiverV4.abi.json](GiverV4.abi.json) -* Source code: [GiverV4.sol](GiverV4.sol) -* TVC file: [GiverV4.tvc](GiverV4.tvc) diff --git a/dev/q-server.json b/dev/q-server.json index fcfb5dd..813c90a 100644 --- a/dev/q-server.json +++ b/dev/q-server.json @@ -9,38 +9,38 @@ }, "blockchain": { "accounts": [ - "http://root@localhost:8529" + "http://root@127.0.0.1:8529" ], "blocks": { "hot": [ - "http://root@localhost:8529" + "http://root@127.0.0.1:8529" ], "cold": [] }, "transactions": { "hot": [ - "http://root@localhost:8529" + "http://root@127.0.0.1:8529" ], "cold": [] }, - "zerostate": "http://root@localhost:8529" + "zerostate": "http://root@127.0.0.1:8529" }, "slowQueriesBlockchain": { "accounts": [ - "http://root@localhost:8529" + "http://root@127.0.0.1:8529" ], "blocks": { "hot": [ - "http://root@localhost:8529" + "http://root@127.0.0.1:8529" ], "cold": [] }, "transactions": { "hot": [ - "http://root@localhost:8529" + "http://root@127.0.0.1:8529" ], "cold": [] }, - "zerostate": "http://root@localhost:8529" + "zerostate": "http://root@127.0.0.1:8529" } } diff --git a/dev/reset-db.js b/dev/reset-db.js index 41d8caa..24a86b7 100644 --- a/dev/reset-db.js +++ b/dev/reset-db.js @@ -1,7 +1,9 @@ const { Database } = require("arangojs"); +const fs = require("fs"); +const path = require("path"); const config = { - url: "http://localhost:8529", + url: "http://127.0.0.1:8529", auth: { username: "root", password: "", @@ -21,9 +23,9 @@ const COLLECTIONS = { "workchain_id, key_block", "seq_no", "workchain_id, gen_utime", 
"workchain_id, tr_count", "gen_utime", - "mastermin_shard_gen_utime", - "prev_refroot_hash,_key", - "prev_alt_refroot_hash,_key", + "master.min_shard_gen_utime", + "prev_ref.root_hash,_key", + "prev_alt_ref.root_hash,_key", "tr_count, gen_utime", "chain_order", "gen_utime, chain_order", @@ -158,6 +160,12 @@ async function checkCollections(collections) { try { await checkBlockchainDb(); await checkCollections(COLLECTIONS); + fs.rmSync(path.resolve(__dirname, "..", "docker", "ton-node", "workchains"), + { + recursive: true, + force: true, + }, + ); } catch (err) { console.error(err); process.exit(1); diff --git a/docker/ton-node/ton-node.conf.json b/docker/ton-node/ton-node.conf.json index 44332e0..e267f87 100644 --- a/docker/ton-node/ton-node.conf.json +++ b/docker/ton-node/ton-node.conf.json @@ -1,38 +1,41 @@ { - "node_index": 0, - "port": 40301, - "poa_validators": 2, - "poa_interval": 1, - "private_key": "./private-key", - "keys": ["./pub-key"], - "boot": [], - "shard_id": { - "workchain": 0, - "shardchain_pfx": 0, - "shardchain_pfx_len": 0 - }, - "adnl": { - "address": "127.0.0.1:3030", - "keys": [ - { - "type_id": -1977122418, - "pub_key": "BBKmgGAxz4ZofRgMO2qhYt+K1bGlGeowukPONVAkOcU=", - "pvt_key": "3CFeiTSlGkJf3D8w3ZXS4QS+6/0p+MFZGuv0XYMvMRo=" - } - ] - }, + "node_index": 0, + "port": 40301, + "poa_validators": 2, + "poa_interval": 1, + "private_key": "./private-key", + "keys": [ + "./pub-key" + ], + "boot": [], + "global_id": 100, + "shard_id": { + "workchain": 0, + "shardchain_pfx": 0, + "shardchain_pfx_len": 0 + }, + "adnl": { + "address": "127.0.0.1:3030", + "keys": [ + { + "type_id": -1977122418, + "pub_key": "BBKmgGAxz4ZofRgMO2qhYt+K1bGlGeowukPONVAkOcU=", + "pvt_key": "3CFeiTSlGkJf3D8w3ZXS4QS+6/0p+MFZGuv0XYMvMRo=" + } + ] + }, "api": { "messages": "topics/requests", "live_control": "se", - "address": "127.0.0.1", - "port": 3000 + "address": "127.0.0.1", + "port": 3000 }, - "document_db": { - "server": "127.0.0.1:8529", - "database": "blockchain", - 
"blocks_collection": "blocks", - "messages_collection": "messages", - "transactions_collection": "transactions", - "accounts_collection": "accounts" - } + "document_db": { + "server": "127.0.0.1:8529", + "database": "blockchain", + "blocks_collection": "blocks", + "messages_collection": "messages", + "transactions_collection": "transactions", + "accounts_collection": "accounts" + } } diff --git a/docker/ton-node/workchains/WC0/shard_8000000000000000/zerostate b/docker/ton-node/workchains/WC0/shard_8000000000000000/zerostate new file mode 100644 index 0000000..ee3a427 Binary files /dev/null and b/docker/ton-node/workchains/WC0/shard_8000000000000000/zerostate differ diff --git a/node/Cargo.toml b/node/Cargo.toml index fa34e6a..c17e9f2 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -2,37 +2,36 @@ edition = '2021' build = 'build.rs' name = 'ton_node_startup' -version = '0.36.3' +version = '0.37.0' [dependencies] # External anyhow = '1.0' base64 = '0.13' -clap = '3.1' +clap = '3.2' ed25519-dalek = '1.0' failure = '0.1' hex = '0.4' -http = '0.1' +http = '0.2' iron = '0.6' jsonrpc-http-server = '10.0.1' lazy_static = '1.4.0' log = '0.4' -log4rs = '1.1' +log4rs = '1.2' num = '0.4' num-traits = '0.2' parking_lot = '0.12' rand = '0.8' -reqwest = '0.9.7' +reqwest = '0.9' router = '0.6.0' serde = '1.0' serde_derive = '1.0' -serde_json = { features = [ 'preserve_order' ], version = '1.0' } +serde_json = { features = ['preserve_order'], version = '1.0' } thiserror = '1.0' # Domestic -ton_block = { git = 'https://github.com/tonlabs/ton-labs-block', tag = '1.9.14' } -ton_block_json = { git = 'https://github.com/tonlabs/ton-labs-block-json.git', tag = '0.7.79' } -ton_executor = { git = 'https://github.com/tonlabs/ton-labs-executor', tag = '1.15.145' } -ton_types = { git = 'https://github.com/tonlabs/ton-labs-types', tag = '1.12.4' } -ton_vm = { git = 'https://github.com/tonlabs/ton-labs-vm', tag = '1.8.89', features = ['gosh'] } - +ton_block = { git = 
'https://github.com/tonlabs/ever-block', tag = '1.9.28' } +ton_block_json = { git = 'https://github.com/tonlabs/ever-block-json', tag = '0.7.95' } +ton_executor = { git = 'https://github.com/tonlabs/ever-executor', tag = '1.15.175', features = ['signature_with_id'] } +ton_types = { git = 'https://github.com/tonlabs/ever-types', tag = '1.12.7' } +ton_vm = { git = 'https://github.com/tonlabs/ever-vm', tag = '1.8.116', features = ['gosh'] } diff --git a/node/data/deprecated_giver_abi2_deploy_msg.boc b/node/data/deprecated_giver_abi2_deploy_msg.boc deleted file mode 100644 index 5437b0e..0000000 Binary files a/node/data/deprecated_giver_abi2_deploy_msg.boc and /dev/null differ diff --git a/node/data/giver_abi1_deploy_msg.boc b/node/data/giver_abi1_deploy_msg.boc deleted file mode 100644 index 2d61080..0000000 Binary files a/node/data/giver_abi1_deploy_msg.boc and /dev/null differ diff --git a/node/data/giver_abi2_deploy_msg.boc b/node/data/giver_abi2_deploy_msg.boc deleted file mode 100644 index 3eb611d..0000000 Binary files a/node/data/giver_abi2_deploy_msg.boc and /dev/null differ diff --git a/node/data/safemultisig_deploy_msg.boc b/node/data/safemultisig_deploy_msg.boc deleted file mode 100644 index f1ea8b9..0000000 Binary files a/node/data/safemultisig_deploy_msg.boc and /dev/null differ diff --git a/node/src/api/message_api.rs b/node/src/api/message_api.rs index 996bdfa..9573258 100644 --- a/node/src/api/message_api.rs +++ b/node/src/api/message_api.rs @@ -14,7 +14,7 @@ * under the License. 
*/ -use crate::engine::messages::{InMessagesQueue, QueuedMessage}; +use crate::engine::messages::InMessagesQueue; use crate::engine::MessagesReceiver; use crate::error::NodeResult; use iron::{ @@ -95,7 +95,7 @@ impl MessageReceiverApi { } }; - let mut message = QueuedMessage::with_message(message).unwrap(); + let mut message = message; while let Err(msg) = queue.queue(message) { if queue.has_delivery_problems() { log::warn!(target: "node", "Request was refused because downstream services are not accessible"); diff --git a/node/src/block/applier.rs b/node/src/block/applier.rs deleted file mode 100644 index e245054..0000000 --- a/node/src/block/applier.rs +++ /dev/null @@ -1,84 +0,0 @@ -/* -* Copyright 2018-2022 TON DEV SOLUTIONS LTD. -* -* Licensed under the SOFTWARE EVALUATION License (the "License"); you may not use -* this file except in compliance with the License. You may obtain a copy of the -* License at: -* -* https://www.ton.dev/licenses -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific TON DEV software governing permissions and limitations -* under the License. 
-*/ - -use crate::data::DocumentsDb; -use crate::NodeResult; -use std::sync::Arc; -use parking_lot::Mutex; -use ton_block::{BlkPrevInfo, Block, ShardStateUnsplit}; -use ton_types::UInt256; - -/// Trait for save finality states blockchain -pub trait BlockFinality { - fn put_block_with_info( - &mut self, - block: &Block, - shard_state: Arc, - ) -> NodeResult<()>; - - fn get_last_seq_no(&self) -> u32; - - fn get_last_block_info(&self) -> NodeResult; - - fn get_last_shard_state(&self) -> Arc; - - fn find_block_by_hash(&self, hash: &UInt256) -> u64; - - fn rollback_to(&mut self, hash: &UInt256) -> NodeResult<()>; - - fn get_raw_block_by_seqno(&self, seq_no: u32, vert_seq_no: u32) -> NodeResult>; - - fn get_last_finality_shard_hash(&self) -> NodeResult<(u64, UInt256)>; - - fn reset(&mut self) -> NodeResult<()>; -} - -/// Applies changes provided by new block to shard state (in memory instance) -/// and saves all changes into all kinds of storages -#[allow(dead_code)] -pub struct NewBlockApplier -where - F: BlockFinality, -{ - db: Arc, - finality: Arc>, -} - -impl NewBlockApplier -where - F: BlockFinality, -{ - /// Create new NewBlockApplier with given storages and shard state - pub fn with_params(finality: Arc>, db: Arc) -> Self { - NewBlockApplier { finality, db } - } - - /// Applies changes provided by given block, returns new shard state - pub fn apply( - &mut self, - block: &Block, - applied_shard: ShardStateUnsplit, - ) -> NodeResult> { - let mut finality = self.finality.lock(); - let new_shard_state = Arc::new(applied_shard); - - log::info!(target: "node", "Apply block seq_no = {}", block.read_info()?.seq_no()); - - finality.put_block_with_info(block, new_shard_state.clone())?; - - Ok(new_shard_state) - } -} diff --git a/node/src/block/builder.rs b/node/src/block/builder.rs index 689a820..6045854 100644 --- a/node/src/block/builder.rs +++ b/node/src/block/builder.rs @@ -19,19 +19,19 @@ use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use 
ton_block::{ Account, AddSub, Augmentation, BlkPrevInfo, Block, BlockExtra, BlockInfo, ComputeSkipReason, - CurrencyCollection, Deserializable, EnqueuedMsg, HashUpdate, HashmapAugType, InMsg, InMsgDescr, - MerkleUpdate, Message, MsgEnvelope, OutMsg, OutMsgDescr, OutMsgQueue, OutMsgQueueInfo, - OutMsgQueueKey, Serializable, ShardAccount, ShardAccountBlocks, ShardAccounts, ShardIdent, - ShardStateUnsplit, TrComputePhase, TrComputePhaseVm, Transaction, TransactionDescr, - TransactionDescrOrdinary, UnixTime32, ValueFlow, CopyleftRewards, + CopyleftRewards, CurrencyCollection, Deserializable, EnqueuedMsg, HashUpdate, HashmapAugType, + InMsg, InMsgDescr, MerkleUpdate, Message, MsgEnvelope, OutMsg, OutMsgDescr, OutMsgQueue, + OutMsgQueueInfo, OutMsgQueueKey, Serializable, ShardAccount, ShardAccountBlocks, ShardAccounts, + ShardIdent, ShardStateUnsplit, TrComputePhase, TrComputePhaseVm, Transaction, TransactionDescr, + TransactionDescrOrdinary, UnixTime32, ValueFlow, }; use ton_executor::{ BlockchainConfig, ExecuteParams, ExecutorError, OrdinaryTransactionExecutor, TransactionExecutor, }; -use ton_types::{error, AccountId, Cell, HashmapRemover, HashmapType, Result, UInt256, SliceData}; +use ton_types::{error, AccountId, Cell, HashmapRemover, HashmapType, Result, SliceData, UInt256}; -use crate::engine::{InMessagesQueue, QueuedMessage}; +use crate::engine::InMessagesQueue; use crate::error::NodeResult; #[cfg(test)] @@ -145,6 +145,7 @@ impl BlockBuilder { last_tr_lt: Arc::clone(<), seed_block: self.rand_seed.clone(), debug, + signature_id: self.shard_state.global_id(), ..Default::default() }, ); @@ -201,20 +202,23 @@ impl BlockBuilder { pub fn execute( &mut self, - msg: QueuedMessage, - blockchain_config: BlockchainConfig, + message: Message, + blockchain_config: &BlockchainConfig, acc_id: &AccountId, debug: bool, ) -> NodeResult<()> { let shard_acc = self.accounts.account(acc_id)?.unwrap_or_default(); let mut acc_root = shard_acc.account_cell(); - let executor = 
OrdinaryTransactionExecutor::new(blockchain_config); + let executor = OrdinaryTransactionExecutor::new((*blockchain_config).clone()); - log::debug!("Executing message {:x}", msg.message_hash()); + log::debug!( + "Executing message {:x}", + message.serialize().unwrap_or_default().repr_hash() + ); let (mut transaction, max_lt) = self.try_prepare_transaction( &executor, &mut acc_root, - msg.message(), + &message, shard_acc.last_trans_lt(), debug, )?; @@ -253,9 +257,9 @@ impl BlockBuilder { pub fn build_block( mut self, queue: &InMessagesQueue, - blockchain_config: BlockchainConfig, + blockchain_config: &BlockchainConfig, debug: bool, - ) -> NodeResult> { + ) -> NodeResult<(Block, ShardStateUnsplit, bool)> { let mut is_empty = true; // first import internal messages @@ -275,10 +279,11 @@ impl BlockBuilder { let env = enq.read_out_msg()?; let message = env.read_message()?; if let Some(acc_id) = message.int_dst_account_id() { - let msg = QueuedMessage::with_message(message)?; - self.execute(msg, blockchain_config.clone(), &acc_id, debug)?; + self.execute(message, blockchain_config, &acc_id, debug)?; } - self.out_queue_info.out_queue_mut().remove(SliceData::load_cell(key)?)?; + self.out_queue_info + .out_queue_mut() + .remove(SliceData::load_cell(key)?)?; // TODO: check block full is_empty = false; if self.total_gas_used > 1_000_000 { @@ -286,12 +291,13 @@ impl BlockBuilder { break; } } + let workchain_id = self.shard_state.shard().workchain_id(); // second import external messages - while let Some(msg) = queue.dequeue() { - let acc_id = msg.message().int_dst_account_id().unwrap(); + while let Some(msg) = queue.dequeue(workchain_id) { + let acc_id = msg.int_dst_account_id().unwrap(); // lock account in queue - let res = self.execute(msg, blockchain_config.clone(), &acc_id, debug); + let res = self.execute(msg, blockchain_config, &acc_id, debug); if let Err(err) = res { log::warn!(target: "node", "Executor execute failed. 
{}", err); } else { @@ -328,12 +334,7 @@ impl BlockBuilder { self.out_msg_descr .set(&msg_cell.repr_hash(), &out_msg, &out_msg.aug()?)?; } else { - self.execute( - QueuedMessage::with_message(message)?, - blockchain_config.clone(), - &acc_id, - debug, - )?; + self.execute(message, blockchain_config, &acc_id, debug)?; if self.total_gas_used > 1_000_000 { block_full = true; } @@ -343,15 +344,11 @@ impl BlockBuilder { } if !is_empty { log::info!(target: "node", "in messages queue len={}", queue.len()); - // let seq_no = self.block_info.seq_no(); - let (block, new_shard_state) = self.finalize_block()?; - // let block_text = ton_block_json::debug_block_full(&block)?; - // std::fs::write(&format!("e:\\block_{}.json", seq_no), block_text)?; - Ok(Some((block, new_shard_state))) } else { log::debug!(target: "node", "block is empty in messages queue len={}", queue.len()); - Ok(None) } + let (block, new_shard_state) = self.finalize_block()?; + Ok((block, new_shard_state, is_empty)) } /// @@ -366,9 +363,10 @@ impl BlockBuilder { self.account_blocks .add_serialized_transaction(&transaction, &tr_cell)?; - + if let Some(copyleft_reward) = transaction.copyleft_reward() { - self.copyleft_rewards.add_copyleft_reward(©left_reward.address, ©left_reward.reward)?; + self.copyleft_rewards + .add_copyleft_reward(©left_reward.address, ©left_reward.reward)?; } if let Some(msg_cell) = transaction.in_msg_cell() { @@ -439,7 +437,7 @@ impl BlockBuilder { /// Complete the construction of the block and return it. 
/// returns generated block and new shard state bag (and transaction count) /// - pub fn finalize_block(self) -> Result<(Block, ShardStateUnsplit)> { + pub fn finalize_block(mut self) -> Result<(Block, ShardStateUnsplit)> { let mut new_shard_state = self.shard_state.deref().clone(); new_shard_state.set_seq_no(self.block_info.seq_no()); new_shard_state.write_accounts(&self.accounts)?; @@ -467,8 +465,15 @@ impl BlockBuilder { // let old_ss_root = self.shard_state.serialize()?; // let state_update = MerkleUpdate::create(&old_ss_root, &new_ss_root)?; let state_update = MerkleUpdate::default(); - - let block = Block::with_params(0, self.block_info, value_flow, state_update, block_extra)?; + self.block_info.set_end_lt(self.end_lt.max(self.start_lt + 1)); + + let block = Block::with_params( + self.shard_state.global_id(), + self.block_info, + value_flow, + state_update, + block_extra, + )?; Ok((block, new_shard_state)) } } diff --git a/node/src/block/finality.rs b/node/src/block/finality.rs index 9a84bdb..3f07f4e 100644 --- a/node/src/block/finality.rs +++ b/node/src/block/finality.rs @@ -14,14 +14,13 @@ * under the License. 
*/ -use crate::block::applier::BlockFinality; use crate::data::{ - BlocksStorage, DocumentsDb, FileBasedStorage, FinalityStorage, ShardStateInfo, - ShardStateStorage, TransactionsStorage, SerializedItem, + BlocksStorage, DocumentsDb, FileBasedStorage, FinalityStorage, SerializedItem, ShardStateInfo, + ShardStateStorage, TransactionsStorage, }; use crate::error::NodeError; use crate::NodeResult; -use std::collections::{HashSet, BTreeMap}; +use std::collections::{BTreeMap, HashSet}; use std::{ collections::HashMap, fs::{create_dir_all, File}, @@ -38,7 +37,7 @@ mod tests; lazy_static::lazy_static!( static ref ACCOUNT_NONE_HASH: UInt256 = Account::default().serialize().unwrap().repr_hash(); - pub static ref MINTER_ADDRESS: MsgAddressInt = + pub static ref MINTER_ADDRESS: MsgAddressInt = MsgAddressInt::AddrStd(MsgAddrStd::with_address(None, 0, [0; 32].into())); ); @@ -67,9 +66,11 @@ where db: Option>, current_block: Box, - blocks_by_hash: HashMap>, // need to remove - blocks_by_no: HashMap>, // need to remove - last_finalized_block: Box, + blocks_by_hash: HashMap>, + // need to remove + blocks_by_no: HashMap>, + // need to remove + pub(crate) last_finalized_block: Box, } fn key_by_seqno(seq_no: u32, vert_seq_no: u32) -> u64 { @@ -86,6 +87,7 @@ where /// Create new instance BlockFinality /// with all kind of storages pub fn with_params( + global_id: i32, shard_ident: ShardIdent, root_path: PathBuf, shard_state_storage: Arc, @@ -99,20 +101,24 @@ where .expect("cannot create shards directory"); OrdinaryBlockFinality { - shard_ident, + shard_ident: shard_ident.clone(), root_path, shard_state_storage, blocks_storage, _tr_storage, fn_storage, db, - current_block: Box::new(ShardBlock::default()), + current_block: Box::new(ShardBlock::new(global_id, shard_ident.clone())), blocks_by_hash: HashMap::new(), blocks_by_no: HashMap::new(), - last_finalized_block: Box::new(ShardBlock::default()), + last_finalized_block: Box::new(ShardBlock::new(global_id, shard_ident.clone())), } } + 
pub(crate) fn get_last_info(&self) -> NodeResult<(Arc, BlkPrevInfo)> { + Ok((self.get_last_shard_state(), self.get_last_block_info()?)) + } + fn finality_blocks(&mut self, root_hash: UInt256) -> NodeResult<()> { log::debug!("FIN-BLK {:x}", root_hash); if let Some(fin_sb) = self.blocks_by_hash.remove(&root_hash) { @@ -236,16 +242,33 @@ where self.root_path.clone(), &self.shard_ident, )?; - shard_path.push("blocks_finality.info"); - log::info!(target: "node", "load: {}", shard_path.to_str().unwrap()); - let mut file_info = File::open(shard_path)?; - self.deserialize(&mut file_info)?; + let mut info_path = shard_path.clone(); + info_path.push("blocks_finality.info"); + log::info!(target: "node", "load: {}", info_path.to_str().unwrap()); + if let Ok(mut file_info) = File::open(info_path) { + self.deserialize(&mut file_info)?; + } else { + shard_path.push("zerostate"); + log::info!(target: "node", "load: {}", shard_path.to_str().unwrap()); + let zerostate = std::fs::read(shard_path)?; + self.load_zerostate(zerostate)?; + } Ok(()) } /// - /// Write data BlockFinality to file + /// Load zerostate + /// + fn load_zerostate(&mut self, zerostate: Vec) -> NodeResult<()> { + let mut zerostate = ShardStateUnsplit::construct_from_bytes(&zerostate)?; + zerostate.set_global_id(self.current_block.block.global_id()); + self.current_block.shard_state = Arc::new(zerostate.clone()); + self.last_finalized_block.shard_state = Arc::new(zerostate); + Ok(()) + } + /// + /// Write data BlockFinality to file fn serialize(&self, writer: &mut dyn Write) -> NodeResult<()> { // serialize struct: // 32bit - length of structure ShardBlock @@ -292,7 +315,10 @@ where let seq_no = rdr.read_le_u64()?; log::info!(target: "node", "read_one_sb:seq_no: {}", seq_no); if seq_no == 0 { - Ok(Box::new(ShardBlock::default())) + Ok(Box::new(ShardBlock::new( + self.current_block.block.global_id, + self.shard_ident.clone(), + ))) } else { let mut current_block_name = block_finality_path.clone(); 
current_block_name.push(hash.to_hex_string()); @@ -342,7 +368,6 @@ where Ok(()) } - fn prepare_messages_from_transaction( transaction: &Transaction, block_id: UInt256, @@ -353,7 +378,7 @@ where if let Some(message_cell) = transaction.in_msg_cell() { let message = Message::construct_from_cell(message_cell.clone())?; let message_id = message_cell.repr_hash(); - let mut doc = + let mut doc = if message.is_inbound_external() || message.src_ref() == Some(&MINTER_ADDRESS) { Self::prepare_message_json( message_cell, @@ -363,21 +388,21 @@ where Some(transaction.now()), )? } else { - messages.remove(&message_id) - .unwrap_or_else(|| { - let mut doc = serde_json::value::Map::with_capacity(2); - doc.insert("id".into(), message_id.as_hex_string().into()); - doc - }) + messages.remove(&message_id).unwrap_or_else(|| { + let mut doc = serde_json::value::Map::with_capacity(2); + doc.insert("id".into(), message_id.as_hex_string().into()); + doc + }) }; - + doc.insert( "dst_chain_order".into(), - format!("{}{}", tr_chain_order, ton_block_json::u64_to_string(0)).into()); - + format!("{}{}", tr_chain_order, ton_block_json::u64_to_string(0)).into(), + ); + messages.insert(message_id, doc); }; - + let mut index: u64 = 1; transaction.out_msgs.iterate_slices(|slice| { let message_cell = slice.reference(0)?; @@ -390,17 +415,18 @@ where block_id.clone(), None, // transaction_now affects ExtIn messages only )?; - + // messages are ordered by created_lt doc.insert( "src_chain_order".into(), - format!("{}{}", tr_chain_order, ton_block_json::u64_to_string(index)).into()); - + format!("{}{}", tr_chain_order, ton_block_json::u64_to_string(index)).into(), + ); + index += 1; messages.insert(message_id, doc); Ok(true) })?; - + Ok(()) } @@ -415,7 +441,7 @@ where let proof = block_root_for_proof .map(|cell| serialize_toc(&message.prepare_proof(true, cell)?)) .transpose()?; - + let set = ton_block_json::MessageSerializationSet { message, id: message_cell.repr_hash(), @@ -438,7 +464,7 @@ where } fn 
prepare_transaction_json( - tr_cell: Cell, + tr_cell: Cell, transaction: Transaction, block_root: &Cell, block_id: UInt256, @@ -469,7 +495,7 @@ where account: Account, prev_account_state: Option, last_trans_chain_order: Option, - ) -> Result { + ) -> Result { let mut boc1 = None; if account.init_code_hash().is_some() { // new format @@ -487,10 +513,13 @@ where boc1, ..Default::default() }; - + let mut doc = ton_block_json::db_serialize_account("id", &set)?; if let Some(last_trans_chain_order) = last_trans_chain_order { - doc.insert("last_trans_chain_order".to_owned(), last_trans_chain_order.into()); + doc.insert( + "last_trans_chain_order".to_owned(), + last_trans_chain_order.into(), + ); } Ok(Self::doc_to_item(doc)) } @@ -510,7 +539,10 @@ where let mut doc = ton_block_json::db_serialize_deleted_account("id", &set)?; if let Some(last_trans_chain_order) = last_trans_chain_order { - doc.insert("last_trans_chain_order".to_owned(), last_trans_chain_order.into()); + doc.insert( + "last_trans_chain_order".to_owned(), + last_trans_chain_order.into(), + ); } Ok(Self::doc_to_item(doc)) } @@ -537,7 +569,7 @@ where pub fn reflect_block_in_db(&self, shard_block: &ShardBlock) -> Result<()> { let db = match self.db.clone() { Some(db) => db, - None => return Ok(()) + None => return Ok(()), }; let add_proof = false; @@ -554,7 +586,7 @@ where let shard_accounts = shard_block.shard_state.read_accounts()?; let now_all = std::time::Instant::now(); - + // Prepare sorted ton_block transactions and addresses of changed accounts let mut changed_acc = HashSet::new(); let mut deleted_acc = HashSet::new(); @@ -562,44 +594,54 @@ where let now = std::time::Instant::now(); let mut tr_count = 0; let mut transactions = BTreeMap::new(); - block_extra.read_account_blocks()?.iterate_objects(|account_block: AccountBlock| { - // extract ids of changed accounts - let state_upd = account_block.read_state_update()?; - let mut check_account_existed = false; - if state_upd.new_hash == *ACCOUNT_NONE_HASH { 
- deleted_acc.insert(account_block.account_id().clone()); - if state_upd.old_hash == *ACCOUNT_NONE_HASH { - check_account_existed = true; + block_extra + .read_account_blocks()? + .iterate_objects(|account_block: AccountBlock| { + // extract ids of changed accounts + let state_upd = account_block.read_state_update()?; + let mut check_account_existed = false; + if state_upd.new_hash == *ACCOUNT_NONE_HASH { + deleted_acc.insert(account_block.account_id().clone()); + if state_upd.old_hash == *ACCOUNT_NONE_HASH { + check_account_existed = true; + } + } else { + changed_acc.insert(account_block.account_id().clone()); } - } else { - changed_acc.insert(account_block.account_id().clone()); - } - let mut account_existed = false; - account_block.transactions().iterate_slices(|_, transaction_slice| { - // extract transactions - let cell = transaction_slice.reference(0)?; - let transaction = Transaction::construct_from(&mut SliceData::load_cell(cell.clone())?)?; - let ordering_key = (transaction.logical_time(), transaction.account_id().clone()); - if transaction.orig_status != AccountStatus::AccStateNonexist || - transaction.end_status != AccountStatus::AccStateNonexist - { - account_existed = true; + let mut account_existed = false; + account_block + .transactions() + .iterate_slices(|_, transaction_slice| { + // extract transactions + let cell = transaction_slice.reference(0)?; + let transaction = + Transaction::construct_from(&mut SliceData::load_cell(cell.clone())?)?; + let ordering_key = + (transaction.logical_time(), transaction.account_id().clone()); + if transaction.orig_status != AccountStatus::AccStateNonexist + || transaction.end_status != AccountStatus::AccStateNonexist + { + account_existed = true; + } + transactions.insert(ordering_key, (cell, transaction)); + tr_count += 1; + + Ok(true) + })?; + + if check_account_existed && !account_existed { + deleted_acc.remove(account_block.account_id()); } - transactions.insert(ordering_key, (cell, transaction)); - tr_count 
+= 1; Ok(true) })?; - - if check_account_existed && !account_existed { - deleted_acc.remove(account_block.account_id()); - } - - Ok(true) - })?; - log::trace!("TIME: preliminary prepare {} transactions {}ms; {}", tr_count, now.elapsed().as_millis(), block_id); - + log::trace!( + "TIME: preliminary prepare {} transactions {}ms; {}", + tr_count, + now.elapsed().as_millis(), + block_id + ); // Iterate ton_block transactions to: // - prepare messages and transactions for external db @@ -608,8 +650,12 @@ where let mut index = 0; let mut messages = Default::default(); for (_, (cell, transaction)) in transactions.into_iter() { - let tr_chain_order = format!("{}{}", block_order, ton_block_json::u64_to_string(index as u64)); - + let tr_chain_order = format!( + "{}{}", + block_order, + ton_block_json::u64_to_string(index as u64) + ); + Self::prepare_messages_from_transaction( &transaction, block_root.repr_hash(), @@ -622,16 +668,16 @@ where acc_last_trans_chain_order.insert(account_id, tr_chain_order.clone()); let mut doc = Self::prepare_transaction_json( - cell, + cell, transaction, &block_root, block_root.repr_hash(), workchain_id, - add_proof + add_proof, )?; doc.insert("chain_order".into(), tr_chain_order.into()); db.put_transaction(Self::doc_to_item(doc))?; - + index += 1; } let msg_count = messages.len(); // is 0 if not process_message @@ -646,17 +692,16 @@ where block_id, ); - // Prepare accounts (changed and deleted) let now = std::time::Instant::now(); for account_id in changed_acc.iter() { - let acc = shard_accounts.account(account_id)? 
- .ok_or_else(|| - NodeError::InvalidData( - "Block and shard state mismatch: \ - state doesn't contain changed account".to_string() - ) - )?; + let acc = shard_accounts.account(account_id)?.ok_or_else(|| { + NodeError::InvalidData( + "Block and shard state mismatch: \ + state doesn't contain changed account" + .to_string(), + ) + })?; let acc = acc.read_account()?; db.put_account(Self::prepare_account_record( @@ -675,35 +720,37 @@ where last_trans_chain_order, )?)?; } - log::trace!("TIME: accounts {} {}ms; {}", changed_acc.len(), now.elapsed().as_millis(), block_id); + log::trace!( + "TIME: accounts {} {}ms; {}", + changed_acc.len(), + now.elapsed().as_millis(), + block_id + ); // Block let now = std::time::Instant::now(); - db.put_block( - Self::prepare_block_record( - block, - &block_root, - block_boc, - &file_hash, - block_order.clone(), - )? - )?; - log::trace!("TIME: block {}ms; {}", now.elapsed().as_millis(), block_id); - log::trace!("TIME: prepare & build jsons {}ms; {}", now_all.elapsed().as_millis(), block_id); + db.put_block(Self::prepare_block_record( + block, + &block_root, + block_boc, + &file_hash, + block_order.clone(), + )?)?; + log::trace!( + "TIME: block {}ms; {}", + now.elapsed().as_millis(), + block_id + ); + log::trace!( + "TIME: prepare & build jsons {}ms; {}", + now_all.elapsed().as_millis(), + block_id + ); Ok(()) } -} - -impl BlockFinality for OrdinaryBlockFinality -where - S: ShardStateStorage, - B: BlocksStorage, - T: TransactionsStorage, - F: FinalityStorage, -{ /// Save block until finality comes - fn put_block_with_info( + pub(crate) fn put_block_with_info( &mut self, block: &Block, shard_state: Arc, @@ -726,11 +773,6 @@ where Ok(()) } - /// get last block sequence number - fn get_last_seq_no(&self) -> u32 { - self.current_block.block.read_info().unwrap().seq_no() - } - /// get last block info fn get_last_block_info(&self) -> NodeResult { let info = &self.current_block.block.read_info()?; @@ -749,8 +791,9 @@ where //log::warn!("LAST 
SHARD BAG {}", self.current_block.shard_bag.get_repr_hash_by_index(0).unwrap().to_hex_string())); Arc::clone(&self.current_block.shard_state) } - /// find block by hash and return his sequence number (for sync) - fn find_block_by_hash(&self, hash: &UInt256) -> u64 { + + #[cfg(test)] + pub fn find_block_by_hash(&self, hash: &UInt256) -> u64 { if self.blocks_by_hash.contains_key(hash) { self.blocks_by_hash.get(hash).unwrap().seq_no() } else { @@ -759,7 +802,8 @@ where } /// Rollback shard state to one of block candidates - fn rollback_to(&mut self, hash: &UInt256) -> NodeResult<()> { + #[cfg(test)] + pub fn rollback_to(&mut self, hash: &UInt256) -> NodeResult<()> { if self.blocks_by_hash.contains_key(hash) { let sb = self.blocks_by_hash.remove(hash).unwrap(); let mut seq_no = sb.seq_no(); @@ -782,41 +826,7 @@ where } } - /// get raw signed block data - for synchronize - fn get_raw_block_by_seqno(&self, seq_no: u32, vert_seq_no: u32) -> NodeResult> { - let key = key_by_seqno(seq_no, vert_seq_no); - if self.blocks_by_no.contains_key(&key) { - /* TODO: which case to use? - return Ok(self.blocks_by_no.get(&key).unwrap().serialized_block.clone()) - TODO rewrite to - return Ok( - self.fn_storage.load_non_finalized_block_by_seq_no(key)?.serialized_block.clone() - )*/ - return Ok(self - .stored_to_loaded(self.blocks_by_no.get(&key).unwrap().clone())? 
- .serialized_block - .clone()); - } - self.blocks_storage.raw_block(seq_no, vert_seq_no) - } - - /// get number of last finalized shard - fn get_last_finality_shard_hash(&self) -> NodeResult<(u64, UInt256)> { - // TODO avoid serialization there - let cell = self.last_finalized_block.shard_state.serialize()?; - - Ok((self.last_finalized_block.seq_no, cell.repr_hash())) - } - /// reset block finality - /// clean all maps, load last finalized data - fn reset(&mut self) -> NodeResult<()> { - self.current_block = self.last_finalized_block.clone(); - // remove files from disk - self.blocks_by_hash.clear(); - self.blocks_by_no.clear(); - Ok(()) - } } #[derive(Clone, Debug, PartialEq)] @@ -833,6 +843,7 @@ impl FinalityBlock { } } + #[cfg(test)] pub fn root_hash(&self) -> &UInt256 { match self { FinalityBlock::Stored(sb) => &sb.root_hash, @@ -863,24 +874,27 @@ pub struct ShardBlock { serialized_block: Vec, root_hash: UInt256, file_hash: UInt256, - block: Block, + pub(crate) block: Block, shard_state: Arc, } -impl Default for ShardBlock { - fn default() -> Self { +impl ShardBlock { + fn new(global_id: i32, shard: ShardIdent) -> Self { + let mut shard_state = ShardStateUnsplit::default(); + shard_state.set_global_id(global_id); + shard_state.set_shard(shard); + let mut block = Block::default(); + block.global_id = global_id; Self { seq_no: 0, serialized_block: Vec::new(), root_hash: UInt256::ZERO, file_hash: UInt256::ZERO, - block: Block::default(), - shard_state: Arc::new(ShardStateUnsplit::default()), + block, + shard_state: Arc::new(shard_state), } } -} -impl ShardBlock { /// get current block sequence number pub fn get_seq_no(&self) -> u64 { self.seq_no @@ -923,7 +937,7 @@ impl ShardBlock { /// deserialize shard block pub fn deserialize(rdr: &mut R) -> NodeResult { - let mut sb = ShardBlock::default(); + let mut sb = ShardBlock::new(0, ShardIdent::default()); sb.seq_no = rdr.read_le_u64()?; let sb_len = rdr.read_le_u32()?; let mut sb_buf = vec![0; sb_len as usize]; @@ 
-941,7 +955,6 @@ impl ShardBlock { let cell = deserialize_tree_of_cells(rdr)?; sb.block = Block::construct_from_cell(cell)?; - Ok(sb) } } @@ -967,7 +980,8 @@ pub fn generate_block_with_seq_no( (0..32).map(|_| rand::random::()).collect::>(), 256, ); - let mut transaction = Transaction::with_address_and_status(acc.clone(), AccountStatus::AccStateActive); + let mut transaction = + Transaction::with_address_and_status(acc.clone(), AccountStatus::AccStateActive); let mut value = CurrencyCollection::default(); value.grams = 10202u64.into(); let mut imh = InternalMessageHeader::with_addresses( @@ -990,7 +1004,8 @@ pub fn generate_block_with_seq_no( inmsg1.set_body(SliceData::new(vec![0x21; 120])); let ext_in_header = ExternalInboundMessageHeader { - src: MsgAddressExt::with_extern(SliceData::new(vec![0x23, 0x52, 0x73, 0x00, 0x80])).unwrap(), + src: MsgAddressExt::with_extern(SliceData::new(vec![0x23, 0x52, 0x73, 0x00, 0x80])) + .unwrap(), dst: MsgAddressInt::with_standart(None, 0, acc.clone()).unwrap(), import_fee: 10u64.into(), }; @@ -1015,8 +1030,7 @@ pub fn generate_block_with_seq_no( let ext_out_header = ExtOutMessageHeader::with_addresses( MsgAddressInt::with_standart(None, 0, acc.clone()).unwrap(), - MsgAddressExt::with_extern(SliceData::new(vec![0x23, 0x52, 0x73, 0x00, 0x80])) - .unwrap(), + MsgAddressExt::with_extern(SliceData::new(vec![0x23, 0x52, 0x73, 0x00, 0x80])).unwrap(), ); let mut outmsg2 = Message::with_ext_out_header(ext_out_header); @@ -1026,7 +1040,9 @@ pub fn generate_block_with_seq_no( transaction.add_out_message(&outmsg2).unwrap(); let tr_cell = transaction.serialize().unwrap(); - block_builder.add_raw_transaction(transaction, tr_cell).unwrap(); + block_builder + .add_raw_transaction(transaction, tr_cell) + .unwrap(); } let (block, _) = block_builder.finalize_block().unwrap(); block diff --git a/node/src/block/mod.rs b/node/src/block/mod.rs index b264636..d1f514d 100644 --- a/node/src/block/mod.rs +++ b/node/src/block/mod.rs @@ -1,9 +1,5 @@ -mod 
applier; mod builder; pub mod finality; -pub use applier::{BlockFinality, NewBlockApplier}; pub use builder::BlockBuilder; -pub use finality::{ - FinalityBlock, OrdinaryBlockFinality, ShardBlock, ShardBlockHash, -}; +pub use finality::{FinalityBlock, OrdinaryBlockFinality, ShardBlock, ShardBlockHash}; diff --git a/node/src/config.rs b/node/src/config.rs index 1dcdc71..8c43b6e 100644 --- a/node/src/config.rs +++ b/node/src/config.rs @@ -89,6 +89,8 @@ pub struct NodeConfig { pub private_key: String, pub keys: Vec, pub boot: Vec, + #[serde(default = "NodeConfig::default_global_id")] + pub global_id: i32, pub shard_id: ShardIdConfig, pub document_db: serde_json::Value, #[serde(default = "NodeConfig::default_log_path")] @@ -98,6 +100,9 @@ pub struct NodeConfig { } impl NodeConfig { + fn default_global_id() -> i32 { + 0 + } fn default_log_path() -> String { "./log_cfg.yml".to_string() } diff --git a/node/src/data/arango.rs b/node/src/data/arango.rs index 926fabb..750ea92 100644 --- a/node/src/data/arango.rs +++ b/node/src/data/arango.rs @@ -197,7 +197,7 @@ impl ArangoHelper { log::error!(target: "node", "error sending to arango {}, ({:?})", url, resp.status().canonical_reason()); } else { has_delivery_problems.store(false, Ordering::SeqCst); - log::debug!(target: "node", "sucessfully sent to arango"); + log::debug!(target: "node", "successfully sent to arango"); break; } } diff --git a/node/src/data/file_based_storage.rs b/node/src/data/file_based_storage.rs index 2ca9ed1..df618fe 100644 --- a/node/src/data/file_based_storage.rs +++ b/node/src/data/file_based_storage.rs @@ -15,6 +15,7 @@ */ use super::{BlocksStorage, ShardStateStorage, TransactionsStorage}; +use crate::data::{FinalityStorage, ShardStateInfo}; use crate::error::NodeResult; use parking_lot::Mutex; use std; @@ -30,7 +31,6 @@ use ton_block::{ }; use ton_types::cells_serialization::{deserialize_tree_of_cells, serialize_tree_of_cells}; use ton_types::{types::UInt256, AccountId, Cell}; -use 
crate::data::{FinalityStorage, ShardStateInfo}; #[cfg(test)] #[path = "../../../../tonos-se-tests/unit/test_file_based_storage.rs"] @@ -54,7 +54,6 @@ impl ShardHash { shard_hash: UInt256::from([0; 32]), } } - } impl Ord for ShardHash { @@ -105,8 +104,7 @@ impl FileBasedStorage { /// Create "Shards" directory pub fn create_workchains_dir(root: &PathBuf) -> NodeResult { - let mut shards = root.clone(); - shards.push("workchains"); + let shards = root.join("workchains"); if !shards.as_path().exists() { create_dir_all(shards.as_path())?; } @@ -115,37 +113,34 @@ impl FileBasedStorage { /// /// Create catalog tree for storage - /// root_path/Shards/ - /// /Shard_(PFX)/Shard - /// /Blocks/Block_(seq_no)_(ver_no) + /// root_path + /// workchains + /// MC | WC + /// shard_(prefix) + /// blocks + /// block_(seq_no)_(ver_no) /// /// returned Shard_state_path and Blocks_dir_path pub fn create_default_shard_catalog( - mut workchains_dir: PathBuf, + workchains_dir: PathBuf, shard_ident: &ShardIdent, ) -> NodeResult<(PathBuf, PathBuf, PathBuf)> { - workchains_dir.push(format!("WC{}", shard_ident.workchain_id())); - workchains_dir.push(format!( - "shard_{:016x}", - shard_ident.shard_prefix_with_tag() - )); - if !workchains_dir.as_path().exists() { - create_dir_all(workchains_dir.as_path())?; - } - - let mut shard_blocks_dir = workchains_dir.clone(); - shard_blocks_dir.push("blocks"); - if !shard_blocks_dir.as_path().exists() { - create_dir_all(shard_blocks_dir.as_path())?; + let workchain_name = if shard_ident.is_masterchain() { + "MC".to_string() + } else { + format!("WC{}", shard_ident.workchain_id()) + }; + let shard_name = format!("shard_{:016x}", shard_ident.shard_prefix_with_tag()); + let shard_dir = workchains_dir.join(workchain_name).join(shard_name); + let blocks_dir = shard_dir.join("blocks"); + let transactions_dir = shard_dir.join("transactions"); + if !blocks_dir.as_path().exists() { + create_dir_all(blocks_dir.as_path())?; } - - let mut transactions_dir = 
workchains_dir.clone(); - transactions_dir.push("transactions"); if !transactions_dir.as_path().exists() { create_dir_all(transactions_dir.as_path())?; } - - Ok((workchains_dir, shard_blocks_dir, transactions_dir)) + Ok((shard_dir, blocks_dir, transactions_dir)) } fn key_by_seqno(seq_no: u32, vert_seq_no: u32) -> u64 { @@ -516,4 +511,3 @@ impl TransactionsStorage for FileBasedStorage { // shard_data: Option>, shard_hash: &UInt256, // shard_state_info: ShardStateInfo) -> NodeResult<()>; // } - diff --git a/node/src/engine/engine.rs b/node/src/engine/engine.rs index 26035f1..6b094d6 100644 --- a/node/src/engine/engine.rs +++ b/node/src/engine/engine.rs @@ -14,45 +14,30 @@ * under the License. */ -use crate::block::finality::MINTER_ADDRESS; -use crate::block::{BlockBuilder, BlockFinality, NewBlockApplier, OrdinaryBlockFinality}; #[cfg(test)] use crate::config::NodeConfig; -use crate::data::{DocumentsDb, DocumentsDbMock, FileBasedStorage}; -use crate::engine::{ - InMessagesQueue, LiveControl, LiveControlReceiver, QueuedMessage, - DEPRECATED_GIVER_ABI2_DEPLOY_MSG, GIVER_ABI1_DEPLOY_MSG, GIVER_ABI2_DEPLOY_MSG, GIVER_BALANCE, - MULTISIG_BALANCE, MULTISIG_DEPLOY_MSG, -}; -use crate::error::{NodeError, NodeResult}; +use crate::data::DocumentsDb; +use crate::engine::masterchain::Masterchain; +use crate::engine::shardchain::Shardchain; +use crate::engine::{InMessagesQueue, LiveControl, LiveControlReceiver}; +use crate::error::NodeResult; use crate::MessagesReceiver; use parking_lot::Mutex; use std::path::PathBuf; use std::{ - io::ErrorKind, sync::{ atomic::{AtomicU32, Ordering}, Arc, }, thread, - time::{Duration, Instant}, -}; -use ton_block::{ - Block, CommonMsgInfo, CurrencyCollection, Deserializable, Grams, InternalMessageHeader, - Message, MsgAddressInt, ShardIdent, ShardStateUnsplit, UnixTime32, }; +use ton_block::ShardIdent; use ton_executor::BlockchainConfig; -use ton_types::{AccountId, HashmapType}; #[cfg(test)] #[path = 
"../../../../tonos-se-tests/unit/test_ton_node_engine.rs"] mod tests; -type Storage = FileBasedStorage; -type ArcBlockFinality = Arc>>; -type BlockApplier = - Mutex>>; - pub struct EngineLiveProperties { pub time_delta: Arc, } @@ -108,14 +93,12 @@ impl LiveControl for EngineLiveControl { /// It is top level struct provided node functionality related to transactions processing. /// Initialises instances of: all messages receivers, InMessagesQueue, MessagesProcessor. pub struct TonNodeEngine { - shard_ident: ShardIdent, live_properties: Arc, receivers: Vec>>, live_control_receiver: Option>, - pub blockchain_config: BlockchainConfig, - pub finalizer: ArcBlockFinality, - pub block_applier: BlockApplier, pub message_queue: Arc, + pub masterchain: Masterchain, + pub workchain: Shardchain, } impl TonNodeEngine { @@ -129,12 +112,12 @@ impl TonNodeEngine { control_receiver.run(Box::new(live_control))?; } - if self.finalizer.lock().get_last_seq_no() == 1 { - let workchain_id = self.current_shard_id().workchain_id() as i8; - self.deploy_contracts(workchain_id)?; + if self.workchain.finality_was_loaded { + self.masterchain.restore_state()?; } + thread::spawn(move || loop { - if let Err(err) = self.prepare_block(true) { + if let Err(err) = self.generate_blocks(true) { log::error!(target: "node", "failed block generation: {}", err); } }); @@ -150,207 +133,64 @@ impl TonNodeEngine { /// Construct new engine for selected shard /// with given time to generate block candidate pub fn with_params( - shard: ShardIdent, + global_id: i32, + workchain_shard: ShardIdent, receivers: Vec>, live_control_receiver: Option>, - blockchain_config: BlockchainConfig, - documents_db: Option>, + blockchain_config: Arc, + documents_db: Arc, storage_path: PathBuf, ) -> NodeResult { - let documents_db = documents_db.unwrap_or_else(|| Arc::new(DocumentsDbMock)); let message_queue = Arc::new(InMessagesQueue::with_db(10000, documents_db.clone())); - - let storage = Arc::new(Storage::with_path(shard.clone(), 
storage_path.clone())?); - let block_finality = Arc::new(Mutex::new(OrdinaryBlockFinality::with_params( - shard.clone(), - storage_path, - storage.clone(), - storage.clone(), - storage.clone(), - storage.clone(), - Some(documents_db.clone()), - Vec::new(), - ))); - match block_finality.lock().load() { - Ok(_) => { - log::info!(target: "node", "load block finality successfully"); - } - Err(NodeError::Io(err)) => { - if err.kind() != ErrorKind::NotFound { - return Err(NodeError::Io(err)); - } - } - Err(err) => { - return Err(err); - } - } - let live_properties = Arc::new(EngineLiveProperties::new()); let receivers = receivers .into_iter() .map(|r| Mutex::new(r)) .collect::>(); - Ok(TonNodeEngine { - shard_ident: shard.clone(), + message_queue: message_queue.clone(), receivers, live_properties, live_control_receiver, - blockchain_config, - finalizer: block_finality.clone(), - block_applier: Mutex::new(NewBlockApplier::with_params(block_finality, documents_db)), - message_queue, + masterchain: Masterchain::with_params( + global_id, + blockchain_config.clone(), + message_queue.clone(), + documents_db.clone(), + storage_path.clone(), + )?, + workchain: Shardchain::with_params( + workchain_shard, + global_id, + blockchain_config.clone(), + message_queue.clone(), + documents_db.clone(), + storage_path.clone(), + )?, }) } - /// Getter for current shard identifier - pub fn current_shard_id(&self) -> &ShardIdent { - &self.shard_ident - } - - fn print_block_info(block: &Block) { - let extra = block.read_extra().unwrap(); - log::info!(target: "node", - "block: gen time = {}, in msg count = {}, out msg count = {}, account_blocks = {}", - block.read_info().unwrap().gen_utime(), - extra.read_in_msg_descr().unwrap().len().unwrap(), - extra.read_out_msg_descr().unwrap().len().unwrap(), - extra.read_account_blocks().unwrap().len().unwrap()); - } - - /// - /// Generate new block if possible - /// - pub fn prepare_block(&self, debug: bool) -> NodeResult<()> { - let shard_state = 
self.finalizer.lock().get_last_shard_state(); - let out_msg_queue_info = shard_state.read_out_msg_queue_info()?; - if out_msg_queue_info.out_queue().is_empty() && self.message_queue.is_empty() { - self.message_queue.wait_new_message(); - } - - let timestamp = UnixTime32::now().as_u32() + self.live_properties.get_time_delta(); - let blk_prev_info = self.finalizer.lock().get_last_block_info()?; - log::debug!(target: "node", "PARENT block: {:?}", blk_prev_info); - - let collator = BlockBuilder::with_params(shard_state, blk_prev_info, timestamp)?; - match collator.build_block(&self.message_queue, self.blockchain_config.clone(), debug)? { - Some((block, new_shard_state)) => { - log::trace!(target: "node", "block generated successfully"); - // TODO remove debug print - Self::print_block_info(&block); - self.finality_and_apply_block(&block, new_shard_state)?; + fn generate_blocks(&self, debug: bool) -> NodeResult<()> { + let mut continue_generating = true; + while continue_generating { + continue_generating = false; + let time_delta = self.live_properties.get_time_delta(); + while let Some(block) = self.workchain.generate_block(time_delta, debug)? { + self.masterchain.register_new_shard_block(&block)?; + continue_generating = true; } - None => { - log::trace!(target: "node", "empty block was not generated"); + if self + .masterchain + .generate_block(time_delta, debug)? 
+ .is_some() + { + continue_generating = true; } } + self.message_queue.wait_new_message(); Ok(()) } - - /// finality and apply block - fn finality_and_apply_block( - &self, - block: &Block, - applied_shard: ShardStateUnsplit, - ) -> NodeResult<(Arc, Vec)> { - let mut time = Vec::new(); - let now = Instant::now(); - let new_state = self.block_applier.lock().apply(block, applied_shard)?; - time.push(now.elapsed().as_micros()); - Ok((new_state, time)) - } -} - -impl TonNodeEngine { - fn deploy_contracts(&self, workchain_id: i8) -> NodeResult<()> { - self.deploy_contract(workchain_id, GIVER_ABI1_DEPLOY_MSG, GIVER_BALANCE, 1)?; - self.deploy_contract(workchain_id, GIVER_ABI2_DEPLOY_MSG, GIVER_BALANCE, 3)?; - self.deploy_contract(workchain_id, MULTISIG_DEPLOY_MSG, MULTISIG_BALANCE, 5)?; - self.deploy_contract( - workchain_id, - DEPRECATED_GIVER_ABI2_DEPLOY_MSG, - GIVER_BALANCE, - 7, - )?; - - Ok(()) - } - - fn deploy_contract( - &self, - workchain_id: i8, - deploy_msg_boc: &[u8], - initial_balance: u128, - transfer_lt: u64, - ) -> NodeResult { - let (deploy_msg, deploy_addr) = - Self::create_contract_deploy_message(workchain_id, deploy_msg_boc); - let transfer_msg = Self::create_transfer_message( - workchain_id, - MINTER_ADDRESS.address(), - deploy_addr.clone(), - initial_balance, - transfer_lt, - ); - self.queue_with_retry(transfer_msg)?; - self.queue_with_retry(deploy_msg)?; - - Ok(deploy_addr) - } - - fn queue_with_retry(&self, message: Message) -> NodeResult<()> { - let mut message = QueuedMessage::with_message(message)?; - while let Err(msg) = self.message_queue.queue(message) { - message = msg; - thread::sleep(Duration::from_micros(100)); - } - - Ok(()) - } - - fn create_contract_deploy_message(workchain_id: i8, msg_boc: &[u8]) -> (Message, AccountId) { - let mut msg = Message::construct_from_bytes(msg_boc).unwrap(); - if let CommonMsgInfo::ExtInMsgInfo(ref mut header) = msg.header_mut() { - match header.dst { - MsgAddressInt::AddrStd(ref mut addr) => 
addr.workchain_id = workchain_id, - _ => panic!("Contract deploy message has invalid destination address"), - } - } - - let address = msg.int_dst_account_id().unwrap(); - - (msg, address) - } - - // create transfer funds message for initialize balance - pub fn create_transfer_message( - workchain_id: i8, - src: AccountId, - dst: AccountId, - value: u128, - lt: u64, - ) -> Message { - let hdr = Self::create_transfer_int_header(workchain_id, src, dst, value); - let mut msg = Message::with_int_header(hdr); - - msg.set_at_and_lt(UnixTime32::now().as_u32(), lt); - msg - } - - pub fn create_transfer_int_header( - workchain_id: i8, - src: AccountId, - dst: AccountId, - value: u128, - ) -> InternalMessageHeader { - InternalMessageHeader::with_addresses_and_bounce( - MsgAddressInt::with_standart(None, workchain_id, src).unwrap(), - MsgAddressInt::with_standart(None, workchain_id, dst).unwrap(), - CurrencyCollection::from_grams(Grams::new(value).unwrap()), - false, - ) - } } #[cfg(test)] diff --git a/node/src/engine/masterchain.rs b/node/src/engine/masterchain.rs new file mode 100644 index 0000000..4f4d0f9 --- /dev/null +++ b/node/src/engine/masterchain.rs @@ -0,0 +1,159 @@ +use crate::data::DocumentsDb; +use crate::engine::shardchain::Shardchain; +use crate::engine::InMessagesQueue; +use crate::error::NodeResult; +use parking_lot::RwLock; +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use ton_block::{ + BinTree, BinTreeType, Block, InRefValue, McBlockExtra, Serializable, ShardDescr, ShardIdent, +}; +use ton_executor::BlockchainConfig; +use ton_types::{SliceData, UInt256}; + +pub struct Masterchain { + shardchain: Shardchain, + shards: RwLock>, + shards_has_been_changed: AtomicBool, +} + +impl Masterchain { + pub fn with_params( + global_id: i32, + blockchain_config: Arc, + message_queue: Arc, + documents_db: Arc, + storage_path: PathBuf, + ) -> NodeResult { + let shardchain = 
Shardchain::with_params( + ShardIdent::masterchain(), + global_id, + blockchain_config, + message_queue, + documents_db, + storage_path, + )?; + Ok(Self { + shardchain, + shards: RwLock::new(HashMap::new()), + shards_has_been_changed: AtomicBool::new(false), + }) + } + + pub fn register_new_shard_block(&self, block: &Block) -> NodeResult<()> { + let info = block.info.read_struct()?; + let block_cell = block.serialize().unwrap(); + let block_boc = ton_types::cells_serialization::serialize_toc(&block_cell)?; + let descr = ShardDescr { + seq_no: info.seq_no(), + reg_mc_seqno: 1, + start_lt: info.start_lt(), + end_lt: info.end_lt(), + root_hash: block_cell.repr_hash(), + file_hash: UInt256::calc_file_hash(&block_boc), + before_split: info.before_split(), + // before_merge: info.before_merge, + want_split: info.want_split(), + want_merge: info.want_merge(), + // nx_cc_updated: info.nx_cc_updated, + flags: info.flags(), + // next_catchain_seqno: info.next_catchain_seqno, + next_validator_shard: info.shard().shard_prefix_with_tag(), + min_ref_mc_seqno: info.min_ref_mc_seqno(), + gen_utime: info.gen_utime().as_u32(), + // split_merge_at: info.split_merge_at, + // fees_collected: info.fees_collected, + // funds_created: info.funds_created, + // copyleft_rewards: info.copyleft_rewards, + // proof_chain: info.proof_chain, + ..ShardDescr::default() + }; + + self.shards.write().insert(info.shard().clone(), descr); + self.shards_has_been_changed.store(true, Ordering::Relaxed); + Ok(()) + } + + /// + /// Generate new block + /// + pub fn generate_block(&self, time_delta: u32, debug: bool) -> NodeResult> { + let (mut master_block, new_shard_state, is_empty) = + self.shardchain.build_block(time_delta, debug)?; + + if is_empty && !self.shards_has_been_changed.load(Ordering::Relaxed) { + return Ok(None); + } + + let mut info = master_block.info.read_struct()?; + info.set_key_block(true); + master_block.info.write_struct(&info)?; + let mut extra = master_block.extra.read_struct()?; + 
let mut mc_extra = McBlockExtra::default(); + mc_extra.set_config(self.shardchain.blockchain_config.raw_config().clone()); + let shards = mc_extra.shards_mut(); + for (shard, descr) in &*self.shards.read() { + shards.set( + &shard.workchain_id(), + &InRefValue(BinTree::with_item(descr)?), + )?; + } + + extra.write_custom(Some(&mc_extra))?; + master_block.write_extra(&extra)?; + self.shardchain + .finality_and_apply_block(&master_block, new_shard_state)?; + self.shards_has_been_changed.store(false, Ordering::Relaxed); + log::trace!(target: "node", "master block generated successfully"); + Ok(Some(master_block)) + } + + fn get_last_finalized_mc_extra(&self) -> Option { + self.shardchain + .get_last_finalized_block() + .map_or(None, |block| block.read_extra().ok()) + .map_or(None, |extra| extra.read_custom().ok()) + .flatten() + } + + pub(crate) fn restore_state(&self) -> NodeResult<()> { + if !self.shardchain.finality_was_loaded { + return Ok(()); + } + if let Some(mc) = self.get_last_finalized_mc_extra() { + { + let mut shards = self.shards.write(); + mc.shards().iterate_with_keys(&mut |workchain_id: i32, + InRefValue(tree): InRefValue< + BinTree, + >| { + tree.iterate(&mut |shard: SliceData, descr: ShardDescr| { + let shard = ShardIdent::with_tagged_prefix( + workchain_id, + shard_ident_to_u64(shard.cell().data()), + ) + .unwrap(); + shards.insert(shard, descr); + Ok(true) + }) + })?; + self.shards_has_been_changed.store(true, Ordering::Relaxed); + } + if let Some(last_config) = mc.config() { + if last_config != self.shardchain.blockchain_config.raw_config() { + self.generate_block(0, false)?; + } + } + } + Ok(()) + } +} + +fn shard_ident_to_u64(shard: &[u8]) -> u64 { + let mut shard_key = [0; 8]; + let len = std::cmp::min(shard.len(), 8); + shard_key[..len].copy_from_slice(&shard[..len]); + u64::from_be_bytes(shard_key) +} diff --git a/node/src/engine/messages.rs b/node/src/engine/messages.rs index f4e4aa1..de2dc8f 100644 --- a/node/src/engine/messages.rs +++ 
b/node/src/engine/messages.rs @@ -15,41 +15,20 @@ */ use crate::data::DocumentsDb; -use parking_lot::{Mutex, Condvar}; -use std::{sync::Arc, collections::VecDeque}; -use ton_block::{GetRepresentationHash, Message, Serializable}; -use ton_types::{serialize_toc, Result, UInt256}; +use parking_lot::{Condvar, Mutex}; +use std::{collections::VecDeque, sync::Arc}; +use ton_block::{Message, Serializable}; +use ton_types::serialize_toc; #[cfg(test)] #[path = "../../../../tonos-se-tests/unit/test_messages.rs"] mod tests; -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct QueuedMessage { - message: Message, - hash: UInt256, -} - -impl QueuedMessage { - pub fn with_message(message: Message) -> Result { - let hash = message.hash()?; - Ok(Self { message, hash }) - } - - pub fn message(&self) -> &Message { - &self.message - } - - pub fn message_hash(&self) -> &UInt256 { - &self.hash - } -} - /// This FIFO accumulates inbound messages from all types of receivers. /// The struct might be used from many threads. It provides internal mutability. pub struct InMessagesQueue { present: Condvar, - storage: Mutex>, + storage: Mutex>, db: Option>, capacity: usize, } @@ -82,7 +61,7 @@ impl InMessagesQueue { } /// Include message into end queue. - pub fn queue(&self, msg: QueuedMessage) -> std::result::Result<(), QueuedMessage> { + pub fn queue(&self, msg: Message) -> std::result::Result<(), Message> { if self.has_delivery_problems() { log::debug!(target: "node", "Has delivery problems"); return Err(msg); @@ -93,16 +72,22 @@ impl InMessagesQueue { return Err(msg); } - log::debug!(target: "node", "Queued message: {:?}", msg.message()); + log::debug!(target: "node", "Queued message: {:?}", msg); storage.push_back(msg); self.present.notify_one(); Ok(()) } - /// Extract oldest message from queue. - pub fn dequeue(&self) -> Option { - self.storage.lock().pop_front() + /// Extract oldest message for specified workchain from queue. 
+ pub fn dequeue(&self, workchain_id: i32) -> Option { + let mut storage = self.storage.lock(); + for i in 0..storage.len() { + if storage[i].workchain_id() == Some(workchain_id) { + return storage.remove(i); + } + } + None } pub fn print_message(msg: &Message) { diff --git a/node/src/engine/mod.rs b/node/src/engine/mod.rs index 8431662..6d83911 100644 --- a/node/src/engine/mod.rs +++ b/node/src/engine/mod.rs @@ -20,14 +20,8 @@ use std::sync::Arc; pub mod messages; pub use self::messages::*; pub mod engine; - -const GIVER_BALANCE: u128 = 5_000_000_000_000_000_000; -const MULTISIG_BALANCE: u128 = 1_000_000_000_000_000; -const GIVER_ABI1_DEPLOY_MSG: &[u8] = include_bytes!("../../data/giver_abi1_deploy_msg.boc"); -const DEPRECATED_GIVER_ABI2_DEPLOY_MSG: &[u8] = - include_bytes!("../../data/deprecated_giver_abi2_deploy_msg.boc"); -const GIVER_ABI2_DEPLOY_MSG: &[u8] = include_bytes!("../../data/giver_abi2_deploy_msg.boc"); -const MULTISIG_DEPLOY_MSG: &[u8] = include_bytes!("../../data/safemultisig_deploy_msg.boc"); +pub mod shardchain; +mod masterchain; pub trait MessagesReceiver: Send { fn run(&mut self, queue: Arc) -> NodeResult<()>; diff --git a/node/src/engine/shardchain.rs b/node/src/engine/shardchain.rs new file mode 100644 index 0000000..fa95fdf --- /dev/null +++ b/node/src/engine/shardchain.rs @@ -0,0 +1,125 @@ +use crate::block::{BlockBuilder, OrdinaryBlockFinality}; +use crate::data::{DocumentsDb, FileBasedStorage}; +use crate::engine::InMessagesQueue; +use crate::error::{NodeError, NodeResult}; +use parking_lot::Mutex; +use std::io::ErrorKind; +use std::path::PathBuf; +use std::sync::Arc; +use ton_block::{Block, ShardIdent, ShardStateUnsplit, UnixTime32}; +use ton_executor::BlockchainConfig; +use ton_types::HashmapType; + +type Storage = FileBasedStorage; +type ArcBlockFinality = Arc>>; + +pub struct Shardchain { + pub(crate) finality_was_loaded: bool, + pub(crate) blockchain_config: Arc, + message_queue: Arc, + pub(crate) finalizer: ArcBlockFinality, +} + 
+impl Shardchain { + pub fn with_params( + shard: ShardIdent, + global_id: i32, + blockchain_config: Arc, + message_queue: Arc, + documents_db: Arc, + storage_path: PathBuf, + ) -> NodeResult { + let storage = Arc::new(Storage::with_path(shard.clone(), storage_path.clone())?); + let block_finality = Arc::new(Mutex::new(OrdinaryBlockFinality::with_params( + global_id, + shard.clone(), + storage_path, + storage.clone(), + storage.clone(), + storage.clone(), + storage.clone(), + Some(documents_db.clone()), + Vec::new(), + ))); + let finality_was_loaded = match block_finality.lock().load() { + Ok(_) => { + log::info!(target: "node", "load block finality successfully"); + true + } + Err(NodeError::Io(err)) => { + if err.kind() != ErrorKind::NotFound { + return Err(NodeError::Io(err)); + } + false + } + Err(err) => { + return Err(err); + } + }; + + Ok(Self { + finality_was_loaded, + blockchain_config, + message_queue, + finalizer: block_finality.clone(), + }) + } + + pub(crate) fn build_block( + &self, + time_delta: u32, + debug: bool, + ) -> NodeResult<(Block, ShardStateUnsplit, bool)> { + let timestamp = UnixTime32::now().as_u32() + time_delta; + let (shard_state, blk_prev_info) = self.finalizer.lock().get_last_info()?; + log::debug ! 
(target: "node", "PARENT block: {:?}", blk_prev_info); + + let collator = BlockBuilder::with_params(shard_state, blk_prev_info, timestamp)?; + collator.build_block(&self.message_queue, &self.blockchain_config, debug) + } + + /// + /// Generate new block if possible + /// + pub fn generate_block(&self, time_delta: u32, debug: bool) -> NodeResult> { + let (block, new_shard_state, is_empty) = self.build_block(time_delta, debug)?; + Ok(if !is_empty { + log::trace!(target: "node", "block generated successfully"); + Self::print_block_info(&block); + self.finality_and_apply_block(&block, new_shard_state)?; + Some(block) + } else { + log::trace!(target: "node", "empty block was not generated"); + None + }) + } + + fn print_block_info(block: &Block) { + let extra = block.read_extra().unwrap(); + log::info!(target: "node", + "block: gen time = {}, in msg count = {}, out msg count = {}, account_blocks = {}", + block.read_info().unwrap().gen_utime(), + extra.read_in_msg_descr().unwrap().len().unwrap(), + extra.read_out_msg_descr().unwrap().len().unwrap(), + extra.read_account_blocks().unwrap().len().unwrap()); + } + + /// finality and apply block + pub(crate) fn finality_and_apply_block( + &self, + block: &Block, + applied_shard: ShardStateUnsplit, + ) -> NodeResult> { + log::info!(target: "node", "Apply block seq_no = {}", block.read_info()?.seq_no()); + let new_state = Arc::new(applied_shard); + self.finalizer + .lock() + .put_block_with_info(block, new_state.clone())?; + Ok(new_state) + } + + /// get last finalized block + pub fn get_last_finalized_block(&self) -> NodeResult { + Ok(self.finalizer.lock().last_finalized_block.block.clone()) + } +} diff --git a/node/src/main.rs b/node/src/main.rs index ec5a083..0a0a478 100644 --- a/node/src/main.rs +++ b/node/src/main.rs @@ -22,6 +22,7 @@ use crate::engine::MessagesReceiver; use crate::error::{NodeError, NodeResult}; use clap::{Arg, ArgMatches, Command}; use iron::Iron; +use parking_lot::Mutex; use router::Router; use 
serde_json::Value; use std::{ @@ -31,7 +32,6 @@ use std::{ thread, time::Duration, }; -use parking_lot::Mutex; use ton_executor::BlockchainConfig; pub mod error; @@ -170,11 +170,12 @@ fn start_node(config: StartNodeConfig) -> NodeResult<()> { )?); let ton = TonNodeEngine::with_params( + config.node.global_id, config.node.shard_id_config().shard_ident(), receivers, Some(control_api), - config.blockchain, - Some(db), + Arc::new(config.blockchain), + db, PathBuf::from("./"), )?;