diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 660b8da116..c7e410da93 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -6,16 +6,17 @@ on: branches: - main - dev + - beta jobs: build-tests: name: Test and Build strategy: matrix: platform: - - ubuntu-latest + - ubuntu-22.04 # - macos-latest toolchain: - - 1.59 + - 1.85 runs-on: ${{ matrix.platform }} steps: - name: Checkout sources @@ -32,9 +33,12 @@ jobs: uses: actions-rs/cargo@v1 with: command: test - args: --locked --all --release --features "json-tests" --verbose --no-run + #args: deactivated JSON Tests, so we do not run out of quota on CI tests for merge intesive time. --locked --all --release --features "json-tests" --verbose --no-run + args: --locked --all --release --verbose --no-run - name: Run tests for ${{ matrix.platform }} uses: actions-rs/cargo@v1 with: command: test - args: --locked --all --release --features "json-tests" --verbose + #args: deactivated JSON Tests --locked --all --release --features "json-tests" --verbose + args: --locked --all --release --verbose + diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 7a0c9ef4a3..0000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,285 +0,0 @@ -name: Build Release Suite - -on: - push: - tags: - - v* - -# Global vars -env: - AWS_REGION: "us-east-1" - AWS_S3_ARTIFACTS_BUCKET: "openethereum-releases" - ACTIONS_ALLOW_UNSECURE_COMMANDS: true - -jobs: - build: - name: Build Release - strategy: - matrix: - platform: - - ubuntu-16.04 - # - macos-latest - toolchain: - - 1.59 - runs-on: ${{ matrix.platform }} - steps: - - name: Checkout sources - uses: actions/checkout@main - - name: Install toolchain - uses: actions-rs/toolchain@v1 - with: - toolchain: ${{ matrix.toolchain }} - profile: minimal - 
override: true - - # ============================== - # Windows Build - # ============================== - - # - name: Install LLVM for Windows - # if: matrix.platform == 'windows2019' - # run: choco install llvm - - # - name: Build OpenEthereum for Windows - # if: matrix.platform == 'windows2019' - # run: sh scripts/actions/build-windows.sh ${{matrix.platform}} - - # - name: Upload Windows build - # uses: actions/upload-artifact@v2 - # if: matrix.platform == 'windows2019' - # with: - # name: windows-artifacts - # path: artifacts - - # ============================== - # Linux/Macos Build - # ============================== - - - name: Build OpenEthereum for ${{matrix.platform}} - if: matrix.platform != 'windows2019' - run: sh scripts/actions/build-linux.sh ${{matrix.platform}} - - - name: Upload Linux build - uses: actions/upload-artifact@v2 - if: matrix.platform == 'ubuntu-16.04' - with: - name: linux-artifacts - path: artifacts - - - name: Upload MacOS build - uses: actions/upload-artifact@v2 - if: matrix.platform == 'macos-latest' - with: - name: macos-artifacts - path: artifacts - - zip-artifacts-creator: - name: Create zip artifacts - needs: build - runs-on: ubuntu-16.04 - steps: - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - # ============================== - # Create ZIP files - # ============================== - - # - name: Download Windows artifacts - # uses: actions/download-artifact@v2 - # with: - # name: windows-artifacts - # path: windows-artifacts - - - name: Download Linux artifacts - uses: actions/download-artifact@v2 - with: - name: linux-artifacts - path: linux-artifacts - - - name: Download MacOS artifacts - uses: actions/download-artifact@v2 - with: - name: macos-artifacts - path: macos-artifacts - - - name: Display structure of downloaded files - run: ls - - - name: Create zip Linux - id: create_zip_linux - run: | - cd linux-artifacts/ - zip -rT openethereum-linux-${{ env.RELEASE_VERSION }}.zip * - ls 
openethereum-linux-${{ env.RELEASE_VERSION }}.zip - cd .. - mv linux-artifacts/openethereum-linux-${{ env.RELEASE_VERSION }}.zip . - - echo "Setting outputs..." - echo ::set-output name=LINUX_ARTIFACT::openethereum-linux-${{ env.RELEASE_VERSION }}.zip - echo ::set-output name=LINUX_SHASUM::$(shasum -a 256 openethereum-linux-${{ env.RELEASE_VERSION }}.zip | awk '{print $1}') - - - name: Create zip MacOS - id: create_zip_macos - run: | - cd macos-artifacts/ - zip -rT openethereum-macos-${{ env.RELEASE_VERSION }}.zip * - ls openethereum-macos-${{ env.RELEASE_VERSION }}.zip - cd .. - mv macos-artifacts/openethereum-macos-${{ env.RELEASE_VERSION }}.zip . - - echo "Setting outputs..." - echo ::set-output name=MACOS_ARTIFACT::openethereum-macos-${{ env.RELEASE_VERSION }}.zip - echo ::set-output name=MACOS_SHASUM::$(shasum -a 256 openethereum-macos-${{ env.RELEASE_VERSION }}.zip | awk '{print $1}') - - # - name: Create zip Windows - # id: create_zip_windows - # run: | - # cd windows-artifacts/ - # zip -rT openethereum-windows-${{ env.RELEASE_VERSION }}.zip * - # ls openethereum-windows-${{ env.RELEASE_VERSION }}.zip - # cd .. - # mv windows-artifacts/openethereum-windows-${{ env.RELEASE_VERSION }}.zip . - - # echo "Setting outputs..." 
- # echo ::set-output name=WINDOWS_ARTIFACT::openethereum-windows-${{ env.RELEASE_VERSION }}.zip - # echo ::set-output name=WINDOWS_SHASUM::$(shasum -a 256 openethereum-windows-${{ env.RELEASE_VERSION }}.zip | awk '{print $1}') - - # ======================================================================= - # Upload artifacts - # This is required to share artifacts between different jobs - # ======================================================================= - - - name: Upload artifacts - uses: actions/upload-artifact@v2 - with: - name: openethereum-linux-${{ env.RELEASE_VERSION }}.zip - path: openethereum-linux-${{ env.RELEASE_VERSION }}.zip - - - name: Upload artifacts - uses: actions/upload-artifact@v2 - with: - name: openethereum-macos-${{ env.RELEASE_VERSION }}.zip - path: openethereum-macos-${{ env.RELEASE_VERSION }}.zip - - # - name: Upload artifacts - # uses: actions/upload-artifact@v2 - # with: - # name: openethereum-windows-${{ env.RELEASE_VERSION }}.zip - # path: openethereum-windows-${{ env.RELEASE_VERSION }}.zip - - # ======================================================================= - # Upload artifacts to S3 - # This is required by some software distribution systems which require - # artifacts to be downloadable, like Brew on MacOS. - # ======================================================================= - - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v1 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: ${{ env.AWS_REGION }} - - - name: Copy files to S3 with the AWS CLI - run: | - # Deploy zip artifacts to S3 bucket to a directory whose name is the tagged release version. 
- # Deploy macos binary artifact (if required, add more `aws s3 cp` commands to deploy specific OS versions) - aws s3 cp macos-artifacts/openethereum s3://${{ env.AWS_S3_ARTIFACTS_BUCKET }}/${{ env.RELEASE_VERSION }}/macos/ --region ${{ env.AWS_REGION }} - - outputs: - linux-artifact: ${{ steps.create_zip_linux.outputs.LINUX_ARTIFACT }} - linux-shasum: ${{ steps.create_zip_linux.outputs.LINUX_SHASUM }} - macos-artifact: ${{ steps.create_zip_macos.outputs.MACOS_ARTIFACT }} - macos-shasum: ${{ steps.create_zip_macos.outputs.MACOS_SHASUM }} - # windows-artifact: ${{ steps.create_zip_windows.outputs.WINDOWS_ARTIFACT }} - # windows-shasum: ${{ steps.create_zip_windows.outputs.WINDOWS_SHASUM }} - - draft-release: - name: Draft Release - needs: zip-artifacts-creator - runs-on: ubuntu-16.04 - steps: - - name: Set env - run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV - - # ============================== - # Download artifacts - # ============================== - - - name: Download artifacts - uses: actions/download-artifact@v2 - with: - name: openethereum-linux-${{ env.RELEASE_VERSION }}.zip - - - name: Download artifacts - uses: actions/download-artifact@v2 - with: - name: openethereum-macos-${{ env.RELEASE_VERSION }}.zip - - # - name: Download artifacts - # uses: actions/download-artifact@v2 - # with: - # name: openethereum-windows-${{ env.RELEASE_VERSION }}.zip - - - name: Display structure of downloaded files - run: ls - - # ============================== - # Create release draft - # ============================== - - - name: Create Release Draft - id: create_release_draft - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token - with: - tag_name: ${{ github.ref }} - release_name: OpenEthereum ${{ github.ref }} - body: | - This release contains - - | System | Architecture | Binary | Sha256 Checksum | - |:---:|:---:|:---:|:---| - | Apple Icon by 
Pixel Perfect from https://www.flaticon.com/authors/pixel-perfect | x64 | [${{ needs.zip-artifacts-creator.outputs.macos-artifact }}](https://github.com/openethereum/openethereum/releases/download/${{ env.RELEASE_VERSION }}/${{ needs.zip-artifacts-creator.outputs.macos-artifact }}) | `${{ needs.zip-artifacts-creator.outputs.macos-shasum }}` | - | Linux Icon by Pixel Perfect from https://www.flaticon.com/authors/pixel-perfect | x64 | [${{ needs.zip-artifacts-creator.outputs.linux-artifact }}](https://github.com/openethereum/openethereum/releases/download/${{ env.RELEASE_VERSION }}/${{ needs.zip-artifacts-creator.outputs.linux-artifact }}) | `${{ needs.zip-artifacts-creator.outputs.linux-shasum }}` | - | Windows Icon by Pixel Perfect from https://www.flaticon.com/authors/pixel-perfect | x64 | [${{ needs.zip-artifacts-creator.outputs.windows-artifact }}](https://github.com/openethereum/openethereum/releases/download/${{ env.RELEASE_VERSION }}/${{ needs.zip-artifacts-creator.outputs.windows-artifact }}) | `${{ needs.zip-artifacts-creator.outputs.windows-shasum }}` | - | | | | | - | **System** | **Option** | - | **Resource** | - | Settings Icon by Pixel Perfect from https://www.flaticon.com/authors/pixel-perfect | Docker | - | [hub.docker.com/r/openethereum/openethereum](https://hub.docker.com/r/openethereum/openethereum) | - - draft: true - prerelease: true - - - name: Upload Release Asset - Linux - id: upload_release_asset_linux - uses: actions/upload-release-asset@v1.0.1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release_draft.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./openethereum-linux-${{ env.RELEASE_VERSION }}.zip - asset_name: openethereum-linux-${{ env.RELEASE_VERSION }}.zip - asset_content_type: application/zip - - - name: Upload Release Asset - MacOS - id: upload_release_asset_macos - uses: actions/upload-release-asset@v1.0.1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ steps.create_release_draft.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - asset_path: ./openethereum-macos-${{ env.RELEASE_VERSION }}.zip - asset_name: openethereum-macos-${{ env.RELEASE_VERSION }}.zip - asset_content_type: application/zip - - # - name: Upload Release Asset - Windows - # id: upload_release_asset_windows - # uses: actions/upload-release-asset@v1 - # env: - # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # with: - # upload_url: ${{ steps.create_release_draft.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps - # asset_path: ./openethereum-windows-${{ env.RELEASE_VERSION }}.zip - # asset_name: openethereum-windows-${{ env.RELEASE_VERSION }}.zip - # asset_content_type: application/zip diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 401bd0c0ce..f619b05ed1 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -9,7 +9,7 @@ on: jobs: check: name: Check - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - name: Checkout sources uses: actions/checkout@main @@ -18,20 +18,15 @@ jobs: - name: Install 1.59 toolchain uses: actions-rs/toolchain@v1 with: - toolchain: 1.59 + toolchain: 1.85 profile: minimal override: true - - name: Run cargo check 1/3 + - name: Run cargo check uses: actions-rs/cargo@v1 with: command: check - args: --locked --no-default-features --verbose - - name: Run cargo check 2/3 - uses: actions-rs/cargo@v1 - with: - command: check - args: --locked --manifest-path crates/runtime/io/Cargo.toml --no-default-features --verbose - - name: Run cargo check 3/3 + args: --locked --all --benches --verbose --tests + - name: Run cargo check mio io uses: actions-rs/cargo@v1 with: command: check @@ -41,10 +36,7 @@ jobs: with: command: check args: --locked -p evmbin --verbose - - name: Run cargo check benches - uses: actions-rs/cargo@v1 - with: - command: check - args: --locked --all --benches --verbose - name: Run validate chainspecs run: ./scripts/actions/validate-chainspecs.sh + args: --locked --all --benches --verbose --tests + diff --git a/.github/workflows/compile-targets.yml b/.github/workflows/compile-targets.yml new file mode 100644 index 0000000000..2eaf373723 --- /dev/null +++ b/.github/workflows/compile-targets.yml @@ -0,0 +1,24 @@ +name: Compile + +on: + pull_request: + push: + branches: + - main + - dev +jobs: + check: + name: Compile + runs-on: ubuntu-22.04 + steps: + - name: Checkout 
sources + uses: actions/checkout@main + with: + submodules: true + - name: Install rust toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.85 + profile: minimal + override: true + diff --git a/.github/workflows/fmt.yml b/.github/workflows/fmt.yml index d7ef4a52c2..5700e82a98 100644 --- a/.github/workflows/fmt.yml +++ b/.github/workflows/fmt.yml @@ -5,13 +5,13 @@ name: rustfmt jobs: fmt: name: Rustfmt - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: 1.59 + toolchain: 1.85 override: true - run: rustup component add rustfmt - uses: actions-rs/cargo@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 94a6120a66..1165802c92 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,178 @@ +## Diamond Node Software 4.0.0 + +Official Node Software start version for the DMD Diamond network version 4. +see Whitepaper: https://github.com/DMDcoin/whitepaper/wiki + +## Diamond Node Software 0.12.9 + +- [Phoenix Protocol Bugfix for Validators](https://github.com/DMDcoin/openethereum-3.x/issues/52) + +## Diamond Node Software 0.12.8 + +- [Phoenix Protocol](https://github.com/DMDcoin/openethereum-3.x/issues/52) + +## Diamond Node Software 0.12.7 + +- [hbbft key generation: double transactions / missing transactions](https://github.com/DMDcoin/diamond-node/issues/290) + +## Diamond Node Software 0.12.6 + +- reduced Trace Log output for tracing devp2p propagation +- removed tests for supporting for outdated clients that do not support large requests + + +## Diamond Node Software 0.12.5 + +- now announcing availability before announcing IP address + +## Diamond Node Software 0.12.4 + +- [Key Gen Transaction do not require block triggers anymore, there is now also a time trigger](https://github.com/DMDcoin/diamond-node/issues/160) + +## Diamond Node Software 0.12.3 + +- Refactored KeyGenTransactions. +- Nonces from the Queue are now respected for Key Gen Transactions. 
+ +## Diamond Node Software 0.12.2 + +- [Hotfix: Nodes not fast enough to write parts (keygen)](https://github.com/DMDcoin/diamond-node/issues/280) + +## Diamond Node Software 0.12.1 + +- Logging improvements +- Fixed a bug, where handshakes, that get upgraded to sessions, deregister there stream. + + +## Diamond Node Software 0.12.0 + +- New Versioning Scheme: Since Open Ethereum did not get a new update, diamond-node will not mention 3.3.5 anymore +- [race condition: Incoming data from peer that gets disconnected leads to crash](https://github.com/DMDcoin/diamond-node/issues/275) +- [not joining hbbft epoch after sync has finished](https://github.com/DMDcoin/diamond-node/issues/270) +- [diamond front running resistance](https://github.com/DMDcoin/diamond-node/issues/89) +- [Transaction fees distribution](https://github.com/DMDcoin/diamond-node/issues/40) + +## Diamond Node Software 3.3.5-hbbft-0.11.8 +- [deregister_session_stream can cause deadlocks](https://github.com/DMDcoin/diamond-node/issues/267) + +## Diamond Node Software 3.3.5-hbbft-0.11.7 +- [Handshake and Session Management improvements](https://github.com/DMDcoin/diamond-node/issues/262) +- [Reliable Message Broadcast Protocol: message_cache of SyncProtocolHandler does not get cleaned up](https://github.com/DMDcoin/diamond-node/issues/261) +- reduced log outputs for RMBP cached messages +- Fix possible deadlock in deregister_session_stream in combination with session_readable +- reduce timings for shutdown from 90 seconds to 5 seconds, so auto restart of nodes in deadlock cases is faster + +## Diamond Node Software 3.3.5-hbbft-0.11.6 +- [session double kill problem.](https://github.com/DMDcoin/diamond-node/issues/252) +- [Network Host logic: peer_id to NodeID consistency](https://github.com/DMDcoin/diamond-node/issues/251) +- [sealing messages probably not received](https://github.com/DMDcoin/diamond-node/issues/248) +- [disconnected from reservered 
peers](https://github.com/DMDcoin/diamond-node/issues/247) +- [separate handshakes and encrypted connections](https://github.com/DMDcoin/diamond-node/issues/254) + +## Diamond Node Software 3.3.5-hbbft-0.11.5 +- [Improved reliability of Hbbft targeted message delivery](https://github.com/DMDcoin/diamond-node/issues/248) + +## Diamond Node Software 3.3.5-hbbft-0.11.4 +- [Balanced lock approach for solving the lock problems in devp2p](https://github.com/DMDcoin/diamond-node/issues/236) + +## Diamond Node Software 3.3.5-hbbft-0.11.3 + +- [deadlocks in transaction pools](https://github.com/DMDcoin/diamond-node/issues/236) + +## Diamond Node Software 3.3.5-hbbft-0.11.2 + +- [shutdown on deadlock](https://github.com/DMDcoin/diamond-node/issues/231) +- [deadlock possibility in reserved peers management](https://github.com/DMDcoin/diamond-node/issues/229) + +## Diamond Node Software 3.3.5-hbbft-0.11.1 + +- [caching of SyncStatus to prevent deadlocks](https://github.com/DMDcoin/diamond-node/issues/223) + +## Diamond Node Software 3.3.5-hbbft-0.11.0 + +- [Fixed: Compile error on newer Linux Versions](https://github.com/DMDcoin/diamond-node/issues/145) +- [rust update to 1.85 and rust edition 2024](https://github.com/DMDcoin/diamond-node/issues/191) +- [updated dependencies](https://github.com/DMDcoin/diamond-node/issues/107) +- [Fixed: Service Transaction not allowed:](https://github.com/DMDcoin/diamond-node/issues/185) +- [reduced network usage:](https://github.com/DMDcoin/diamond-node/issues/163) +- [additional prometheus counter and gauges, most of them for analysing](https://github.com/DMDcoin/diamond-node/issues/163) +- [Improved transaction propagation for clients that are syncing](https://github.com/DMDcoin/diamond-node/issues/173) + +## Diamond Node Software 3.3.5-hbbft-0.10.1 + +- Emergency fix to improve blockimports: only one block at a time is now requested throught the devp2p block sync protocol. 
https://github.com/DMDcoin/diamond-node/issues/209 + + +## Diamond Node Software 3.3.5-hbbft-0.10.0 + +- Bonus Score finalization + +## Diamond Node Software 3.3.5-hbbft-0.9.8 + +- Improved Hbbft "No Session Exists" handling: https://github.com/DMDcoin/diamond-node/issues/150 +- Lock overhead reduction for validator actions +- Connect & Disconnect Report management: fixed double sending of reports: https://github.com/DMDcoin/diamond-node/issues/157 +- Stage 3 Verification: Fixed State Pruning related error. https://github.com/DMDcoin/diamond-node/issues/161 +- Added Network and DevP2P related Information to the Prometheus Metrics: https://github.com/DMDcoin/diamond-node/issues/163 +- Early Epoch End: Treat any HBBFT Message as being a responsive partner node: https://github.com/DMDcoin/diamond-node/issues/87 + +## Diamond Node Software 3.3.5-hbbft-0.9.7 + +- [Nodes that are not a active validator seem to try to send connectivity reports] (https://github.com/DMDcoin/diamond-node/issues/153) + +## Diamond Node Software 3.3.5-hbbft-0.9.6 + +- [Early Epoch End: only report disconnectivities that exist for longer than 60 Minutes] (https://github.com/DMDcoin/diamond-node/issues/87) +- [Improved Logging for Hbbft Session Management] (https://github.com/DMDcoin/diamond-node/issues/150) + +## Diamond Node Software 3.3.5-hbbft-0.9.5 + +- [Improved Logging of ] (https://github.com/DMDcoin/diamond-node/issues/147) +- [Early Epoch end: Applying new time based rules] (https://github.com/DMDcoin/diamond-node/issues/87) + + + +## Diamond Node Software 3.3.5-hbbft-0.9.5 + +- [Improved Logging for Stage 5 Errors] (https://github.com/DMDcoin/diamond-node/issues/147) +- [Early Epoch end: Applying new time based rules] (https://github.com/DMDcoin/diamond-node/issues/87) + + +## Diamond Node Software 3.3.5-hbbft-0.9.4 + +- [Fixed: is major syncing information is wrong.] 
(https://github.com/DMDcoin/diamond-node/issues/73) +- [Improvements for HBBFT Message Tracking] (https://github.com/DMDcoin/openethereum-3.x/issues/17) + +## Diamond Node Software 3.3.5-hbbft-0.9.3 + +[Autoshutdown after a period without block import] https://github.com/DMDcoin/diamond-node/issues/78 + +Those examples show how to confige the node to activate this feature, restarting the node if no block import has been detected for 1800 seconds (30 minutes) + +to activate feature via CLI Arg: +`--shutdown-on-missing-block-import=1800` + +or in node.toml +node.toml: +``` +[Misc] +shutdown_on_missing_block_import = 1800 +``` + +## Diamond Node Software 3.3.5-hbbft-0.9.2 + +- [FIXED: pruning as root cause for stage 3 errors] https://github.com/DMDcoin/diamond-node/issues/68 + +## Diamond Node Software 3.3.5-hbbft-0.9.1 + +- [pruning protection for hbbft engine] https://github.com/DMDcoin/diamond-node/issues/62 +- [reported fault: UnknownSender] https://github.com/DMDcoin/diamond-node/issues/69 + +## Diamond Node Software 3.3.5-hbbft-0.9.0 + +- Start of Alpha 2 Testnet +- Improved Stability +- Feature preparation for Hbbft Block Reward support ## Diamond Node Software 3.3.5-hbbft-0.8.9 diff --git a/Cargo.lock b/Cargo.lock index e258a34c13..c9f3ba8b34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,21 +1,21 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" -version = "0.14.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "adler32" @@ -85,21 +85,18 @@ checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" [[package]] name = "aho-corasick" -version = "0.6.10" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ce3d38065e618af2d7b77e10c5ad9a069859b4be3c2250f674af3840d9c8a5" +checksum = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" dependencies = [ "memchr", ] [[package]] -name = "aho-corasick" -version = "0.7.6" +name = "allocator-api2" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" -dependencies = [ - "memchr", -] +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "ansi_term" @@ -113,13 +110,13 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "app_dirs" version = "1.2.1" -source = "git+https://github.com/openethereum/app-dirs-rs#0b37f9481ce29e9d5174ad185bca695b206368eb" +source = "git+https://github.com/dmdcoin/app-dirs-rs#0b37f9481ce29e9d5174ad185bca695b206368eb" dependencies = [ 
"ole32-sys", "shell32-sys", @@ -142,6 +139,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "assert_matches" version = "1.3.0" @@ -155,7 +158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -172,9 +175,9 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.56" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if 1.0.0", @@ -182,6 +185,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -203,6 +207,12 @@ dependencies = [ "byteorder", ] +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + [[package]] name = "beef" version = "0.5.1" @@ -317,7 +327,7 @@ dependencies = [ "byteorder", "criterion 0.3.0", "ethbloom 0.9.2", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "tempdir", "tiny-keccak 1.5.0", ] @@ -352,6 +362,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" +[[package]] +name = "bumpalo" +version = "3.14.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + [[package]] name = "byte-slice-cast" version = "0.3.5" @@ -424,14 +440,17 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.9" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8493056968583b0193c1bb04d6f7684586f3726992d6c573261941a895dbd68" +checksum = "6127248204b9aba09a362f6c930ef6a78f2c1b2215f8a7b398c06e1083f17af0" dependencies = [ - "libc", + "js-sys", "num-integer", "num-traits 0.2.15", - "time", + "serde", + "time 0.1.45", + "wasm-bindgen", + "winapi 0.3.9", ] [[package]] @@ -491,7 +510,7 @@ dependencies = [ "parity-crypto", "parity-util-mem", "rlp", - "rlp_derive", + "rlp-derive 0.2.0", "rustc-hex 1.0.0", "serde", "serde_json", @@ -615,34 +634,26 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e44b8cf3e1a625844d1750e1f7820da46044ff6d28f4d43e455ba3e5bb2c13" -dependencies = [ - "crossbeam-epoch", - "crossbeam-utils 0.6.6", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71" +checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" dependencies = [ "crossbeam-epoch", - "crossbeam-utils 0.6.6", + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] name = "crossbeam-epoch" -version = "0.7.2" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "arrayvec 0.4.12", + "autocfg 1.0.0", "cfg-if 0.1.10", - "crossbeam-utils 0.6.6", + "crossbeam-utils 0.7.2", 
"lazy_static", + "maybe-uninit", "memoffset", "scopeguard 1.1.0", ] @@ -728,7 +739,7 @@ checksum = "37519ccdfd73a75821cac9319d4fce15a81b9fcf75f951df5b9988aa3a0af87d" dependencies = [ "bstr", "csv-core", - "itoa", + "itoa 0.4.4", "ryu", "serde", ] @@ -768,18 +779,51 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ "nix", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] -name = "derivative" -version = "1.0.4" +name = "darling" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c6d883546668a3e2011b6a716a7330b82eabb0151b138217f632c8243e17135" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", - "syn 0.15.26", + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.94", + "quote 1.0.40", + "strsim 0.11.1", + "syn 2.0.100", +] + +[[package]] +name = "darling_macro" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core", + "quote 1.0.40", + "syn 2.0.100", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "serde", ] [[package]] @@ -788,8 +832,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + 
"proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] @@ -799,14 +843,14 @@ version = "0.99.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "298998b1cf6b5b2c8a7b023dfd45821825ce3ba8a8af55c921a0e734e4653f76" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] [[package]] name = "diamond-node" -version = "3.3.5-hbbft-0.8.9" +version = "4.0.0" dependencies = [ "ansi_term 0.10.2", "atty", @@ -837,7 +881,7 @@ dependencies = [ "fetch", "fs-swap", "futures", - "hyper 0.12.35", + "hyper", "ipnetwork", "journaldb", "jsonrpc-core", @@ -862,10 +906,10 @@ dependencies = [ "parity-rpc", "parity-runtime", "parity-version", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "pretty_assertions", "prometheus", - "regex 1.3.9", + "regex", "rlp", "rpassword", "rustc-hex 1.0.0", @@ -879,7 +923,7 @@ dependencies = [ "term_size", "textwrap 0.9.0", "toml 0.4.10", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -925,7 +969,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f525a586d310c87df72ebcd98009e57f1cc030c8c268305287a476beb653969" dependencies = [ "lazy_static", - "regex 1.3.9", + "regex", "serde", "strsim 0.9.2", ] @@ -949,17 +993,17 @@ version = "0.1.0" dependencies = [ "ethabi 12.0.0", "ethereum-types 0.9.2", - "failure", - "indexmap", + "indexmap 1.9.3", "itertools 0.7.11", "keccak-hash", "lazy_static", "logos", - "regex 1.3.9", + "regex", "rustc-hex 2.1.0", "serde", "serde_derive", "serde_json", + "thiserror 1.0.69", "validator", "validator_derive", ] @@ -997,7 +1041,7 @@ dependencies = [ "atty", "humantime", "log", - "regex 1.3.9", + "regex", "termcolor", ] @@ -1010,10 +1054,16 @@ dependencies = [ "atty", "humantime", "log", - "regex 1.3.9", + "regex", "termcolor", ] +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + [[package]] name = "error-chain" version = "0.12.1" @@ -1056,15 +1106,15 @@ version = "0.2.0" source = "git+https://github.com/matter-labs/eip1962.git?rev=ece6cbabc41948db4200e41f0bfdab7ab94c7af8#ece6cbabc41948db4200e41f0bfdab7ab94c7af8" dependencies = [ "byteorder", - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] [[package]] name = "ethabi" version = "11.0.0" -source = "git+https://github.com/rimrakhimov/ethabi?branch=rimrakhimov/remove-syn-export-span#222e6482ac45d9c01f9e895ade8e439f86dbfc2f" +source = "git+https://github.com/rimrakhimov/ethabi?branch=rimrakhimov%2Fremove-syn-export-span#222e6482ac45d9c01f9e895ade8e439f86dbfc2f" dependencies = [ "ethereum-types 0.9.2", "rustc-hex 2.1.0", @@ -1090,19 +1140,19 @@ dependencies = [ [[package]] name = "ethabi-contract" -version = "11.0.0" +version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d4002f1f77d8233685dafd8589efe1c9dfa63e21ca6c11134372acc7f68032" +checksum = "4632b1b766fbf59872eb7a41e7ebaa10727b7f7000aef5bb626b87e472041c83" [[package]] name = "ethabi-derive" version = "11.0.0" -source = "git+https://github.com/rimrakhimov/ethabi?branch=rimrakhimov/remove-syn-export-span#222e6482ac45d9c01f9e895ade8e439f86dbfc2f" +source = "git+https://github.com/rimrakhimov/ethabi?branch=rimrakhimov%2Fremove-syn-export-span#222e6482ac45d9c01f9e895ade8e439f86dbfc2f" dependencies = [ "ethabi 11.0.0", "heck", - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] @@ -1117,7 +1167,7 @@ dependencies = [ "keccak-hash", "log", "memmap", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "primal", "rustc-hex 1.0.0", "serde_json", @@ -1183,10 +1233,11 @@ dependencies = [ "ethjson", "ethkey", "evm", + "fastmap", "fetch", "globset", "hash-db 0.11.0", - "hbbft 0.1.1 
(git+https://github.com/surfingnerd/hbbft?rev=cf0c45aa669b9c10abab1a0f4f2b33595879b60b)", + "hbbft", "hbbft_testing", "hex-literal", "hex_fmt", @@ -1211,17 +1262,17 @@ dependencies = [ "parity-runtime", "parity-snappy", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "patricia-trie-ethereum", "rand 0.6.5", "rand 0.7.3", "rand_xorshift 0.2.0", "rayon", - "regex 1.3.9", + "regex", "reth-util", "rlp", + "rlp-derive 0.2.0", "rlp_compress", - "rlp_derive", "rmp-serde", "rustc-hex 1.0.0", "scopeguard 1.1.0", @@ -1254,7 +1305,7 @@ dependencies = [ "ethstore", "log", "parity-crypto", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "serde", "serde_derive", "serde_json", @@ -1280,12 +1331,12 @@ dependencies = [ "parity-bytes", "parity-crypto", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "rand 0.7.3", "rayon", "rlp", + "rlp-derive 0.2.0", "rlp_compress", - "rlp_derive", "rustc-hex 1.0.0", "stats", "tempdir", @@ -1315,7 +1366,7 @@ dependencies = [ "log", "macros", "maplit", - "num", + "num-bigint 0.4.3", "parity-bytes", "parity-crypto", "rustc-hex 1.0.0", @@ -1340,9 +1391,9 @@ dependencies = [ "kvdb-memorydb", "kvdb-rocksdb", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "rlp", - "rlp_derive", + "rlp-derive 0.2.0", "stats", ] @@ -1350,15 +1401,14 @@ dependencies = [ name = "ethcore-io" version = "1.12.0" dependencies = [ - "crossbeam-deque 0.6.3", + "crossbeam-deque", "fnv", "futures", "log", "mio", - "num_cpus", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "slab 0.4.2", - "time", + "time 0.1.45", "timer", "tokio", ] @@ -1368,14 +1418,14 @@ name = "ethcore-logger" version = "1.12.0" dependencies = [ "ansi_term 0.10.2", - "arrayvec 0.4.12", + "arrayvec 0.7.4", "atty", "env_logger 0.5.13", "lazy_static", "log", - "parking_lot 0.11.1", - "regex 1.3.9", - "time", + "parking_lot 0.12.1", + "regex", + "time 0.1.45", ] [[package]] @@ -1395,14 +1445,14 @@ dependencies = [ "ethkey", "fetch", "futures", - "hyper 0.12.35", 
+ "hyper", "keccak-hash", "linked-hash-map", "log", "parity-crypto", "parity-runtime", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "price-info", "rlp", "rustc-hex 1.0.0", @@ -1426,8 +1476,10 @@ dependencies = [ "ipnetwork", "lazy_static", "libc", + "log", "parity-crypto", "parity-snappy", + "parity-version", "rlp", "semver", "serde", @@ -1458,16 +1510,17 @@ dependencies = [ "parity-crypto", "parity-path", "parity-snappy", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "rand 0.7.3", "rlp", - "rust-crypto", "rustc-hex 1.0.0", "serde", "serde_derive", "serde_json", "slab 0.2.0", + "stats", "tempdir", + "time-utils", "tiny-keccak 1.5.0", ] @@ -1500,7 +1553,7 @@ dependencies = [ "jsonrpc-tcp-server", "keccak-hash", "log", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "tokio", "tokio-io", ] @@ -1534,13 +1587,14 @@ dependencies = [ "parity-bytes", "parity-crypto", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "primitive-types 0.7.2", "rand 0.7.3", "rand_xorshift 0.2.0", "rlp", "rustc-hex 1.0.0", "stats", + "time-utils", "trace-time", "triehash-ethereum", ] @@ -1556,7 +1610,7 @@ dependencies = [ "maplit", "parity-util-mem", "rlp", - "rlp-derive", + "rlp-derive 0.1.0", ] [[package]] @@ -1600,6 +1654,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "serde_with", ] [[package]] @@ -1651,15 +1706,15 @@ dependencies = [ "matches", "parity-crypto", "parity-wordlist", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "rand 0.7.3", "rustc-hex 1.0.0", "serde", "serde_derive", "serde_json", - "smallvec 0.6.13", + "smallvec 0.6.14", "tempdir", - "time", + "time 0.1.45", ] [[package]] @@ -1672,7 +1727,7 @@ dependencies = [ "ethstore", "num_cpus", "panic_hook", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "rustc-hex 1.0.0", "serde", "serde_derive", @@ -1692,10 +1747,10 @@ dependencies = [ "lazy_static", "log", "memory-cache", - "num-bigint 0.2.3", + "num-bigint 0.4.3", "parity-bytes", "parity-util-mem", - "parking_lot 
0.11.1", + "parking_lot 0.12.1", "rustc-hex 1.0.0", "vm", ] @@ -1723,35 +1778,13 @@ dependencies = [ "vm", ] -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", - "syn 1.0.94", - "synstructure", -] - [[package]] name = "fake-fetch" version = "0.0.1" dependencies = [ "fetch", "futures", - "hyper 0.12.35", + "hyper", ] [[package]] @@ -1765,6 +1798,7 @@ name = "fastmap" version = "0.1.0" dependencies = [ "ethereum-types 0.9.2", + "lru 0.13.0", "plain_hasher", ] @@ -1784,13 +1818,38 @@ dependencies = [ "bytes", "futures", "http", - "hyper 0.12.35", + "hyper", "hyper-rustls", "log", "tokio", "url 2.1.0", ] +[[package]] +name = "ff" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4b967a3ee6ae993f0094174257d404a5818f58be79d67a1aea1ec8996d28906" +dependencies = [ + "byteorder", + "ff_derive", + "rand_core 0.5.1", +] + +[[package]] +name = "ff_derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3776aaf60a45037a9c3cabdd8542b38693acaa3e241ff957181b72579d29feb" +dependencies = [ + "num-bigint 0.2.3", + "num-integer", + "num-traits 0.2.15", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.94", +] + [[package]] name = "fixed-hash" version = "0.4.0" @@ -1833,15 +1892,21 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.2.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" +checksum = 
"0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "fnv" -version = "1.0.6" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "fs-swap" @@ -1852,7 +1917,7 @@ dependencies = [ "lazy_static", "libc", "libloading", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -1893,12 +1958,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "gcc" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" - [[package]] name = "generic-array" version = "0.12.3" @@ -1936,14 +1995,14 @@ checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.10.0+wasi-snapshot-preview1", ] [[package]] name = "gimli" -version = "0.23.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "globset" @@ -1951,11 +2010,22 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" dependencies = [ - "aho-corasick 0.7.6", + "aho-corasick", "bstr", "fnv", "log", - "regex 1.3.9", + "regex", +] + +[[package]] +name = "group" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f15be54742789e36f03307c8fdf0621201e1345e94f1387282024178b5e9ec8c" +dependencies = [ + "ff", + "rand 0.7.3", + "rand_xorshift 0.2.0", ] [[package]] @@ -1969,7 +2039,7 @@ dependencies = [ "fnv", "futures", "http", - "indexmap", + "indexmap 1.9.3", "log", "slab 0.4.2", "string", @@ -2033,64 +2103,45 @@ dependencies = [ ] [[package]] -name = "hbbft" -version = "0.1.1" -source = "git+https://github.com/poanetwork/hbbft?rev=4857b7f9c7a0f513caca97c308d352c6a77fe5c2#4857b7f9c7a0f513caca97c308d352c6a77fe5c2" -dependencies = [ - "bincode", - "byteorder", - "derivative 1.0.4", - "env_logger 0.7.1", - "failure", - "hex_fmt", - "init_with", - "log", - "rand 0.6.5", - "rand_derive", - "reed-solomon-erasure 3.1.1", - "serde", - "threshold_crypto", - "tiny-keccak 2.0.2", -] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] -name = "hbbft" -version = "0.1.1" -source = "git+https://github.com/surfingnerd/hbbft?rev=cf0c45aa669b9c10abab1a0f4f2b33595879b60b#cf0c45aa669b9c10abab1a0f4f2b33595879b60b" +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ - "bincode", - "byteorder", - "derivative 2.2.0", - "env_logger 0.7.1", - "failure", - "hex_fmt", - "init_with", - "log", - "rand 0.6.5", - "rand_derive", - "reed-solomon-erasure 4.0.2", - "serde", - "threshold_crypto", - "tiny-keccak 2.0.2", + "allocator-api2", + "equivalent", + "foldhash", ] [[package]] name = "hbbft" version = "0.1.1" -source = "git+https://github.com/poanetwork/hbbft#d52be00d0e3a1e2872c8d42a076e0dc3cb86b175" +source = 
"git+https://github.com/DMDcoin/hbbft.git?rev=4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074#4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074" dependencies = [ "bincode", "byteorder", - "derivative 2.2.0", + "derivative", "env_logger 0.7.1", - "failure", "hex_fmt", "init_with", "log", - "rand 0.6.5", + "rand 0.7.3", "rand_derive", - "reed-solomon-erasure 4.0.2", + "reed-solomon-erasure", "serde", + "thiserror 1.0.69", "threshold_crypto", "tiny-keccak 2.0.2", ] @@ -2103,29 +2154,31 @@ dependencies = [ "clap", "ethcore", "ethereum-types 0.9.2", + "ethjson", "ethkey", "ethstore", - "hbbft 0.1.1 (git+https://github.com/poanetwork/hbbft?rev=4857b7f9c7a0f513caca97c308d352c6a77fe5c2)", + "hbbft", "hbbft_testing", "parity-crypto", - "rand 0.6.5", + "rand 0.7.3", "rustc-hex 2.1.0", "serde", "serde_json", + "serde_with", "toml 0.5.8", ] [[package]] name = "hbbft_testing" version = "0.1.0" -source = "git+https://github.com/poanetwork/hbbft#d52be00d0e3a1e2872c8d42a076e0dc3cb86b175" +source = "git+https://github.com/DMDcoin/hbbft.git?rev=4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074#4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074" dependencies = [ - "failure", - "hbbft 0.1.1 (git+https://github.com/poanetwork/hbbft)", + "hbbft", "integer-sqrt", "proptest", - "rand 0.6.5", - "rand_xorshift 0.1.1", + "rand 0.7.3", + "rand_xorshift 0.2.0", + "thiserror 1.0.69", "threshold_crypto", ] @@ -2135,7 +2188,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2204,7 +2257,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29302b90cfa76231a757a887d1e3153331a63c7f80b6c75f86366334cbe70708" dependencies = [ "scopeguard 0.3.3", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2214,7 +2267,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a3753954f7bd71f0e671afb8b5a992d1724cf43b7f95a563cd4a0bde94659ca8" dependencies = [ "scopeguard 1.1.0", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2225,7 +2278,7 @@ checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 0.4.4", ] [[package]] @@ -2255,32 +2308,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "hyper" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34a590ca09d341e94cddf8e5af0bbccde205d5fbc2fa3c09dd67c7f85cea59d7" -dependencies = [ - "base64 0.9.3", - "bytes", - "futures", - "futures-cpupool", - "httparse", - "iovec", - "language-tags", - "log", - "mime", - "net2", - "percent-encoding 1.0.1", - "relay", - "time", - "tokio-core", - "tokio-io", - "tokio-service", - "unicase", - "want 0.0.4", -] - [[package]] name = "hyper" version = "0.12.35" @@ -2295,11 +2322,11 @@ dependencies = [ "http-body", "httparse", "iovec", - "itoa", + "itoa 0.4.4", "log", "net2", "rustc_version", - "time", + "time 0.1.45", "tokio", "tokio-buf", "tokio-executor", @@ -2308,7 +2335,7 @@ dependencies = [ "tokio-tcp", "tokio-threadpool", "tokio-timer 0.2.13", - "want 0.2.0", + "want", ] [[package]] @@ -2320,7 +2347,7 @@ dependencies = [ "bytes", "ct-logs", "futures", - "hyper 0.12.35", + "hyper", "rustls", "tokio-io", "tokio-rustls", @@ -2328,6 +2355,12 @@ dependencies = [ "webpki-roots", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.1.5" @@ -2358,18 +2391,13 @@ checksum = "4bac95d9aa0624e7b78187d6fb8ab012b41d9f6f54b1bcb61e61c4845f8357ec" [[package]] name = "igd" -version = "0.7.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8aef7814a769f156ef3a86169a8b04c066e3aebc324f522c159978466e32a1c" 
+checksum = "b1c44a9cf56a894ff1b90dc83d108a313e05f07c7b0c882f3783ce406525b947" dependencies = [ - "futures", - "hyper 0.11.27", + "lynx", "rand 0.4.6", - "regex 0.2.11", - "tokio-core", - "tokio-retry", - "tokio-timer 0.1.2", - "xml-rs", + "url 1.7.2", "xmltree", ] @@ -2415,18 +2443,31 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] [[package]] name = "indexmap" -version = "1.3.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712d7b3ea5827fcb9d4fda14bf4da5f136f0db2ae9c8f4bd4e2d1c6fde4e6db2" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 0.1.7", + "autocfg 1.0.0", + "hashbrown 0.12.3", + "serde", +] + +[[package]] +name = "indexmap" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", + "serde", ] [[package]] @@ -2444,15 +2485,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0175f63815ce00183bf755155ad0cb48c65226c5d17a724e369c25418d2b7699" -[[package]] -name = "instant" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "integer-sqrt" version = "0.1.5" @@ -2516,6 +2548,12 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + [[package]] name = "journaldb" version = "0.2.0" @@ -2533,10 +2571,19 @@ dependencies = [ "memory-db 0.11.0", "parity-bytes", "parity-util-mem", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "rlp", ] +[[package]] +name = "js-sys" +version = "0.3.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +dependencies = [ + "wasm-bindgen", +] + [[package]] name = "jsonrpc-core" version = "15.0.0" @@ -2557,8 +2604,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2cc6ea7f785232d9ca8786a44e9fa698f92149dcdc1acc4aa1fc69c4993d79e" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] @@ -2568,7 +2615,7 @@ version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9996b26c0c7a59626d0ed6c5ec8bf06218e62ce1474bd2849f9b9fd38a0158c0" dependencies = [ - "hyper 0.12.35", + "hyper", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2733,15 +2780,9 @@ dependencies = [ "num_cpus", "parity-rocksdb", "parking_lot 0.9.0", - "regex 1.3.9", + "regex", ] -[[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" - [[package]] name = "lazy_static" version = "1.4.0" @@ -2758,14 +2799,14 @@ checksum = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" name = "len-caching-lock" version = "0.1.1" dependencies = [ - "parking_lot 0.11.1", + "parking_lot 0.12.1", ] [[package]] name = "libc" -version = "0.2.126" +version = "0.2.173" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = 
"d8cfeafaffdbc32176b64fb251369d52ea9f0a8fbc6f8759edffef7b525d64bb" [[package]] name = "libloading" @@ -2774,7 +2815,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ "cc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2848,9 +2889,9 @@ checksum = "56a7d287fd2ac3f75b11f19a1c8a874a7d55744bd91f7a1b3e7cf87d4343c36d" dependencies = [ "beef", "fnv", - "proc-macro2 1.0.58", - "quote 1.0.27", - "regex-syntax 0.6.18", + "proc-macro2 1.0.94", + "quote 1.0.40", + "regex-syntax", "syn 1.0.94", "utf8-ranges", ] @@ -2865,14 +2906,34 @@ dependencies = [ ] [[package]] -name = "lru-cache" -version = "0.1.2" +name = "lru" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" +dependencies = [ + "hashbrown 0.15.2", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" dependencies = [ "linked-hash-map", ] +[[package]] +name = "lynx" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5296e8244cb83aa1b71bd5b070b56e7a5a7d693a809c3051badc5332319a8419" +dependencies = [ + "http", + "log", + "url 1.7.2", +] + [[package]] name = "macros" version = "0.1.0" @@ -2911,7 +2972,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff" dependencies = [ "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -2972,20 +3033,13 @@ dependencies = [ "tempdir", ] -[[package]] -name = "mime" -version = "0.3.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dd1d63acd1b78403cc0c325605908475dd9b9a3acbf65ed8bcab97e27014afcf" - [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ - "adler", - "autocfg 1.0.0", + "adler2", ] [[package]] @@ -3028,7 +3082,7 @@ dependencies = [ "log", "mio", "miow 0.3.7", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3060,7 +3114,7 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3077,7 +3131,7 @@ checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" dependencies = [ "cfg-if 0.1.10", "libc", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3106,7 +3160,7 @@ dependencies = [ "kvdb-memorydb", "log", "lru-cache", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "tempdir", ] @@ -3116,30 +3170,6 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" -[[package]] -name = "num" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4703ad64153382334aa8db57c637364c322d3372e097840c72000dabdcf6156e" -dependencies = [ - "num-bigint 0.1.44", - "num-integer", - "num-iter", - "num-traits 0.2.15", -] - -[[package]] -name = "num-bigint" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e63899ad0da84ce718c14936262a41cee2c79c981fc0a0e7c7beb47d5a07e8c1" -dependencies = [ - "num-integer", - "num-traits 0.2.15", - "rand 0.4.6", - "rustc-serialize", -] - [[package]] name = "num-bigint" version = "0.2.3" @@ -3152,23 +3182,22 @@ dependencies = [ 
] [[package]] -name = "num-integer" -version = "0.1.41" +name = "num-bigint" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ - "autocfg 0.1.7", + "autocfg 1.0.0", + "num-integer", "num-traits 0.2.15", ] [[package]] -name = "num-iter" -version = "0.1.39" +name = "num-integer" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bd5272412d173d6bf9afdf98db8612bbabc9a7a830b7bfc9c188911716132e" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg 0.1.7", - "num-integer", "num-traits 0.2.15", ] @@ -3211,9 +3240,12 @@ dependencies = [ [[package]] name = "object" -version = "0.23.0" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] [[package]] name = "oe-rpc-common" @@ -3246,9 +3278,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.4.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "opaque-debug" @@ -3273,12 +3305,14 @@ dependencies = [ [[package]] name = "pairing" -version = "0.14.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceda21136251c6d5a422d3d798d8ac22515a6e8d3521bb60c59a8349d36d0d57" +checksum = "b8290dea210a712682cd65031dc2b34fd132cf2729def3df7ee08f0737ff5ed6" dependencies = [ "byteorder", - "rand 0.4.6", + "ff", + "group", + "rand_core 0.5.1", ] 
[[package]] @@ -3322,14 +3356,13 @@ dependencies = [ [[package]] name = "parity-daemonize" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69b1910b2793ff52713fca0a4ee92544ebec59ccd218ea74560be6f947b4ca77" +source = "git+https://github.com/DMDcoin/parity-daemonize.git?rev=1d0802cd6880b0b914c6e82ba274bb3fc63238fb#1d0802cd6880b0b914c6e82ba274bb3fc63238fb" dependencies = [ "ansi_term 0.11.0", - "failure", "libc", "log", "mio", + "thiserror 1.0.69", ] [[package]] @@ -3362,8 +3395,7 @@ dependencies = [ [[package]] name = "parity-rocksdb" version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d17caf6640e24b70242f3f48615e3f0764f98871e8c7aea25584e29833eb5a8" +source = "git+https://github.com/DMDcoin/rust-rocksdb.git#412b49ac7d87c20360fbbdfdda79d22f542f280d" dependencies = [ "libc", "local-encoding", @@ -3372,9 +3404,8 @@ dependencies = [ [[package]] name = "parity-rocksdb-sys" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9581e6b8c63f3808500638372ee56faaaffb57c4d349974bff591606b94d5f57" +version = "0.5.7" +source = "git+https://github.com/DMDcoin/rust-rocksdb.git#412b49ac7d87c20360fbbdfdda79d22f542f280d" dependencies = [ "cmake", "libc", @@ -3422,7 +3453,7 @@ dependencies = [ "parity-crypto", "parity-runtime", "parity-version", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "pretty_assertions", "rand 0.7.3", "rand_xorshift 0.2.0", @@ -3452,7 +3483,7 @@ dependencies = [ "log", "matches", "parity-rpc", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "serde", "serde_json", "url 2.1.0", @@ -3514,7 +3545,7 @@ dependencies = [ "tokio", "tokio-named-pipes", "tokio-uds", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3527,12 +3558,12 @@ dependencies = [ "ethereum-types 0.9.2", "hashbrown 0.8.2", "impl-trait-for-tuples", - "lru", + "lru 0.5.3", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types 0.7.2", "smallvec 1.6.1", 
- "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -3541,14 +3572,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.94", "syn 1.0.94", "synstructure", ] [[package]] name = "parity-version" -version = "3.3.5-hbbft-0.8.9" +version = "4.0.0" dependencies = [ "parity-bytes", "rlp", @@ -3626,17 +3657,6 @@ dependencies = [ "parking_lot_core 0.7.2", ] -[[package]] -name = "parking_lot" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = [ - "instant", - "lock_api 0.4.6", - "parking_lot_core 0.8.3", -] - [[package]] name = "parking_lot" version = "0.12.1" @@ -3656,8 +3676,8 @@ dependencies = [ "libc", "rand 0.5.6", "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.8", + "smallvec 0.6.14", + "winapi 0.3.9", ] [[package]] @@ -3671,8 +3691,8 @@ dependencies = [ "libc", "redox_syscall 0.1.56", "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.8", + "smallvec 0.6.14", + "winapi 0.3.9", ] [[package]] @@ -3686,36 +3706,22 @@ dependencies = [ "libc", "redox_syscall 0.1.56", "smallvec 1.6.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "backtrace", "cfg-if 1.0.0", - "instant", "libc", "petgraph", "redox_syscall 0.2.16", "smallvec 1.6.1", "thread-id", - "winapi 0.3.8", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall 0.2.16", - "smallvec 1.6.1", "windows-sys", ] @@ -3770,12 +3776,12 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "petgraph" -version = "0.5.1" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.2.2", ] [[package]] @@ -3811,7 +3817,7 @@ dependencies = [ "futures", "log", "parity-runtime", - "parking_lot 0.11.1", + "parking_lot 0.12.1", "serde_json", ] @@ -3859,7 +3865,7 @@ dependencies = [ "hamming", "primal-bit", "primal-estimate", - "smallvec 0.6.13", + "smallvec 0.6.14", ] [[package]] @@ -3914,18 +3920,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.58" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.3" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" dependencies = [ "cfg-if 1.0.0", "fnv", @@ -3933,14 +3939,14 @@ dependencies = [ "memchr", "parking_lot 0.12.1", "protobuf", - "thiserror", + "thiserror 2.0.16", ] [[package]] name = "proptest" -version = "0.9.6" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01c477819b845fe023d33583ebf10c9f62518c8d79a0960ba5c36d6ac8a55a5b" +checksum = 
"12e6c80c1139113c28ee4670dc50cc42915228b51f56a9e407f0ec60f966646f" dependencies = [ "bit-set 0.5.2", "bitflags 1.2.1", @@ -3948,19 +3954,33 @@ dependencies = [ "lazy_static", "num-traits 0.2.15", "quick-error", - "rand 0.6.5", - "rand_chacha 0.1.1", - "rand_xorshift 0.1.1", - "regex-syntax 0.6.18", + "rand 0.7.3", + "rand_chacha 0.2.1", + "rand_xorshift 0.2.0", + "regex-syntax", "rusty-fork", "tempfile", ] [[package]] name = "protobuf" -version = "2.16.2" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" +dependencies = [ + "once_cell", + "protobuf-support", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf-support" +version = "3.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d883f78645c21b7281d21305181aa1f4dd9e9363e7cf2566c93121552cff003e" +checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" +dependencies = [ + "thiserror 1.0.69", +] [[package]] name = "pulldown-cmark" @@ -4005,11 +4025,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.27" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ - "proc-macro2 1.0.58", + "proc-macro2 1.0.94", ] [[package]] @@ -4018,16 +4038,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" -[[package]] -name = "rand" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" -dependencies = [ - "libc", - "rand 0.4.6", -] - [[package]] name = "rand" version = "0.4.6" @@ -4038,7 +4048,7 @@ dependencies = [ "libc", 
"rand_core 0.3.1", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4051,7 +4061,7 @@ dependencies = [ "fuchsia-cprng", "libc", "rand_core 0.3.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4070,7 +4080,7 @@ dependencies = [ "rand_os 0.1.3", "rand_pcg", "rand_xorshift 0.1.1", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4086,25 +4096,6 @@ dependencies = [ "rand_hc 0.2.0", ] -[[package]] -name = "rand04" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58595cc8bb12add45412667f9b422d5a9842d61d36e8607bc7c84ff738bf9263" -dependencies = [ - "rand 0.4.6", -] - -[[package]] -name = "rand04_compat" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cc0eb4bbb0cbc6c2a8081aa11303b9520369eea474cf865f7b7e3f11b284b" -dependencies = [ - "rand 0.6.5", - "rand04", -] - [[package]] name = "rand_chacha" version = "0.1.1" @@ -4194,7 +4185,7 @@ checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ "libc", "rand_core 0.4.2", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4208,7 +4199,7 @@ dependencies = [ "libc", "rand_core 0.4.2", "rdrand", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4274,7 +4265,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123" dependencies = [ - "crossbeam-deque 0.7.1", + "crossbeam-deque", "either", "rayon-core", ] @@ -4285,7 +4276,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b" dependencies = [ - "crossbeam-deque 0.7.1", + "crossbeam-deque", "crossbeam-queue 0.1.2", "crossbeam-utils 0.6.6", "lazy_static", @@ -4316,18 +4307,6 @@ dependencies = [ "bitflags 1.2.1", ] -[[package]] -name = "reed-solomon-erasure" -version = "3.1.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "77cbbd4c02f53e345fe49e74255a1b10080731ffb2a03475e11df7fc8a043c37" -dependencies = [ - "cc", - "libc", - "rayon", - "smallvec 0.6.13", -] - [[package]] name = "reed-solomon-erasure" version = "4.0.2" @@ -4337,29 +4316,16 @@ dependencies = [ "smallvec 1.6.1", ] -[[package]] -name = "regex" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" -dependencies = [ - "aho-corasick 0.6.10", - "memchr", - "regex-syntax 0.5.6", - "thread_local 0.3.6", - "utf8-ranges", -] - [[package]] name = "regex" version = "1.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" dependencies = [ - "aho-corasick 0.7.6", + "aho-corasick", "memchr", - "regex-syntax 0.6.18", - "thread_local 1.0.1", + "regex-syntax", + "thread_local", ] [[package]] @@ -4371,37 +4337,19 @@ dependencies = [ "byteorder", ] -[[package]] -name = "regex-syntax" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" -dependencies = [ - "ucd-util", -] - [[package]] name = "regex-syntax" version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" -[[package]] -name = "relay" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1576e382688d7e9deecea24417e350d3062d97e32e45d70b1cde65994ff1489a" -dependencies = [ - "futures", -] - [[package]] name = "remove_dir_all" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4420,7 
+4368,7 @@ dependencies = [ "libc", "spin", "untrusted", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -4449,28 +4397,29 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] [[package]] -name = "rlp_compress" -version = "0.1.0" +name = "rlp-derive" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652db34deaaa57929e10ca18e5454a32cb0efc351ae80d320334bbf907b908b3" dependencies = [ - "elastic-array", - "lazy_static", - "rlp", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", ] [[package]] -name = "rlp_derive" +name = "rlp_compress" version = "0.1.0" dependencies = [ - "proc-macro2 0.4.30", - "quote 0.6.13", + "elastic-array", + "lazy_static", "rlp", - "syn 0.15.26", ] [[package]] @@ -4513,24 +4462,11 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1601f32bc5858aae3cbfa1c645c96c4d820cc5c16be0194f089560c00b6eb625" -[[package]] -name = "rust-crypto" -version = "0.2.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a" -dependencies = [ - "gcc", - "libc", - "rand 0.3.23", - "rustc-serialize", - "time", -] - [[package]] name = "rustc-demangle" -version = "0.1.16" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hex" @@ -4544,12 +4480,6 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" -[[package]] -name = 
"rustc-serialize" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" - [[package]] name = "rustc_version" version = "0.2.3" @@ -4575,9 +4505,9 @@ dependencies = [ [[package]] name = "rusty-fork" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dd93264e10c577503e926bd1430193eeb5d21b059148910082245309b424fae" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", "quick-error", @@ -4606,12 +4536,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "scoped-tls" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" - [[package]] name = "scopeguard" version = "0.3.3" @@ -4684,31 +4608,31 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.144" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" +checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.144" +version = "1.0.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" +checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", - "syn 1.0.94", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", ] [[package]] name = "serde_json" -version = "1.0.41" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f72eb2a68a7dc3f9a691bfda9305a1c017a6215e5a4545c258500d2099a37c2" 
+checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3" dependencies = [ - "itoa", + "itoa 1.0.10", "ryu", "serde", ] @@ -4719,11 +4643,40 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dc6b7951b17b051f3210b063f12cc17320e2fe30ae05b0fe2a3abb068551c76" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", ] +[[package]] +name = "serde_with" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" +dependencies = [ + "base64 0.21.7", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.2.2", + "serde", + "serde_json", + "serde_with_macros", + "time 0.3.26", +] + +[[package]] +name = "serde_with_macros" +version = "3.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "568577ff0ef47b879f736cd66740e022f3672788cdf002a05a4e609ea5a6fb15" +dependencies = [ + "darling", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", +] + [[package]] name = "sha-1" version = "0.8.1" @@ -4805,9 +4758,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" dependencies = [ "maybe-uninit", ] @@ -4848,6 +4801,7 @@ version = "0.1.0" dependencies = [ "log", "prometheus", + "vergen", ] [[package]] @@ -4880,6 +4834,12 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "032c03039aae92b350aad2e3779c352e104d919cb192ba2fabbd7b831ce4f0f6" +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "subtle" version = "1.0.0" @@ -4920,11 +4880,22 @@ version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a07e33e919ebcd69113d5be0e4d70c5707004ff45188910106854f38b960df4a" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "unicode-xid 0.2.0", ] +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2 1.0.94", + "quote 1.0.40", + "unicode-ident", +] + [[package]] name = "synom" version = "0.11.3" @@ -4940,8 +4911,8 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "575be94ccb86e8da37efb894a87e2b660be299b41d8ef347f9d6d79fbe61b1ba" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", + "proc-macro2 1.0.94", + "quote 1.0.40", "syn 1.0.94", "unicode-xid 0.2.0", ] @@ -4973,7 +4944,7 @@ dependencies = [ "rand 0.7.3", "redox_syscall 0.1.56", "remove_dir_all", - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5016,42 +4987,52 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.20" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +dependencies = [ + "thiserror-impl 2.0.16", ] [[package]] name = "thiserror-impl" -version = "1.0.20" +version = "1.0.69" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ - "proc-macro2 1.0.58", - "quote 1.0.27", - "syn 1.0.94", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", ] [[package]] -name = "thread-id" -version = "3.3.0" +name = "thiserror-impl" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ - "libc", - "redox_syscall 0.1.56", - "winapi 0.3.8", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 2.0.100", ] [[package]] -name = "thread_local" -version = "0.3.6" +name = "thread-id" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +checksum = "cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" dependencies = [ - "lazy_static", + "libc", + "winapi 0.3.9", ] [[package]] @@ -5074,31 +5055,60 @@ dependencies = [ [[package]] name = "threshold_crypto" -version = "0.3.2" -source = "git+https://github.com/poanetwork/threshold_crypto?rev=624eeee#624eeee7e4ac5e565abbe93a31580a8f806ee5c4" +version = "0.4.0" +source = "git+https://github.com/DMDcoin/threshold_crypto.git?rev=5b582c420cf93b75078654ac3df6ec297bfe0371#5b582c420cf93b75078654ac3df6ec297bfe0371" dependencies = [ "byteorder", - "failure", + "ff", + "group", "hex_fmt", "log", "pairing", - "rand 0.6.5", - "rand04_compat", - "rand_chacha 0.1.1", + "rand 0.7.3", + "rand_chacha 0.2.1", "serde", - "tiny-keccak 1.5.0", + "thiserror 1.0.69", + "tiny-keccak 2.0.2", "zeroize", ] [[package]] name = "time" -version = "0.1.42" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", - "redox_syscall 0.1.56", - "winapi 0.3.8", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi 0.3.9", +] + +[[package]] +name = "time" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a79d09ac6b08c1ab3906a2f7cc2e81a0e27c7ae89c63812df75e52bef0751e07" +dependencies = [ + "deranged", + "itoa 1.0.10", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" + +[[package]] +name = "time-macros" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c65469ed6b3a4809d987a41eb1dc918e9bc1d92211cbad7ae82931846f7451" +dependencies = [ + "time-core", ] [[package]] @@ -5188,25 +5198,6 @@ dependencies = [ "tokio-io", ] -[[package]] -name = "tokio-core" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71" -dependencies = [ - "bytes", - "futures", - "iovec", - "log", - "mio", - "scoped-tls", - "tokio", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-timer 0.2.13", -] - [[package]] name = "tokio-current-thread" version = "0.1.6" @@ -5281,18 +5272,6 @@ dependencies = [ "tokio-sync", ] -[[package]] -name = "tokio-retry" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05746ae87dca83a2016b4f5dba5b237b897dd12fd324f60afe282112f16969a" -dependencies = [ - "futures", - "rand 0.3.23", - "tokio-core", - "tokio-service", -] - [[package]] name = "tokio-rustls" version = "0.9.4" @@ -5344,7 +5323,7 @@ version = "0.1.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ - "crossbeam-deque 0.7.1", + "crossbeam-deque", "crossbeam-queue 0.2.3", "crossbeam-utils 0.7.2", "futures", @@ -5484,12 +5463,6 @@ dependencies = [ "triehash", ] -[[package]] -name = "try-lock" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2aa4715743892880f70885373966c83d73ef1b0838a664ef0c76fffd35e7c2" - [[package]] name = "try-lock" version = "0.2.2" @@ -5502,7 +5475,7 @@ version = "1.0.0-alpha" dependencies = [ "ethereum-types 0.7.0", "log", - "smallvec 0.6.13", + "smallvec 0.6.14", "trace-time", ] @@ -5512,12 +5485,6 @@ version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" -[[package]] -name = "ucd-util" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85f514e095d348c279b1e5cd76795082cf15bd59b93207832abe0b1d8fed236" - [[package]] name = "uint" version = "0.8.5" @@ -5564,7 +5531,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "141339a08b982d942be2ca06ff8b076563cbe223d1befd5450716790d44e2426" dependencies = [ - "smallvec 0.6.13", + "smallvec 0.6.14", ] [[package]] @@ -5643,7 +5610,7 @@ checksum = "236a5eda3df2c877872e98dbc55d497d943792e6405d8fc65bd4f8a5e3b53c99" dependencies = [ "idna 0.1.5", "lazy_static", - "regex 1.3.9", + "regex", "serde", "serde_derive", "serde_json", @@ -5660,7 +5627,7 @@ dependencies = [ "lazy_static", "proc-macro2 0.4.30", "quote 0.6.13", - "regex 1.3.9", + "regex", "syn 0.15.26", "validator", ] @@ -5678,7 +5645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3365f36c57e5df714a34be40902b27a992eeddb9996eca52d0584611cf885d" dependencies = [ "bitflags 0.7.0", - "time", + "time 0.1.45", ] [[package]] 
@@ -5715,21 +5682,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" dependencies = [ "same-file", - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] -[[package]] -name = "want" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a05d9d966753fa4b5c8db73fcab5eed4549cfe0e1e4e66911e5564a0085c35d1" -dependencies = [ - "futures", - "log", - "try-lock 0.1.0", -] - [[package]] name = "want" version = "0.2.0" @@ -5738,7 +5694,7 @@ checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ "futures", "log", - "try-lock 0.2.2", + "try-lock", ] [[package]] @@ -5749,9 +5705,9 @@ checksum = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm" @@ -5768,6 +5724,60 @@ dependencies = [ "wasmi", ] +[[package]] +name = "wasm-bindgen" +version = "0.2.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.94", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.81" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +dependencies = [ + "quote 1.0.40", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +dependencies = [ + "proc-macro2 1.0.94", + "quote 1.0.40", + "syn 1.0.94", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" + [[package]] name = "wasmi" version = "0.3.0" @@ -5808,9 +5818,9 @@ checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" [[package]] name = "winapi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -5834,7 +5844,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", ] [[package]] @@ -5849,7 +5859,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96f5016b18804d24db43cebf3c77269e7569b8954a8464501c216cc5e070eaa9" dependencies = [ - "winapi 0.3.8", + "winapi 0.3.9", "winapi-util", ] @@ -5859,7 +5869,7 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", ] [[package]] @@ 
-5868,13 +5878,29 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -5883,42 +5909,90 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + [[package]] name = "windows_i686_gnu" version = "0.42.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" 
+[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + [[package]] name = "ws2_32-sys" version = "0.2.1" @@ -5946,9 +6020,9 @@ dependencies = [ [[package]] name = "xmltree" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9cfb54ca6b8f17d2377219ce485b134d53561b77e1393c7ea416f543a527431" +checksum = "ff8eaee9d17062850f1e6163b509947969242990ee59a35801af437abe041e70" dependencies = [ "xml-rs", ] diff --git a/Cargo.toml b/Cargo.toml index 160630d61f..f1b4fc09bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,13 +2,15 @@ description = "Diamond Node" name = "diamond-node" # NOTE Make sure to update util/version/Cargo.toml as well -version = "3.3.5-hbbft-0.8.9" +version = "4.0.0" license = "GPL-3.0" authors = [ "bit.diamonds developers", "OpenEthereum developers", "Parity Technologies " ] +edition = "2024" +rust-version = "1.85" [dependencies] blooms-db = { path = "crates/db/blooms-db" } @@ -23,7 +25,7 @@ number_prefix = "0.2" rpassword = "1.0" semver = "0.9" ansi_term = "0.10" -parking_lot = "0.11.1" +parking_lot = "0.12" crossbeam-channel = "0.5.2" regex = "1.0" atty = "0.2.8" @@ -57,7 +59,7 @@ node-filter = { path = "crates/net/node-filter" } parity-crypto = { version = "0.6.2", features = [ "publickey" ] } rlp = { version = "0.4.6" } cli-signer= { path = "crates/util/cli-signer" } -parity-daemonize = "0.3" +parity-daemonize = { git = "https://github.com/DMDcoin/parity-daemonize.git", rev = "1d0802cd6880b0b914c6e82ba274bb3fc63238fb" } parity-local-store = { path = "crates/concensus/miner/local-store" } parity-runtime = { path = "crates/runtime/runtime" } parity-rpc = { path = "crates/rpc" } @@ -73,12 +75,16 @@ kvdb = "0.1" kvdb-rocksdb = "0.1.3" journaldb = { path = "crates/db/journaldb" } stats = { path = "crates/util/stats" } -prometheus = "0.13.0" 
+prometheus = "0.14" fs-swap = "0.2.6" net2 = "0.2.38" # ethcore-secretstore = { path = "crates/util/secret-store", optional = true } +[patch.crates-io] +# patch parity-rocksdb-sys package +parity-rocksdb = { git = 'https://github.com/DMDcoin/rust-rocksdb.git' } + [build-dependencies] rustc_version = "0.2" @@ -93,7 +99,7 @@ lazy_static = "1.2.0" winapi = { version = "0.3.4", features = ["winsock2", "winuser", "shellapi"] } [features] -default = ["accounts"] +default = ["shutdown-on-deadlock", "accounts"] accounts = ["ethcore-accounts", "parity-rpc/accounts"] miner-debug = ["ethcore/miner-debug"] json-tests = ["ethcore/json-tests"] @@ -111,6 +117,8 @@ deadlock_detection = ["parking_lot/deadlock_detection"] # `valgrind --tool=massif /path/to/parity ` # and `massif-visualizer` for visualization memory_profiling = [] +secretstore = [] +shutdown-on-deadlock = ["deadlock_detection"] [lib] path = "bin/oe/lib.rs" diff --git a/README.md b/README.md index 588b25bb33..29b32681c6 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,12 @@ -# OpenEthereum +# Diamond Node -Fast and feature-rich multi-network Ethereum client. - -[» Download the latest release «](https://github.com/openethereum/openethereum/releases/latest) +Node client for the protocol version 4 of the bit.diamonds network. [![GPL licensed][license-badge]][license-url] -[![Build Status][ci-badge]][ci-url] -[![Discord chat][chat-badge]][chat-url] [license-badge]: https://img.shields.io/badge/license-GPL_v3-green.svg [license-url]: LICENSE -[ci-badge]: https://github.com/openethereum/openethereum/workflows/Build%20and%20Test%20Suite/badge.svg -[ci-url]: https://github.com/openethereum/openethereum/actions -[chat-badge]: https://img.shields.io/discord/669192218728202270.svg?logo=discord -[chat-url]: https://discord.io/openethereum +[chat-url]: (https://discord.com/invite/MwqZ2CYcB4) ## Table of Contents @@ -32,29 +25,25 @@ Fast and feature-rich multi-network Ethereum client. ## 1. 
Description -**Built for mission-critical use**: Miners, service providers, and exchanges need fast synchronisation and maximum uptime. OpenEthereum provides the core infrastructure essential for speedy and reliable services. - -- Clean, modular codebase for easy customisation -- Advanced CLI-based client -- Minimal memory and storage footprint -- Synchronise in hours, not days with Warp Sync -- Modular for light integration into your service or product +diamond-node is the node software for the upcomming V4 of the diamond network. +The Node Software is on a alpha level and still under active development. ## 2. Technical Overview -OpenEthereum's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing OpenEthereum using the **Rust programming language**. OpenEthereum is licensed under the GPLv3 and can be used for all your Ethereum needs. +diamond-node builds on OpenEthereum, and shares a lot of base features, +covered in the [OpenEthereum Documentation](https://openethereum.github.io/). -By default, OpenEthereum runs a JSON-RPC HTTP server on port `:8545` and a Web-Sockets server on port `:8546`. This is fully configurable and supports a number of APIs. +By default, diamond-node runs a JSON-RPC HTTP server on port `:8545` and a Web-Sockets server on port `:8546`. This is fully configurable and supports a number of APIs. -If you run into problems while using OpenEthereum, check out the [old wiki for documentation](https://openethereum.github.io/), feel free to [file an issue in this repository](https://github.com/openethereum/openethereum/issues/new), or hop on our [Discord](https://discord.io/openethereum) chat room to ask a question. We are glad to help! 
+If you run into problems while using diamond-node, feel free to [file an issue in this repository](https://github.com/dmdcoind/diamond-node/issues/new), or hop on our [Slack](https://dmdcoin.slack.com/), [Telegram](https://t.me/DMDcoin) or [Discord](https://discord.gg/TStv6gm) chat room to ask a question. We are glad to help! -You can download OpenEthereum's latest release at [the releases page](https://github.com/openethereum/openethereum/releases) or follow the instructions below to build from source. Read the [CHANGELOG.md](CHANGELOG.md) for a list of all changes between different versions. +We do not provide binaries and suggest to build from source. ## 3. Building ### 3.1 Build Dependencies -OpenEthereum requires **latest stable Rust version** to build. +diamond-node requires **latest stable Rust version** to build. We recommend installing Rust through [rustup](https://www.rustup.rs/). If you don't already have `rustup`, you can install it like this: @@ -63,7 +52,7 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/). If you do $ curl https://sh.rustup.rs -sSf | sh ``` - OpenEthereum also requires `clang` (>= 9.0), `clang++`, `pkg-config`, `file`, `make`, and `cmake` packages to be installed. + diamond-node also requires `clang` (>= 9.0), `clang++`, `pkg-config`, `file`, `make`, and `cmake` packages to be installed. - OSX: ```bash @@ -83,14 +72,14 @@ Once you have `rustup` installed, then you need to install: * [Perl](https://www.perl.org) * [Yasm](https://yasm.tortall.net) -Make sure that these binaries are in your `PATH`. After that, you should be able to build OpenEthereum from source. +Make sure that these binaries are in your `PATH`. After that, you should be able to build diamond-node from source. 
### 3.2 Build from Source Code ```bash # download OpenEthereum code -$ git clone https://github.com/openethereum/openethereum -$ cd openethereum +$ git clone https://github.com/DMDcoin/diamond-node +$ cd diamond-node # build in release mode $ cargo build --release --features final @@ -116,26 +105,26 @@ This always compiles the latest nightly builds. If you want to build stable, do $ git checkout stable ``` -### 3.3 Starting OpenEthereum +### 3.3 Starting diamond-node #### Manually -To start OpenEthereum manually, just run +To start diamond-node manually, just run ```bash $ ./target/release/openethereum ``` -so OpenEthereum begins syncing the Ethereum blockchain. +so diamond-node begins syncing the Ethereum blockchain. #### Using `systemd` service file -To start OpenEthereum as a regular user using `systemd` init: +To start diamond-node as a regular user using `systemd` init: 1. Copy `./scripts/openethereum.service` to your `systemd` user directory (usually `~/.config/systemd/user`). 2. Copy release to bin folder, write `sudo install ./target/release/openethereum /usr/bin/openethereum` -3. To configure OpenEthereum, see [our wiki](https://openethereum.github.io/Configuring-OpenEthereum) for details. +3. To configure diamond-node, see [our wiki](https://openethereum.github.io/Configuring-OpenEthereum) for details. ## 4. Testing diff --git a/bin/chainspec/Cargo.toml b/bin/chainspec/Cargo.toml index f990a81ddd..ea58e7bd14 100644 --- a/bin/chainspec/Cargo.toml +++ b/bin/chainspec/Cargo.toml @@ -2,6 +2,7 @@ description = "Parity Ethereum Chain Specification" name = "chainspec" version = "0.1.0" +edition = "2024" authors = ["Marek Kotewicz "] [dependencies] diff --git a/bin/chainspec/src/main.rs b/bin/chainspec/src/main.rs index 4928a99b5a..0c5a885ca6 100644 --- a/bin/chainspec/src/main.rs +++ b/bin/chainspec/src/main.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-extern crate ethjson; +use ethjson; extern crate serde_json; use ethjson::spec::Spec; diff --git a/bin/ethkey/Cargo.toml b/bin/ethkey/Cargo.toml index d5af93f26b..709dd1397d 100644 --- a/bin/ethkey/Cargo.toml +++ b/bin/ethkey/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity Ethereum Keys Generator CLI" name = "ethkey-cli" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] docopt = "1.0" diff --git a/bin/ethkey/src/main.rs b/bin/ethkey/src/main.rs index 230351ef4d..d8b7fab143 100644 --- a/bin/ethkey/src/main.rs +++ b/bin/ethkey/src/main.rs @@ -30,10 +30,10 @@ extern crate serde_derive; use std::{env, fmt, io, num::ParseIntError, process, sync}; use crypto::publickey::{ - sign, verify_address, verify_public, Error as EthkeyError, Generator, KeyPair, Random, + Error as EthkeyError, Generator, KeyPair, Random, sign, verify_address, verify_public, }; use docopt::Docopt; -use ethkey::{brain_recover, Brain, BrainPrefix, Prefix}; +use ethkey::{Brain, BrainPrefix, Prefix, brain_recover}; use rustc_hex::{FromHex, FromHexError}; const USAGE: &'static str = r#" diff --git a/bin/ethstore/Cargo.toml b/bin/ethstore/Cargo.toml index e03abc9090..b13564ecc3 100644 --- a/bin/ethstore/Cargo.toml +++ b/bin/ethstore/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity Ethereum Key Management CLI" name = "ethstore-cli" version = "0.1.1" authors = ["Parity Technologies "] +edition = "2024" [dependencies] docopt = "1.0" @@ -11,7 +12,7 @@ num_cpus = "1.6" rustc-hex = "1.0" serde = "1.0" serde_derive = "1.0" -parking_lot = "0.11.1" +parking_lot = "0.12" ethstore = { path = "../../crates/accounts/ethstore" } dir = { path = '../../crates/util/dir' } panic_hook = { path = "../../crates/util/panic-hook" } diff --git a/bin/ethstore/src/crack.rs b/bin/ethstore/src/crack.rs index 04f6086a52..3d1fadd71c 100644 --- a/bin/ethstore/src/crack.rs +++ b/bin/ethstore/src/crack.rs @@ -17,7 +17,7 @@ use parking_lot::Mutex; use std::{cmp, collections::VecDeque, sync::Arc, 
thread}; -use ethstore::{ethkey::Password, Error, PresaleWallet}; +use ethstore::{Error, PresaleWallet, ethkey::Password}; use num_cpus; pub fn run(passwords: VecDeque, wallet_path: &str) -> Result<(), Error> { diff --git a/bin/ethstore/src/main.rs b/bin/ethstore/src/main.rs index 6d275194e8..117be943d5 100644 --- a/bin/ethstore/src/main.rs +++ b/bin/ethstore/src/main.rs @@ -17,7 +17,6 @@ extern crate dir; extern crate docopt; extern crate ethstore; -extern crate num_cpus; extern crate panic_hook; extern crate parking_lot; extern crate rustc_hex; @@ -32,10 +31,10 @@ use std::{collections::VecDeque, env, fmt, fs, io::Read, process}; use docopt::Docopt; use ethstore::{ + EthStore, PresaleWallet, SecretStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef, accounts_dir::{KeyDirectory, RootDiskDirectory}, ethkey::{Address, Password}, - import_accounts, EthStore, PresaleWallet, SecretStore, SecretVaultRef, SimpleSecretStore, - StoreAccountRef, + import_accounts, }; mod crack; @@ -150,7 +149,7 @@ impl fmt::Display for Error { fn main() { panic_hook::set_abort(); if env::var("RUST_LOG").is_err() { - env::set_var("RUST_LOG", "warn") + unsafe { env::set_var("RUST_LOG", "warn") } } env_logger::try_init().expect("Logger initialized only once."); diff --git a/bin/evmbin/Cargo.toml b/bin/evmbin/Cargo.toml index d6b8960519..bd1f6ecf72 100644 --- a/bin/evmbin/Cargo.toml +++ b/bin/evmbin/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity EVM Implementation" name = "evmbin" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [[bin]] name = "openethereum-evm" diff --git a/bin/evmbin/benches/mod.rs b/bin/evmbin/benches/mod.rs index e02f1ea918..c8938c62b5 100644 --- a/bin/evmbin/benches/mod.rs +++ b/bin/evmbin/benches/mod.rs @@ -23,18 +23,18 @@ #[macro_use] extern crate criterion; extern crate ethcore; -extern crate ethereum_types; -extern crate evm; +use ethereum_types; +use evm; extern crate rustc_hex; -extern crate vm; +use vm; -use criterion::{black_box, 
Criterion}; +use criterion::{Criterion, black_box}; use std::sync::Arc; use ethereum_types::U256; use evm::Factory; use rustc_hex::FromHex; -use vm::{tests::FakeExt, ActionParams, Ext}; +use vm::{ActionParams, Ext, tests::FakeExt}; criterion_group!( evmbin, diff --git a/bin/evmbin/src/display/json.rs b/bin/evmbin/src/display/json.rs index 08658abf0f..4a6bd1037b 100644 --- a/bin/evmbin/src/display/json.rs +++ b/bin/evmbin/src/display/json.rs @@ -19,11 +19,10 @@ use std::{collections::HashMap, mem}; use super::config::Config; +use crate::{display, info as vm}; use bytes::ToPretty; -use display; use ethcore::trace; use ethereum_types::{BigEndianHash, H256, U256}; -use info as vm; /// JSON formatting informant. #[derive(Default)] @@ -272,7 +271,7 @@ impl trace::VMTracer for Informant { #[cfg(test)] mod tests { use super::*; - use info::tests::run_test; + use crate::info::tests::run_test; use serde_json; #[derive(Serialize, Deserialize, Debug, PartialEq)] diff --git a/bin/evmbin/src/display/simple.rs b/bin/evmbin/src/display/simple.rs index 76043d54ea..acd30773d7 100644 --- a/bin/evmbin/src/display/simple.rs +++ b/bin/evmbin/src/display/simple.rs @@ -20,8 +20,7 @@ use super::config::Config; use bytes::ToPretty; use ethcore::trace; -use display; -use info as vm; +use crate::{display, info as vm}; /// Simple formatting informant. 
#[derive(Default)] diff --git a/bin/evmbin/src/display/std_json.rs b/bin/evmbin/src/display/std_json.rs index 4114f719cc..06a08c03bf 100644 --- a/bin/evmbin/src/display/std_json.rs +++ b/bin/evmbin/src/display/std_json.rs @@ -19,35 +19,25 @@ use std::{collections::HashMap, io}; use super::config::Config; +use crate::{display, info as vm}; use bytes::ToPretty; -use display; use ethcore::{pod_state, trace}; use ethereum_types::{BigEndianHash, H256, U256}; -use info as vm; pub trait Writer: io::Write + Send + Sized { fn clone(&self) -> Self; - fn default() -> Self; } impl Writer for io::Stdout { fn clone(&self) -> Self { io::stdout() } - - fn default() -> Self { - io::stdout() - } } impl Writer for io::Stderr { fn clone(&self) -> Self { io::stderr() } - - fn default() -> Self { - io::stderr() - } } /// JSON formatting informant. @@ -132,7 +122,7 @@ impl Informant { root: H256, end_state: &Option, ) { - if let Some(ref end_state) = end_state { + if let Some(end_state) = end_state { let dump_data = json!({ "root": root, "accounts": end_state, @@ -165,7 +155,7 @@ impl vm::Informant for Informant { } fn finish( result: vm::RunResult<::Output>, - (ref mut trace_sink, ref mut out_sink, _): &mut Self::Sink, + (trace_sink, out_sink, _): &mut Self::Sink, ) { match result { Ok(success) => { @@ -290,7 +280,7 @@ impl trace::VMTracer for Informant { #[cfg(test)] pub mod tests { use super::*; - use info::tests::run_test; + use crate::info::tests::run_test; use std::sync::{Arc, Mutex}; #[derive(Debug, Clone, Default)] @@ -300,9 +290,6 @@ pub mod tests { fn clone(&self) -> Self { Clone::clone(self) } - fn default() -> Self { - Default::default() - } } impl io::Write for TestWriter { diff --git a/bin/evmbin/src/info.rs b/bin/evmbin/src/info.rs index 8dde902614..4aa3a836a3 100644 --- a/bin/evmbin/src/info.rs +++ b/bin/evmbin/src/info.rs @@ -16,14 +16,15 @@ //! VM runner. 
+use crate::types::transaction; use ethcore::{ + TrieSpec, client::{self, EvmTestClient, EvmTestError, TransactErr, TransactSuccess}, - pod_state, spec, state, state_db, trace, TrieSpec, + pod_state, spec, state, state_db, trace, }; use ethereum_types::{H256, U256}; use ethjson; use std::time::{Duration, Instant}; -use types::transaction; use vm::ActionParams; /// VM execution informant @@ -288,7 +289,7 @@ pub mod tests { #[test] fn should_call_account_from_spec() { - use display::{config::Config, std_json::tests::informant}; + use crate::display::{config::Config, std_json::tests::informant}; let (inf, res) = informant(Config::default()); let mut params = ActionParams::default(); diff --git a/bin/evmbin/src/main.rs b/bin/evmbin/src/main.rs index 343f5118b2..a4e0db7608 100644 --- a/bin/evmbin/src/main.rs +++ b/bin/evmbin/src/main.rs @@ -20,7 +20,7 @@ extern crate common_types as types; extern crate ethcore; -extern crate ethjson; +use ethjson; extern crate rustc_hex; extern crate serde; #[macro_use] @@ -29,11 +29,11 @@ extern crate serde_derive; extern crate serde_json; extern crate docopt; extern crate env_logger; -extern crate ethereum_types; -extern crate evm; +use ethereum_types; +use evm; extern crate panic_hook; extern crate parity_bytes as bytes; -extern crate vm; +use vm; #[cfg(test)] #[macro_use] @@ -44,7 +44,7 @@ extern crate tempdir; use bytes::Bytes; use docopt::Docopt; -use ethcore::{json_tests, spec, TrieSpec}; +use ethcore::{TrieSpec, json_tests, spec}; use ethereum_types::{Address, U256}; use ethjson::spec::ForkSpec; use evm::EnvInfo; @@ -336,7 +336,6 @@ fn run_call(args: Args, informant: T) { #[derive(Debug, Deserialize)] struct Args { - cmd_stats: bool, cmd_state_test: bool, cmd_stats_jsontests_vm: bool, arg_file: Option, diff --git a/bin/oe/account.rs b/bin/oe/account.rs index 0bd87cdbd8..541013026c 100644 --- a/bin/oe/account.rs +++ b/bin/oe/account.rs @@ -57,7 +57,7 @@ mod command { accounts::{AccountProvider, AccountProviderSettings}, 
helpers::{password_from_file, password_prompt}, }; - use ethstore::{accounts_dir::RootDiskDirectory, import_account, import_accounts, EthStore}; + use ethstore::{EthStore, accounts_dir::RootDiskDirectory, import_account, import_accounts}; use std::path::PathBuf; pub fn execute(cmd: AccountCmd) -> Result { diff --git a/bin/oe/account_utils.rs b/bin/oe/account_utils.rs index 4b9415466b..d5da5afab1 100644 --- a/bin/oe/account_utils.rs +++ b/bin/oe/account_utils.rs @@ -16,7 +16,7 @@ use std::sync::Arc; -use crypto::publickey; +use crate::crypto::publickey; use dir::Directories; use ethereum_types::{Address, H160}; use ethkey::Password; @@ -43,7 +43,9 @@ mod accounts { _cfg: AccountsConfig, _passwords: &[Password], ) -> Result { - warn!("Note: Your instance of OpenEthereum is running without account support. Some CLI options are ignored."); + warn!( + "Note: Your instance of OpenEthereum is running without account support. Some CLI options are ignored." + ); Ok(AccountProvider) } @@ -88,7 +90,7 @@ mod accounts { passwords: &[Password], ) -> Result { use crate::accounts::AccountProviderSettings; - use ethstore::{accounts_dir::RootDiskDirectory, EthStore}; + use ethstore::{EthStore, accounts_dir::RootDiskDirectory}; let path = dirs.keys_path(data_dir); upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path); @@ -105,8 +107,10 @@ mod accounts { | SpecType::Goerli | SpecType::Sokol | SpecType::Dev => vec![], - _ => vec![H160::from_str("00a329c0648769a73afac7f9381e08fb43dbea72") - .expect("the string is valid hex; qed")], + _ => vec![ + H160::from_str("00a329c0648769a73afac7f9381e08fb43dbea72") + .expect("the string is valid hex; qed"), + ], }, }; @@ -230,8 +234,8 @@ mod accounts { match account_provider.insert_account(secret, &Password::from(String::new())) { Err(e) => warn!("Unable to add development account: {}", e), Ok(address) => { - let _ = account_provider - .set_account_name(address.clone(), "Development Account".into()); + let _ = + 
account_provider.set_account_name(address, "Development Account".into()); let _ = account_provider.set_account_meta( address, ::serde_json::to_string( @@ -254,10 +258,13 @@ mod accounts { // Construct an error `String` with an adaptive hint on how to create an account. fn build_create_account_hint(spec: &SpecType, keys: &str) -> String { - format!("You can create an account via RPC, UI or `openethereum account new --chain {} --keys-path {}`.", spec, keys) + format!( + "You can create an account via RPC, UI or `openethereum account new --chain {} --keys-path {}`.", + spec, keys + ) } } pub use self::accounts::{ - accounts_list, miner_author, miner_local_accounts, prepare_account_provider, AccountProvider, + AccountProvider, accounts_list, miner_author, miner_local_accounts, prepare_account_provider, }; diff --git a/bin/oe/blockchain.rs b/bin/oe/blockchain.rs index d7f585758b..bc2065911c 100644 --- a/bin/oe/blockchain.rs +++ b/bin/oe/blockchain.rs @@ -20,10 +20,10 @@ use crate::{ bytes::ToPretty, cache::CacheConfig, db, - hash::{keccak, KECCAK_NULL_RLP}, + hash::{KECCAK_NULL_RLP, keccak}, helpers::{execute_upgrades, to_client_config}, informant::{FullNodeInformantData, Informant, MillisecondDuration}, - params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch}, + params::{Pruning, SpecType, Switch, fatdb_switch_to_bool, tracing_switch_to_bool}, types::data_format::DataFormat, user_defaults::UserDefaults, }; @@ -34,6 +34,7 @@ use ethcore::{ Balance, BlockChainClient, BlockChainReset, BlockId, DatabaseCompactionProfile, ImportExportBlocks, Mode, Nonce, VMType, }, + exit::ShutdownManager, miner::Miner, verification::queue::VerifierSettings, }; @@ -192,6 +193,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { cmd.pruning_memory, cmd.check_seal, 12, + None, ); client_config.queue.verifier_settings = cmd.verifier_settings; @@ -212,6 +214,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { // TODO [ToDr] don't use 
test miner here // (actually don't require miner at all) Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ShutdownManager::null()), ) .map_err(|e| format!("Client service error: {:?}", e))?; @@ -253,15 +256,16 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> { let report = client.report(); let ms = timer.elapsed().as_milliseconds(); - info!("Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s", - ms / 1000, - report.blocks_imported, - (report.blocks_imported * 1000) as u64 / ms, - report.transactions_applied, - (report.transactions_applied * 1000) as u64 / ms, - report.gas_processed / 1_000_000, - (report.gas_processed / (ms * 1000)).low_u64(), - ); + info!( + "Import completed in {} seconds, {} blocks, {} blk/s, {} transactions, {} tx/s, {} Mgas, {} Mgas/s", + ms / 1000, + report.blocks_imported, + (report.blocks_imported * 1000) as u64 / ms, + report.transactions_applied, + (report.transactions_applied * 1000) as u64 / ms, + report.gas_processed / 1_000_000, + (report.gas_processed / (ms * 1000)).low_u64(), + ); Ok(()) } @@ -277,6 +281,8 @@ fn start_client( cache_config: CacheConfig, require_fat_db: bool, max_round_blocks_to_import: usize, + shutdown_on_missing_block_import: Option, + shutdown: Arc, ) -> Result { // load spec file let spec = spec.spec(&dirs.cache)?; @@ -330,6 +336,7 @@ fn start_client( pruning_memory, true, max_round_blocks_to_import, + shutdown_on_missing_block_import, ); let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config); @@ -347,6 +354,7 @@ fn start_client( // It's fine to use test version here, // since we don't care about miner parameters at all Arc::new(Miner::new_for_tests(&spec, None)), + shutdown, ) .map_err(|e| format!("Client service error: {:?}", e))?; @@ -367,6 +375,8 @@ fn execute_export(cmd: ExportBlockchain) -> Result<(), String> { cmd.cache_config, false, cmd.max_round_blocks_to_import, + None, + 
Arc::new(ShutdownManager::null()), )?; let client = service.client(); @@ -396,6 +406,8 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> { cmd.cache_config, true, cmd.max_round_blocks_to_import, + None, + Arc::new(ShutdownManager::null()), )?; let client = service.client(); @@ -425,8 +437,8 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> { let balance = client .balance(&account, at.into()) .unwrap_or_else(U256::zero); - if cmd.min_balance.map_or(false, |m| balance < m) - || cmd.max_balance.map_or(false, |m| balance > m) + if cmd.min_balance.is_some_and(|m| balance < m) + || cmd.max_balance.is_some_and(|m| balance > m) { last = Some(account); continue; //filtered out @@ -514,6 +526,8 @@ fn execute_reset(cmd: ResetBlockchain) -> Result<(), String> { cmd.cache_config, false, 0, + None, + Arc::new(ShutdownManager::null()), )?; let client = service.client(); diff --git a/bin/oe/cache.rs b/bin/oe/cache.rs index 26b34e8992..ca2ed68703 100644 --- a/bin/oe/cache.rs +++ b/bin/oe/cache.rs @@ -67,11 +67,11 @@ impl CacheConfig { /// Creates new cache config with gitven details. pub fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Self { CacheConfig { - db: db, - blockchain: blockchain, - queue: queue, + db, + blockchain, + queue, traces: DEFAULT_TRACE_CACHE_SIZE, - state: state, + state, } } diff --git a/bin/oe/cli/mod.rs b/bin/oe/cli/mod.rs index 1b7bdc58ff..c28b91c8c3 100644 --- a/bin/oe/cli/mod.rs +++ b/bin/oe/cli/mod.rs @@ -238,11 +238,11 @@ usage! { "--mode=[MODE]", "Set the operating mode. 
MODE can be one of: last - Uses the last-used mode, active if none; active - Parity continuously syncs the chain; passive - Parity syncs initially, then sleeps and wakes regularly to resync; dark - Parity syncs only when the JSON-RPC is active; offline - Parity doesn't sync.", - ARG arg_mode_timeout: (u64) = 300u64, or |c: &Config| c.parity.as_ref()?.mode_timeout.clone(), + ARG arg_mode_timeout: (u64) = 300u64, or |c: &Config| c.parity.as_ref()?.mode_timeout, "--mode-timeout=[SECS]", "Specify the number of seconds before inactivity timeout occurs when mode is dark or passive", - ARG arg_mode_alarm: (u64) = 3600u64, or |c: &Config| c.parity.as_ref()?.mode_alarm.clone(), + ARG arg_mode_alarm: (u64) = 3600u64, or |c: &Config| c.parity.as_ref()?.mode_alarm, "--mode-alarm=[SECS]", "Specify the number of seconds before auto sleep reawake timeout occurs when mode is passive", @@ -280,15 +280,15 @@ usage! { "Add SHIFT to all port numbers OpenEthereum is listening on. Includes network port and all servers (HTTP JSON-RPC, WebSockets JSON-RPC, SecretStore).", ["Account Options"] - FLAG flag_fast_unlock: (bool) = false, or |c: &Config| c.account.as_ref()?.fast_unlock.clone(), + FLAG flag_fast_unlock: (bool) = false, or |c: &Config| c.account.as_ref()?.fast_unlock, "--fast-unlock", "Use drastically faster unlocking mode. 
This setting causes raw secrets to be stored unprotected in memory, so use with care.", - ARG arg_keys_iterations: (u32) = 10240u32, or |c: &Config| c.account.as_ref()?.keys_iterations.clone(), + ARG arg_keys_iterations: (u32) = 10240u32, or |c: &Config| c.account.as_ref()?.keys_iterations, "--keys-iterations=[NUM]", "Specify the number of iterations to use when deriving key from the password (bigger is more secure)", - ARG arg_accounts_refresh: (u64) = 5u64, or |c: &Config| c.account.as_ref()?.refresh_time.clone(), + ARG arg_accounts_refresh: (u64) = 5u64, or |c: &Config| c.account.as_ref()?.refresh_time, "--accounts-refresh=[TIME]", "Specify the cache time of accounts read from disk. If you manage thousands of accounts set this to 0 to disable refresh.", @@ -306,15 +306,15 @@ usage! { "Specify directory where Trusted UIs tokens should be stored.", ["Networking Options"] - FLAG flag_no_warp: (bool) = false, or |c: &Config| c.network.as_ref()?.warp.clone().map(|w| !w), + FLAG flag_no_warp: (bool) = false, or |c: &Config| c.network.as_ref()?.warp.map(|w| !w), "--no-warp", "Disable syncing from the snapshot over the network.", - FLAG flag_no_discovery: (bool) = false, or |c: &Config| c.network.as_ref()?.discovery.map(|d| !d).clone(), + FLAG flag_no_discovery: (bool) = false, or |c: &Config| c.network.as_ref()?.discovery.map(|d| !d), "--no-discovery", "Disable new peer discovery.", - FLAG flag_reserved_only: (bool) = false, or |c: &Config| c.network.as_ref()?.reserved_only.clone(), + FLAG flag_reserved_only: (bool) = false, or |c: &Config| c.network.as_ref()?.reserved_only, "--reserved-only", "Connect only to reserved nodes.", @@ -322,11 +322,11 @@ usage! { "--no-ancient-blocks", "Disable downloading old blocks after snapshot restoration or warp sync. 
Not recommended.", - ARG arg_warp_barrier: (Option) = None, or |c: &Config| c.network.as_ref()?.warp_barrier.clone(), + ARG arg_warp_barrier: (Option) = None, or |c: &Config| c.network.as_ref()?.warp_barrier, "--warp-barrier=[NUM]", "When warp enabled never attempt regular sync before warping to block NUM.", - ARG arg_port: (u16) = 30303u16, or |c: &Config| c.network.as_ref()?.port.clone(), + ARG arg_port: (u16) = 30303u16, or |c: &Config| c.network.as_ref()?.port, "--port=[PORT]", "Override the port on which the node should listen.", @@ -334,15 +334,15 @@ usage! { "--interface=[IP]", "Network interfaces. Valid values are 'all', 'local' or the ip of the interface you want OpenEthereum to listen to.", - ARG arg_min_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.min_peers.clone(), + ARG arg_min_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.min_peers, "--min-peers=[NUM]", "Try to maintain at least NUM peers.", - ARG arg_max_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.max_peers.clone(), + ARG arg_max_peers: (Option) = None, or |c: &Config| c.network.as_ref()?.max_peers, "--max-peers=[NUM]", "Allow up to NUM peers.", - ARG arg_snapshot_peers: (u16) = 0u16, or |c: &Config| c.network.as_ref()?.snapshot_peers.clone(), + ARG arg_snapshot_peers: (u16) = 0u16, or |c: &Config| c.network.as_ref()?.snapshot_peers, "--snapshot-peers=[NUM]", "Allow additional NUM peers for a snapshot sync.", @@ -354,11 +354,11 @@ usage! { "--allow-ips=[FILTER]", "Filter outbound connections. 
Must be one of: private - connect to private network IP addresses only; public - connect to public network IP addresses only; all - connect to any IP address.", - ARG arg_max_pending_peers: (u16) = 64u16, or |c: &Config| c.network.as_ref()?.max_pending_peers.clone(), + ARG arg_max_pending_peers: (u16) = 64u16, or |c: &Config| c.network.as_ref()?.max_pending_peers, "--max-pending-peers=[NUM]", "Allow up to NUM pending connections.", - ARG arg_network_id: (Option) = None, or |c: &Config| c.network.as_ref()?.id.clone(), + ARG arg_network_id: (Option) = None, or |c: &Config| c.network.as_ref()?.id, "--network-id=[INDEX]", "Override the network identifier from the chain we are on.", @@ -385,11 +385,11 @@ usage! { }, ["API and Console Options – HTTP JSON-RPC"] - FLAG flag_jsonrpc_allow_missing_blocks: (bool) = false, or |c: &Config| c.rpc.as_ref()?.allow_missing_blocks.clone(), + FLAG flag_jsonrpc_allow_missing_blocks: (bool) = false, or |c: &Config| c.rpc.as_ref()?.allow_missing_blocks, "--jsonrpc-allow-missing-blocks", "RPC calls will return 'null' instead of an error if ancient block sync is still in progress and the block information requested could not be found", - FLAG flag_no_jsonrpc: (bool) = false, or |c: &Config| c.rpc.as_ref()?.disable.clone(), + FLAG flag_no_jsonrpc: (bool) = false, or |c: &Config| c.rpc.as_ref()?.disable, "--no-jsonrpc", "Disable the HTTP JSON-RPC API server.", @@ -397,11 +397,11 @@ usage! { "--jsonrpc-no-keep-alive", "Disable HTTP/1.1 keep alive header. Disabling keep alive will prevent re-using the same TCP connection to fire multiple requests, recommended when using one request per connection.", - FLAG flag_jsonrpc_experimental: (bool) = false, or |c: &Config| c.rpc.as_ref()?.experimental_rpcs.clone(), + FLAG flag_jsonrpc_experimental: (bool) = false, or |c: &Config| c.rpc.as_ref()?.experimental_rpcs, "--jsonrpc-experimental", "Enable experimental RPCs. 
Enable to have access to methods from unfinalised EIPs in all namespaces", - ARG arg_jsonrpc_port: (u16) = 8545u16, or |c: &Config| c.rpc.as_ref()?.port.clone(), + ARG arg_jsonrpc_port: (u16) = 8545u16, or |c: &Config| c.rpc.as_ref()?.port, "--jsonrpc-port=[PORT]", "Specify the port portion of the HTTP JSON-RPC API server.", @@ -433,16 +433,16 @@ usage! { "--jsonrpc-max-payload=[MB]", "Specify maximum size for HTTP JSON-RPC requests in megabytes.", - ARG arg_poll_lifetime: (u32) = 60u32, or |c: &Config| c.rpc.as_ref()?.poll_lifetime.clone(), + ARG arg_poll_lifetime: (u32) = 60u32, or |c: &Config| c.rpc.as_ref()?.poll_lifetime, "--poll-lifetime=[S]", "Set the RPC filter lifetime to S seconds. The filter has to be polled at least every S seconds , otherwise it is removed.", ["API and Console Options – WebSockets"] - FLAG flag_no_ws: (bool) = false, or |c: &Config| c.websockets.as_ref()?.disable.clone(), + FLAG flag_no_ws: (bool) = false, or |c: &Config| c.websockets.as_ref()?.disable, "--no-ws", "Disable the WebSockets JSON-RPC server.", - ARG arg_ws_port: (u16) = 8546u16, or |c: &Config| c.websockets.as_ref()?.port.clone(), + ARG arg_ws_port: (u16) = 8546u16, or |c: &Config| c.websockets.as_ref()?.port, "--ws-port=[PORT]", "Specify the port portion of the WebSockets JSON-RPC server.", @@ -471,7 +471,7 @@ usage! { "Specify maximum size for WS JSON-RPC requests in megabytes.", ["Metrics"] - FLAG flag_metrics: (bool) = false, or |c: &Config| c.metrics.as_ref()?.enable.clone(), + FLAG flag_metrics: (bool) = false, or |c: &Config| c.metrics.as_ref()?.enable, "--metrics", "Enable prometheus metrics (only full client).", @@ -479,7 +479,7 @@ usage! 
{ "--metrics-prefix=[prefix]", "Prepend the specified prefix to the exported metrics names.", - ARG arg_metrics_port: (u16) = 3000u16, or |c: &Config| c.metrics.as_ref()?.port.clone(), + ARG arg_metrics_port: (u16) = 3000u16, or |c: &Config| c.metrics.as_ref()?.port, "--metrics-port=[PORT]", "Specify the port portion of the metrics server.", @@ -488,7 +488,7 @@ usage! { "Specify the hostname portion of the metrics server, IP should be an interface's IP address, or all (all interfaces) or local.", ["API and Console Options – IPC"] - FLAG flag_no_ipc: (bool) = false, or |c: &Config| c.ipc.as_ref()?.disable.clone(), + FLAG flag_no_ipc: (bool) = false, or |c: &Config| c.ipc.as_ref()?.disable, "--no-ipc", "Disable JSON-RPC over IPC service.", @@ -501,15 +501,15 @@ usage! { "Specify custom API set available via JSON-RPC over IPC using a comma-delimited list of API names. Possible names are: all, safe, web3, net, eth, pubsub, personal, signer, parity, parity_pubsub, parity_accounts, parity_set, traces, rpc, secretstore. You can also disable a specific API by putting '-' in the front, example: all,-personal. 
'safe' enables the following APIs: web3, net, eth, pubsub, parity, parity_pubsub, traces, rpc", ["Secret Store Options"] - FLAG flag_no_secretstore: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable.clone(), + FLAG flag_no_secretstore: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable, "--no-secretstore", "Disable Secret Store functionality.", - FLAG flag_no_secretstore_http: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_http.clone(), + FLAG flag_no_secretstore_http: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_http, "--no-secretstore-http", "Disable Secret Store HTTP API.", - FLAG flag_no_secretstore_auto_migrate: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_auto_migrate.clone(), + FLAG flag_no_secretstore_auto_migrate: (bool) = false, or |c: &Config| c.secretstore.as_ref()?.disable_auto_migrate, "--no-secretstore-auto-migrate", "Do not run servers set change session automatically when servers set changes. This option has no effect when servers set is read from configuration file.", @@ -549,7 +549,7 @@ usage! { "--secretstore-interface=[IP]", "Specify the hostname portion for listening to Secret Store Key Server internal requests, IP should be an interface's IP address, or local.", - ARG arg_secretstore_port: (u16) = 8083u16, or |c: &Config| c.secretstore.as_ref()?.port.clone(), + ARG arg_secretstore_port: (u16) = 8083u16, or |c: &Config| c.secretstore.as_ref()?.port, "--secretstore-port=[PORT]", "Specify the port portion for listening to Secret Store Key Server internal requests.", @@ -557,7 +557,7 @@ usage! 
{ "--secretstore-http-interface=[IP]", "Specify the hostname portion for listening to Secret Store Key Server HTTP requests, IP should be an interface's IP address, or local.", - ARG arg_secretstore_http_port: (u16) = 8082u16, or |c: &Config| c.secretstore.as_ref()?.http_port.clone(), + ARG arg_secretstore_http_port: (u16) = 8082u16, or |c: &Config| c.secretstore.as_ref()?.http_port, "--secretstore-http-port=[PORT]", "Specify the port portion for listening to Secret Store Key Server HTTP requests.", @@ -574,31 +574,31 @@ usage! { "Hex-encoded public key of secret store administrator.", ["Sealing/Mining Options"] - FLAG flag_force_sealing: (bool) = false, or |c: &Config| c.mining.as_ref()?.force_sealing.clone(), + FLAG flag_force_sealing: (bool) = false, or |c: &Config| c.mining.as_ref()?.force_sealing, "--force-sealing", "Force the node to author new blocks as if it were always sealing/mining.", - FLAG flag_reseal_on_uncle: (bool) = false, or |c: &Config| c.mining.as_ref()?.reseal_on_uncle.clone(), + FLAG flag_reseal_on_uncle: (bool) = false, or |c: &Config| c.mining.as_ref()?.reseal_on_uncle, "--reseal-on-uncle", "Force the node to author new blocks when a new uncle block is imported.", - FLAG flag_remove_solved: (bool) = false, or |c: &Config| c.mining.as_ref()?.remove_solved.clone(), + FLAG flag_remove_solved: (bool) = false, or |c: &Config| c.mining.as_ref()?.remove_solved, "--remove-solved", "Move solved blocks from the work package queue instead of cloning them. 
This gives a slightly faster import speed, but means that extra solutions submitted for the same work package will go unused.", - FLAG flag_tx_queue_no_unfamiliar_locals: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_unfamiliar_locals.clone(), + FLAG flag_tx_queue_no_unfamiliar_locals: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_unfamiliar_locals, "--tx-queue-no-unfamiliar-locals", "Local transactions sent through JSON-RPC (HTTP, WebSockets, etc) will be treated as 'external' if the sending account is unknown.", - FLAG flag_tx_queue_no_early_reject: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_early_reject.clone(), + FLAG flag_tx_queue_no_early_reject: (bool) = false, or |c: &Config| c.mining.as_ref()?.tx_queue_no_early_reject, "--tx-queue-no-early-reject", "Disables transaction queue optimization to early reject transactions below minimal effective gas price. This allows local transactions to always enter the pool, despite it being full, but requires additional ecrecover on every transaction.", - FLAG flag_refuse_service_transactions: (bool) = false, or |c: &Config| c.mining.as_ref()?.refuse_service_transactions.clone(), + FLAG flag_refuse_service_transactions: (bool) = false, or |c: &Config| c.mining.as_ref()?.refuse_service_transactions, "--refuse-service-transactions", "Always refuse service transactions.", - FLAG flag_infinite_pending_block: (bool) = false, or |c: &Config| c.mining.as_ref()?.infinite_pending_block.clone(), + FLAG flag_infinite_pending_block: (bool) = false, or |c: &Config| c.mining.as_ref()?.infinite_pending_block, "--infinite-pending-block", "Pending block will be created with maximal possible gas limit and will execute all transactions in the queue. Note that such block is invalid and should never be attempted to be mined.", @@ -614,15 +614,15 @@ usage! { "--reseal-on-txs=[SET]", "Specify which transactions should force the node to reseal a block. 
SET is one of: none - never reseal on new transactions; own - reseal only on a new local transaction; ext - reseal only on a new external transaction; all - reseal on all new transactions.", - ARG arg_reseal_min_period: (u64) = 2000u64, or |c: &Config| c.mining.as_ref()?.reseal_min_period.clone(), + ARG arg_reseal_min_period: (u64) = 2000u64, or |c: &Config| c.mining.as_ref()?.reseal_min_period, "--reseal-min-period=[MS]", "Specify the minimum time between reseals from incoming transactions. MS is time measured in milliseconds.", - ARG arg_reseal_max_period: (u64) = 120000u64, or |c: &Config| c.mining.as_ref()?.reseal_max_period.clone(), + ARG arg_reseal_max_period: (u64) = 120000u64, or |c: &Config| c.mining.as_ref()?.reseal_max_period, "--reseal-max-period=[MS]", "Specify the maximum time since last block to enable force-sealing. MS is time measured in milliseconds.", - ARG arg_work_queue_size: (usize) = 20usize, or |c: &Config| c.mining.as_ref()?.work_queue_size.clone(), + ARG arg_work_queue_size: (usize) = 20usize, or |c: &Config| c.mining.as_ref()?.work_queue_size, "--work-queue-size=[ITEMS]", "Specify the number of historical work packages which are kept cached lest a solution is found for them later. High values take more memory but result in fewer unusable solutions.", @@ -650,15 +650,15 @@ usage! { "--gas-cap=[GAS]", "A cap on how large we will raise the gas limit per block due to transaction volume.", - ARG arg_tx_queue_mem_limit: (u32) = 4u32, or |c: &Config| c.mining.as_ref()?.tx_queue_mem_limit.clone(), + ARG arg_tx_queue_mem_limit: (u32) = 4u32, or |c: &Config| c.mining.as_ref()?.tx_queue_mem_limit, "--tx-queue-mem-limit=[MB]", "Maximum amount of memory that can be used by the transaction queue. 
Setting this parameter to 0 disables limiting.", - ARG arg_tx_queue_size: (usize) = 8_192usize, or |c: &Config| c.mining.as_ref()?.tx_queue_size.clone(), + ARG arg_tx_queue_size: (usize) = 8_192usize, or |c: &Config| c.mining.as_ref()?.tx_queue_size, "--tx-queue-size=[LIMIT]", "Maximum amount of transactions in the queue (waiting to be included in next block).", - ARG arg_tx_queue_per_sender: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_per_sender.clone(), + ARG arg_tx_queue_per_sender: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_queue_per_sender, "--tx-queue-per-sender=[LIMIT]", "Maximum number of transactions per sender in the queue. By default it's 1% of the entire queue, but not less than 16.", @@ -674,11 +674,11 @@ usage! { "--stratum-interface=[IP]", "Interface address for Stratum server.", - ARG arg_stratum_port: (u16) = 8008u16, or |c: &Config| c.stratum.as_ref()?.port.clone(), + ARG arg_stratum_port: (u16) = 8008u16, or |c: &Config| c.stratum.as_ref()?.port, "--stratum-port=[PORT]", "Port for Stratum server to listen on.", - ARG arg_min_gas_price: (Option) = None, or |c: &Config| c.mining.as_ref()?.min_gas_price.clone(), + ARG arg_min_gas_price: (Option) = None, or |c: &Config| c.mining.as_ref()?.min_gas_price, "--min-gas-price=[STRING]", "Minimum amount of Wei per GAS to be paid for a transaction on top of base fee, to be accepted for mining. Overrides --usd-per-tx.", @@ -698,7 +698,7 @@ usage! { "--tx-gas-limit=[GAS]", "Apply a limit of GAS as the maximum amount of gas a single transaction may have for it to be mined.", - ARG arg_tx_time_limit: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_time_limit.clone(), + ARG arg_tx_time_limit: (Option) = None, or |c: &Config| c.mining.as_ref()?.tx_time_limit, "--tx-time-limit=[MS]", "Maximal time for processing single transaction. If enabled senders of transactions offending the limit will get other transactions penalized.", @@ -714,11 +714,11 @@ usage! 
{ "--stratum-secret=[STRING]", "Secret for authorizing Stratum server for peers.", - ARG arg_max_round_blocks_to_import: (usize) = 1usize, or |c: &Config| c.mining.as_ref()?.max_round_blocks_to_import.clone(), + ARG arg_max_round_blocks_to_import: (usize) = 1usize, or |c: &Config| c.mining.as_ref()?.max_round_blocks_to_import, "--max-round-blocks-to-import=[S]", "Maximal number of blocks to import for each import round.", - ARG arg_new_transactions_stats_period: (u64) = 0u64, or |c: &Config| c.mining.as_ref()?.new_transactions_stats_period.clone(), + ARG arg_new_transactions_stats_period: (u64) = 0u64, or |c: &Config| c.mining.as_ref()?.new_transactions_stats_period, "--new-transactions-stats-period=[N]", "Specify number of blocks for which new transactions will be returned in a result of `parity_newTransactionsStats` RPC call. Setting this parameter to 0 will return only transactions imported during the current block. (default: 0)", @@ -728,7 +728,7 @@ usage! { "Executable will auto-restart if exiting with 69", ["Miscellaneous Options"] - FLAG flag_no_color: (bool) = false, or |c: &Config| c.misc.as_ref()?.color.map(|c| !c).clone(), + FLAG flag_no_color: (bool) = false, or |c: &Config| c.misc.as_ref()?.color.map(|c| !c), "--no-color", "Don't use terminal color codes in output.", @@ -748,8 +748,12 @@ usage! { "--log-file=[FILENAME]", "Specify a filename into which logging should be appended.", + ARG arg_shutdown_on_missing_block_import: (Option) = None, or |c: &Config| c.misc.as_ref()?.shutdown_on_missing_block_import, + "--shutdown-on-missing-block-import=[STRING]", + "Shuts down if no block has been imported for N seconds. Defaults to None. Set to None or 0 to disable this feature. 
This setting is only respected by the HBBFT Engine", + ["Footprint Options"] - FLAG flag_scale_verifiers: (bool) = false, or |c: &Config| c.footprint.as_ref()?.scale_verifiers.clone(), + FLAG flag_scale_verifiers: (bool) = false, or |c: &Config| c.footprint.as_ref()?.scale_verifiers, "--scale-verifiers", "Automatically scale amount of verifier threads based on workload. Not guaranteed to be faster.", @@ -761,27 +765,27 @@ usage! { "--pruning=[METHOD]", "Configure pruning of the state/storage trie. METHOD may be one of auto, archive, fast: archive - keep all state trie data. No pruning. fast - maintain journal overlay. Fast but 50MB used. auto - use the method most recently synced or default to fast if none synced.", - ARG arg_pruning_history: (u64) = 64u64, or |c: &Config| c.footprint.as_ref()?.pruning_history.clone(), + ARG arg_pruning_history: (u64) = 64u64, or |c: &Config| c.footprint.as_ref()?.pruning_history, "--pruning-history=[NUM]", "Set a minimum number of recent states to keep in memory when pruning is active.", - ARG arg_pruning_memory: (usize) = 32usize, or |c: &Config| c.footprint.as_ref()?.pruning_memory.clone(), + ARG arg_pruning_memory: (usize) = 32usize, or |c: &Config| c.footprint.as_ref()?.pruning_memory, "--pruning-memory=[MB]", "The ideal amount of memory in megabytes to use to store recent states. 
As many states as possible will be kept within this limit, and at least --pruning-history states will always be kept.", - ARG arg_cache_size_db: (u32) = 128u32, or |c: &Config| c.footprint.as_ref()?.cache_size_db.clone(), + ARG arg_cache_size_db: (u32) = 128u32, or |c: &Config| c.footprint.as_ref()?.cache_size_db, "--cache-size-db=[MB]", "Override database cache size.", - ARG arg_cache_size_blocks: (u32) = 8u32, or |c: &Config| c.footprint.as_ref()?.cache_size_blocks.clone(), + ARG arg_cache_size_blocks: (u32) = 8u32, or |c: &Config| c.footprint.as_ref()?.cache_size_blocks, "--cache-size-blocks=[MB]", "Specify the preferred size of the blockchain cache in megabytes.", - ARG arg_cache_size_queue: (u32) = 40u32, or |c: &Config| c.footprint.as_ref()?.cache_size_queue.clone(), + ARG arg_cache_size_queue: (u32) = 40u32, or |c: &Config| c.footprint.as_ref()?.cache_size_queue, "--cache-size-queue=[MB]", "Specify the maximum size of memory to use for block queue.", - ARG arg_cache_size_state: (u32) = 25u32, or |c: &Config| c.footprint.as_ref()?.cache_size_state.clone(), + ARG arg_cache_size_state: (u32) = 25u32, or |c: &Config| c.footprint.as_ref()?.cache_size_state, "--cache-size-state=[MB]", "Specify the maximum size of memory to use for the state cache.", @@ -793,11 +797,11 @@ usage! { "--fat-db=[BOOL]", "Build appropriate information to allow enumeration of all accounts and storage keys. Doubles the size of the state database. 
BOOL may be one of on, off or auto.", - ARG arg_cache_size: (Option) = None, or |c: &Config| c.footprint.as_ref()?.cache_size.clone(), + ARG arg_cache_size: (Option) = None, or |c: &Config| c.footprint.as_ref()?.cache_size, "--cache-size=[MB]", "Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options.", - ARG arg_num_verifiers: (Option) = None, or |c: &Config| c.footprint.as_ref()?.num_verifiers.clone(), + ARG arg_num_verifiers: (Option) = None, or |c: &Config| c.footprint.as_ref()?.num_verifiers, "--num-verifiers=[INT]", "Amount of verifier threads to use or to begin with, if verifier auto-scaling is enabled.", @@ -807,7 +811,7 @@ usage! { "Skip block seal check.", ["Snapshot Options"] - FLAG flag_enable_snapshotting: (bool) = false, or |c: &Config| c.snapshots.as_ref()?.enable.clone(), + FLAG flag_enable_snapshotting: (bool) = false, or |c: &Config| c.snapshots.as_ref()?.enable, "--enable-snapshotting", "Enable automated snapshots which usually occur once every 5000 blocks.", @@ -1039,6 +1043,8 @@ struct Misc { color: Option, ports_shift: Option, unsafe_expose: Option, + /// Seconds until the system shuts down if no block has been imported. None disables this feature.
+ shutdown_on_missing_block_import: Option, } #[cfg(test)] @@ -1048,51 +1054,57 @@ mod tests { Operating, Rpc, SecretStore, Snapshots, Ws, }; use clap::ErrorKind as ClapErrorKind; - use toml; + use parity_version::NODE_SOFTWARE_NAME; #[test] fn should_accept_any_argument_order() { - let args = Args::parse(&["openethereum", "--no-warp", "account", "list"]).unwrap(); - assert_eq!(args.flag_no_warp, true); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--no-warp", "account", "list"]).unwrap(); + assert!(args.flag_no_warp); - let args = Args::parse(&["openethereum", "account", "list", "--no-warp"]).unwrap(); - assert_eq!(args.flag_no_warp, true); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "account", "list", "--no-warp"]).unwrap(); + assert!(args.flag_no_warp); - let args = Args::parse(&["openethereum", "--chain=dev", "account", "list"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--chain=dev", "account", "list"]).unwrap(); assert_eq!(args.arg_chain, "dev"); - let args = Args::parse(&["openethereum", "account", "list", "--chain=dev"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "account", "list", "--chain=dev"]).unwrap(); assert_eq!(args.arg_chain, "dev"); } #[test] fn should_reject_invalid_values() { - let args = Args::parse(&["openethereum", "--jsonrpc-port=8545"]); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--jsonrpc-port=8545"]); assert!(args.is_ok()); - let args = Args::parse(&["openethereum", "--jsonrpc-port=asd"]); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--jsonrpc-port=asd"]); assert!(args.is_err()); } #[test] fn should_parse_args_and_flags() { - let args = Args::parse(&["openethereum", "--no-warp"]).unwrap(); - assert_eq!(args.flag_no_warp, true); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--no-warp"]).unwrap(); + assert!(args.flag_no_warp); - let args = Args::parse(&["openethereum", "--pruning", "archive"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--pruning", "archive"]).unwrap(); 
assert_eq!(args.arg_pruning, "archive"); - let args = Args::parse(&["openethereum", "export", "state", "--no-storage"]).unwrap(); - assert_eq!(args.flag_export_state_no_storage, true); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "export", "state", "--no-storage"]).unwrap(); + assert!(args.flag_export_state_no_storage); - let args = - Args::parse(&["openethereum", "export", "state", "--min-balance", "123"]).unwrap(); + let args = Args::parse(&[ + NODE_SOFTWARE_NAME, + "export", + "state", + "--min-balance", + "123", + ]) + .unwrap(); assert_eq!(args.arg_export_state_min_balance, Some("123".to_string())); } #[test] fn should_exit_gracefully_on_unknown_argument() { - let result = Args::parse(&["openethereum", "--please-exit-gracefully"]); + let result = Args::parse(&[NODE_SOFTWARE_NAME, "--please-exit-gracefully"]); assert!(match result { Err(ArgsError::Clap(ref clap_error)) if clap_error.kind == ClapErrorKind::UnknownArgument => @@ -1103,39 +1115,40 @@ mod tests { #[test] fn should_use_subcommand_arg_default() { - let args = Args::parse(&["openethereum", "export", "state", "--at", "123"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "export", "state", "--at", "123"]).unwrap(); assert_eq!(args.arg_export_state_at, "123"); assert_eq!(args.arg_snapshot_at, "latest"); - let args = Args::parse(&["openethereum", "snapshot", "--at", "123", "file.dump"]).unwrap(); + let args = + Args::parse(&[NODE_SOFTWARE_NAME, "snapshot", "--at", "123", "file.dump"]).unwrap(); assert_eq!(args.arg_snapshot_at, "123"); assert_eq!(args.arg_export_state_at, "latest"); - let args = Args::parse(&["openethereum", "export", "state"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "export", "state"]).unwrap(); assert_eq!(args.arg_snapshot_at, "latest"); assert_eq!(args.arg_export_state_at, "latest"); - let args = Args::parse(&["openethereum", "snapshot", "file.dump"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "snapshot", "file.dump"]).unwrap(); 
assert_eq!(args.arg_snapshot_at, "latest"); assert_eq!(args.arg_export_state_at, "latest"); } #[test] fn should_parse_multiple_values() { - let args = Args::parse(&["openethereum", "account", "import", "~/1", "~/2"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "account", "import", "~/1", "~/2"]).unwrap(); assert_eq!( args.arg_account_import_path, Some(vec!["~/1".to_owned(), "~/2".to_owned()]) ); - let args = Args::parse(&["openethereum", "account", "import", "~/1,ext"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "account", "import", "~/1,ext"]).unwrap(); assert_eq!( args.arg_account_import_path, Some(vec!["~/1,ext".to_owned()]) ); let args = Args::parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--secretstore-nodes", "abc@127.0.0.1:3333,cde@10.10.10.10:4444", ]) @@ -1146,7 +1159,7 @@ mod tests { ); let args = Args::parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--password", "~/.safe/1", "--password", @@ -1158,7 +1171,7 @@ mod tests { vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()] ); - let args = Args::parse(&["openethereum", "--password", "~/.safe/1,~/.safe/2"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--password", "~/.safe/1,~/.safe/2"]).unwrap(); assert_eq!( args.arg_password, vec!["~/.safe/1".to_owned(), "~/.safe/2".to_owned()] @@ -1167,7 +1180,7 @@ mod tests { #[test] fn should_parse_global_args_with_subcommand() { - let args = Args::parse(&["openethereum", "--chain", "dev", "account", "list"]).unwrap(); + let args = Args::parse(&[NODE_SOFTWARE_NAME, "--chain", "dev", "account", "list"]).unwrap(); assert_eq!(args.arg_chain, "dev".to_owned()); } @@ -1195,7 +1208,8 @@ mod tests { config.parity = Some(operating); // when - let args = Args::parse_with_config(&["openethereum", "--chain", "xyz"], config).unwrap(); + let args = + Args::parse_with_config(&[NODE_SOFTWARE_NAME, "--chain", "xyz"], config).unwrap(); // then assert_eq!(args.arg_chain, "xyz".to_owned()); @@ -1221,7 +1235,8 @@ mod tests { let config = 
toml::from_str(include_str!("./tests/config.full.toml")).unwrap(); // when - let args = Args::parse_with_config(&["openethereum", "--chain", "xyz"], config).unwrap(); + let args = + Args::parse_with_config(&[NODE_SOFTWARE_NAME, "--chain", "xyz"], config).unwrap(); // then assert_eq!( @@ -1450,6 +1465,7 @@ mod tests { arg_log_file: Some("/var/log/openethereum.log".into()), flag_no_color: false, flag_no_config: false, + arg_shutdown_on_missing_block_import: None, } ); } @@ -1640,6 +1656,7 @@ mod tests { color: Some(true), ports_shift: Some(0), unsafe_expose: Some(false), + shutdown_on_missing_block_import: None, }), stratum: None, } @@ -1648,7 +1665,7 @@ mod tests { #[test] fn should_not_accept_min_peers_bigger_than_max_peers() { - match Args::parse(&["openethereum", "--max-peers=39", "--min-peers=40"]) { + match Args::parse(&[NODE_SOFTWARE_NAME, "--max-peers=39", "--min-peers=40"]) { Err(ArgsError::PeerConfiguration) => (), _ => assert_eq!(false, true), } @@ -1656,7 +1673,7 @@ mod tests { #[test] fn should_accept_max_peers_equal_or_bigger_than_min_peers() { - Args::parse(&["openethereum", "--max-peers=40", "--min-peers=40"]).unwrap(); - Args::parse(&["openethereum", "--max-peers=100", "--min-peers=40"]).unwrap(); + Args::parse(&[NODE_SOFTWARE_NAME, "--max-peers=40", "--min-peers=40"]).unwrap(); + Args::parse(&[NODE_SOFTWARE_NAME, "--max-peers=100", "--min-peers=40"]).unwrap(); } } diff --git a/bin/oe/cli/presets/mod.rs b/bin/oe/cli/presets/mod.rs index 14a7bae129..4c362c3bb4 100644 --- a/bin/oe/cli/presets/mod.rs +++ b/bin/oe/cli/presets/mod.rs @@ -23,6 +23,9 @@ pub fn preset_config_string(arg: &str) -> Result<&'static str, Error> { "non-standard-ports" => Ok(include_str!("./config.non-standard-ports.toml")), "insecure" => Ok(include_str!("./config.insecure.toml")), "dev-insecure" => Ok(include_str!("./config.dev-insecure.toml")), - _ => Err(Error::new(ErrorKind::InvalidInput, "Config doesn't match any presets [dev, mining, non-standard-ports, insecure, 
dev-insecure]")) + _ => Err(Error::new( + ErrorKind::InvalidInput, + "Config doesn't match any presets [dev, mining, non-standard-ports, insecure, dev-insecure]", + )), } } diff --git a/bin/oe/cli/usage_header.txt b/bin/oe/cli/usage_header.txt index a7f156d1f6..09b31531a6 100644 --- a/bin/oe/cli/usage_header.txt +++ b/bin/oe/cli/usage_header.txt @@ -1,5 +1,5 @@ -OpenEthereum Client. - By Wood/Paronyan/Kotewicz/Drwięga/Volf/Greeff +diamond-node bit.diamonds Node Software + By Haller/Forstenlechner/Wood/Paronyan/Kotewicz/Drwięga/Volf/Greeff Habermeier/Czaban/Gotchac/Redman/Nikolsky Schoedon/Tang/Adolfsson/Silva/Palm/Hirsz et al. Copyright 2015-2020 Parity Technologies (UK) Ltd. diff --git a/bin/oe/cli/version.txt b/bin/oe/cli/version.txt index 41fa042124..e0820a015a 100644 --- a/bin/oe/cli/version.txt +++ b/bin/oe/cli/version.txt @@ -1,10 +1,11 @@ -OpenEthereum Client. +diamond-node version {} Copyright 2015-2020 Parity Technologies (UK) Ltd. +Copyright 2020-2025 DMD diamond. License GPLv3+: GNU GPL version 3 or later . This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. -By Wood/Paronyan/Kotewicz/Drwięga/Volf/Greeff +By Haller/Forstenlechner/Wood/Paronyan/Kotewicz/Drwięga/Volf/Greeff Habermeier/Czaban/Gotchac/Redman/Nikolsky Schoedon/Tang/Adolfsson/Silva/Palm/Hirsz et al. 
diff --git a/bin/oe/configuration.rs b/bin/oe/configuration.rs index df7a23daab..8110076a27 100644 --- a/bin/oe/configuration.rs +++ b/bin/oe/configuration.rs @@ -20,20 +20,19 @@ use crate::{ hash::keccak, metrics::MetricsConfiguration, miner::pool, - sync::{self, validate_node_url, NetworkConfiguration}, + sync::{self, NetworkConfiguration, validate_node_url}, }; use ansi_term::Colour; -use crypto::publickey::{Public, Secret}; +use crate::crypto::publickey::{Public, Secret}; use ethcore::{ client::VMType, - miner::{stratum, MinerOptions}, + miner::{MinerOptions, stratum}, snapshot::SnapshotConfiguration, verification::queue::VerifierSettings, }; use ethereum_types::{Address, H256, U256}; -use num_cpus; use parity_version::{version, version_data}; use std::{ cmp, @@ -70,9 +69,8 @@ use crate::{ types::data_format::DataFormat, }; use dir::{ - self, default_data_path, default_local_path, + self, Directories, default_data_path, default_local_path, helpers::{replace_home, replace_home_and_local}, - Directories, }; use ethcore_logger::Config as LogConfig; use parity_rpc::NetworkSettings; @@ -110,7 +108,8 @@ pub enum Cmd { } pub struct Execute { - pub logger: LogConfig, + // pub logger: LogConfig, + /// executed command. pub cmd: Cmd, } @@ -146,7 +145,7 @@ impl Configuration { let mode = match self.args.arg_mode.as_ref() { "last" => None, mode => Some(to_mode( - &mode, + mode, self.args.arg_mode_timeout, self.args.arg_mode_alarm, )?), @@ -168,7 +167,7 @@ impl Configuration { let format = self.format()?; let metrics_conf = self.metrics_config()?; let keys_iterations = NonZeroU32::new(self.args.arg_keys_iterations) - .ok_or_else(|| "--keys-iterations must be non-zero")?; + .ok_or("--keys-iterations must be non-zero")?; let cmd = if self.args.flag_version { Cmd::Version @@ -182,23 +181,23 @@ impl Configuration { .accounts_config()? 
.password_files .first() - .map(|pwfile| PathBuf::from(pwfile)); + .map(PathBuf::from); Cmd::SignerSign { id: self.args.arg_signer_sign_id, - pwfile: pwfile, + pwfile, port: ws_conf.port, - authfile: authfile, + authfile, } } else if self.args.cmd_signer_reject { Cmd::SignerReject { id: self.args.arg_signer_reject_id, port: ws_conf.port, - authfile: authfile, + authfile, } } else if self.args.cmd_signer_list { Cmd::SignerList { port: ws_conf.port, - authfile: authfile, + authfile, } } else { unreachable!(); @@ -220,16 +219,16 @@ impl Configuration { })) } else if self.args.cmd_db && self.args.cmd_db_kill { Cmd::Blockchain(BlockchainCmd::Kill(KillBlockchain { - spec: spec, - dirs: dirs, - pruning: pruning, + spec, + dirs, + pruning, })) } else if self.args.cmd_account { let account_cmd = if self.args.cmd_account_new { let new_acc = NewAccount { iterations: keys_iterations, path: dirs.keys, - spec: spec, + spec, password_file: self .accounts_config()? .password_files @@ -240,7 +239,7 @@ impl Configuration { } else if self.args.cmd_account_list { let list_acc = ListAccounts { path: dirs.keys, - spec: spec, + spec, }; AccountCmd::List(list_acc) } else if self.args.cmd_account_import { @@ -251,7 +250,7 @@ impl Configuration { .expect("CLI argument is required; qed") .clone(), to: dirs.keys, - spec: spec, + spec, }; AccountCmd::Import(import_acc) } else { @@ -262,7 +261,7 @@ impl Configuration { let presale_cmd = ImportWallet { iterations: keys_iterations, path: dirs.keys, - spec: spec, + spec, wallet_path: self.args.arg_wallet_import_path.clone().unwrap(), password_file: self .accounts_config()? 
@@ -273,18 +272,18 @@ impl Configuration { Cmd::ImportPresaleWallet(presale_cmd) } else if self.args.cmd_import { let import_cmd = ImportBlockchain { - spec: spec, - cache_config: cache_config, - dirs: dirs, + spec, + cache_config, + dirs, file_path: self.args.arg_import_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, + format, + pruning, + pruning_history, pruning_memory: self.args.arg_pruning_memory, - compaction: compaction, - tracing: tracing, - fat_db: fat_db, - vm_type: vm_type, + compaction, + tracing, + fat_db, + vm_type, check_seal: !self.args.flag_no_seal_check, with_color: logger_config.color, verifier_settings: self.verifier_settings(), @@ -294,17 +293,17 @@ impl Configuration { } else if self.args.cmd_export { if self.args.cmd_export_blocks { let export_cmd = ExportBlockchain { - spec: spec, - cache_config: cache_config, - dirs: dirs, + spec, + cache_config, + dirs, file_path: self.args.arg_export_blocks_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, + format, + pruning, + pruning_history, pruning_memory: self.args.arg_pruning_memory, - compaction: compaction, - tracing: tracing, - fat_db: fat_db, + compaction, + tracing, + fat_db, from_block: to_block_id(&self.args.arg_export_blocks_from)?, to_block: to_block_id(&self.args.arg_export_blocks_to)?, check_seal: !self.args.flag_no_seal_check, @@ -313,17 +312,17 @@ impl Configuration { Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) } else if self.args.cmd_export_state { let export_cmd = ExportState { - spec: spec, - cache_config: cache_config, - dirs: dirs, + spec, + cache_config, + dirs, file_path: self.args.arg_export_state_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, + format, + pruning, + pruning_history, pruning_memory: self.args.arg_pruning_memory, - compaction: compaction, - tracing: tracing, - fat_db: fat_db, + compaction, + tracing, + fat_db, at: 
to_block_id(&self.args.arg_export_state_at)?, storage: !self.args.flag_export_state_no_storage, code: !self.args.flag_export_state_no_code, @@ -343,38 +342,38 @@ impl Configuration { } } else if self.args.cmd_snapshot { let snapshot_cmd = SnapshotCommand { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - pruning_history: pruning_history, + cache_config, + dirs, + spec, + pruning, + pruning_history, pruning_memory: self.args.arg_pruning_memory, - tracing: tracing, - fat_db: fat_db, - compaction: compaction, + tracing, + fat_db, + compaction, file_path: self.args.arg_snapshot_file.clone(), kind: snapshot::Kind::Take, block_at: to_block_id(&self.args.arg_snapshot_at)?, max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - snapshot_conf: snapshot_conf, + snapshot_conf, }; Cmd::Snapshot(snapshot_cmd) } else if self.args.cmd_restore { let restore_cmd = SnapshotCommand { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - pruning_history: pruning_history, + cache_config, + dirs, + spec, + pruning, + pruning_history, pruning_memory: self.args.arg_pruning_memory, - tracing: tracing, - fat_db: fat_db, - compaction: compaction, + tracing, + fat_db, + compaction, file_path: self.args.arg_restore_file.clone(), kind: snapshot::Kind::Restore, block_at: to_block_id("latest")?, // unimportant. 
max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, - snapshot_conf: snapshot_conf, + snapshot_conf, }; Cmd::Snapshot(restore_cmd) } else { @@ -392,54 +391,55 @@ impl Configuration { let verifier_settings = self.verifier_settings(); let run_cmd = RunCmd { - cache_config: cache_config, - dirs: dirs, - spec: spec, - pruning: pruning, - pruning_history: pruning_history, + cache_config, + dirs, + spec, + pruning, + pruning_history, pruning_memory: self.args.arg_pruning_memory, - daemon: daemon, + daemon, logger_config: logger_config.clone(), miner_options: self.miner_options()?, gas_price_percentile: self.args.arg_gas_price_percentile, poll_lifetime: self.args.arg_poll_lifetime, - ws_conf: ws_conf, - snapshot_conf: snapshot_conf, - http_conf: http_conf, - ipc_conf: ipc_conf, - net_conf: net_conf, - network_id: network_id, + ws_conf, + snapshot_conf, + http_conf, + ipc_conf, + net_conf, + network_id, acc_conf: self.accounts_config()?, gas_pricer_conf: self.gas_pricer_config()?, miner_extras: self.miner_extras()?, stratum: self.stratum_options()?, allow_missing_blocks: self.args.flag_jsonrpc_allow_missing_blocks, - mode: mode, - tracing: tracing, - fat_db: fat_db, - compaction: compaction, - vm_type: vm_type, - warp_sync: warp_sync, + mode, + tracing, + fat_db, + compaction, + vm_type, + warp_sync, warp_barrier: self.args.arg_warp_barrier, experimental_rpcs, net_settings: self.network_settings()?, - secretstore_conf: secretstore_conf, + secretstore_conf, name: self.args.arg_identity, custom_bootnodes: self.args.arg_bootnodes.is_some(), check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, new_transactions_stats_period: self.args.arg_new_transactions_stats_period, - verifier_settings: verifier_settings, + verifier_settings, no_persistent_txqueue: self.args.flag_no_persistent_txqueue, max_round_blocks_to_import: self.args.arg_max_round_blocks_to_import, metrics_conf, + shutdown_on_missing_block_import: 
self.args.arg_shutdown_on_missing_block_import, }; Cmd::Run(run_cmd) }; Ok(Execute { - logger: logger_config, - cmd: cmd, + // logger: logger_config, + cmd, }) } @@ -456,9 +456,7 @@ impl Configuration { gas_range_target: (floor, ceil), engine_signer: self.engine_signer()?, work_notify: self.work_notify(), - local_accounts: HashSet::from_iter( - to_addresses(&self.args.arg_tx_queue_locals)?.into_iter(), - ), + local_accounts: HashSet::from_iter(to_addresses(&self.args.arg_tx_queue_locals)?), }; Ok(extras) @@ -511,7 +509,7 @@ impl Configuration { } fn chain(&self) -> Result { - Ok(self.args.arg_chain.parse()?) + self.args.arg_chain.parse() } fn is_dev_chain(&self) -> Result { @@ -554,7 +552,7 @@ impl Configuration { fn accounts_config(&self) -> Result { let keys_iterations = NonZeroU32::new(self.args.arg_keys_iterations) - .ok_or_else(|| "--keys-iterations must be non-zero")?; + .ok_or("--keys-iterations must be non-zero")?; let cfg = AccountsConfig { iterations: keys_iterations, refresh_time: self.args.arg_accounts_refresh, @@ -699,7 +697,7 @@ impl Configuration { if "auto" == self.args.arg_usd_per_eth { Ok(GasPricerConfig::Calibrated { - usd_per_tx: usd_per_tx, + usd_per_tx, recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, api_endpoint: ETHERSCAN_ETH_PRICE_ENDPOINT.to_string(), }) @@ -717,7 +715,7 @@ impl Configuration { Ok(GasPricerConfig::Fixed(wei_per_gas)) } else { Ok(GasPricerConfig::Calibrated { - usd_per_tx: usd_per_tx, + usd_per_tx, recalibration_period: to_duration(self.args.arg_price_update_period.as_str())?, api_endpoint: self.args.arg_usd_per_eth.clone(), }) @@ -758,13 +756,13 @@ impl Configuration { return Err(format!( "Failed to resolve hostname of a boot node: {}", line - )) + )); } Some(_) => { return Err(format!( "Invalid node address format given for a boot node: {}", line - )) + )); } } } @@ -802,7 +800,7 @@ impl Configuration { return Err(format!( "Invalid host given with `--nat extip:{}`", &self.args.arg_nat[6..] 
- )) + )); } } } else { @@ -935,7 +933,7 @@ impl Configuration { }, processing_threads: self.args.arg_jsonrpc_threads, max_payload: match self.args.arg_jsonrpc_max_payload { - Some(max) if max > 0 => max as usize, + Some(max) if max > 0 => max, _ => 5usize, }, keep_alive: !self.args.flag_jsonrpc_no_keep_alive, @@ -1007,7 +1005,7 @@ impl Configuration { .args .arg_base_path .as_ref() - .map_or_else(|| default_data_path(), |s| s.clone()); + .map_or_else(default_data_path, |s| s.clone()); let data_path = replace_home("", &base_path); let is_using_base_path = self.args.arg_base_path.is_some(); // If base_path is set and db_path is not we default to base path subdir instead of LOCAL. @@ -1017,7 +1015,7 @@ impl Configuration { self.args .arg_db_path .as_ref() - .map_or(dir::CHAINS_PATH, |s| &s) + .map_or(dir::CHAINS_PATH, |s| s) }; let cache_path = if is_using_base_path { "$BASE/cache" @@ -1025,7 +1023,7 @@ impl Configuration { dir::CACHE_PATH }; - let db_path = replace_home_and_local(&data_path, &local_path, &base_db_path); + let db_path = replace_home_and_local(&data_path, &local_path, base_db_path); let cache_path = replace_home_and_local(&data_path, &local_path, cache_path); let keys_path = replace_home(&data_path, &self.args.arg_keys_path); let secretstore_path = replace_home(&data_path, &self.args.arg_secretstore_path); @@ -1089,7 +1087,7 @@ impl Configuration { #[cfg(feature = "accounts")] Some(ref s) if s.len() == 40 => Ok(Some(NodeSecretKey::KeyStore(s.parse() .map_err(|e| format!("Invalid secret store secret address: {}. Error: {:?}", s, e))?))), - Some(_) => Err(format!("Invalid secret store secret. Must be either existing account address, or hex-encoded private key")), + Some(_) => Err("Invalid secret store secret. 
Must be either existing account address, or hex-encoded private key".to_string()), None => Ok(None), } } @@ -1269,6 +1267,7 @@ mod tests { use dir::Directories; use ethcore::{client::VMType, miner::MinerOptions}; use parity_rpc::NetworkSettings; + use parity_version::NODE_SOFTWARE_NAME; use tempdir::TempDir; use crate::network::{AllowIP, IpFilter}; @@ -1282,9 +1281,6 @@ mod tests { static ref ITERATIONS: NonZeroU32 = NonZeroU32::new(10240).expect("10240 > 0; qed"); } - #[derive(Debug, PartialEq)] - struct TestPasswordReader(&'static str); - fn parse(args: &[&str]) -> Configuration { Configuration { args: Args::parse_without_config(args).unwrap(), @@ -1293,14 +1289,14 @@ mod tests { #[test] fn test_command_version() { - let args = vec!["openethereum", "--version"]; + let args = vec![NODE_SOFTWARE_NAME, "--version"]; let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Version); } #[test] fn test_command_account_new() { - let args = vec!["openethereum", "account", "new"]; + let args = vec![NODE_SOFTWARE_NAME, "account", "new"]; let conf = parse(&args); assert_eq!( conf.into_command().unwrap().cmd, @@ -1315,7 +1311,7 @@ mod tests { #[test] fn test_command_account_list() { - let args = vec!["openethereum", "account", "list"]; + let args = vec![NODE_SOFTWARE_NAME, "account", "list"]; let conf = parse(&args); assert_eq!( conf.into_command().unwrap().cmd, @@ -1328,7 +1324,13 @@ mod tests { #[test] fn test_command_account_import() { - let args = vec!["openethereum", "account", "import", "my_dir", "another_dir"]; + let args = vec![ + NODE_SOFTWARE_NAME, + "account", + "import", + "my_dir", + "another_dir", + ]; let conf = parse(&args); assert_eq!( conf.into_command().unwrap().cmd, @@ -1343,7 +1345,7 @@ mod tests { #[test] fn test_command_wallet_import() { let args = vec![ - "openethereum", + NODE_SOFTWARE_NAME, "wallet", "import", "my_wallet.json", @@ -1365,7 +1367,7 @@ mod tests { #[test] fn test_command_blockchain_import() { - let args = 
vec!["openethereum", "import", "blockchain.json"]; + let args = vec![NODE_SOFTWARE_NAME, "import", "blockchain.json"]; let conf = parse(&args); assert_eq!( conf.into_command().unwrap().cmd, @@ -1392,7 +1394,7 @@ mod tests { #[test] fn test_command_blockchain_export() { - let args = vec!["openethereum", "export", "blocks", "blockchain.json"]; + let args = vec![NODE_SOFTWARE_NAME, "export", "blocks", "blockchain.json"]; let conf = parse(&args); assert_eq!( conf.into_command().unwrap().cmd, @@ -1418,7 +1420,7 @@ mod tests { #[test] fn test_command_state_export() { - let args = vec!["openethereum", "export", "state", "state.json"]; + let args = vec![NODE_SOFTWARE_NAME, "export", "state", "state.json"]; let conf = parse(&args); assert_eq!( conf.into_command().unwrap().cmd, @@ -1447,7 +1449,7 @@ mod tests { #[test] fn test_command_blockchain_export_with_custom_format() { let args = vec![ - "openethereum", + NODE_SOFTWARE_NAME, "export", "blocks", "--format", @@ -1479,7 +1481,7 @@ mod tests { #[test] fn test_command_signer_new_token() { - let args = vec!["openethereum", "signer", "new-token"]; + let args = vec![NODE_SOFTWARE_NAME, "signer", "new-token"]; let conf = parse(&args); let expected = Directories::default().signer; assert_eq!( @@ -1512,7 +1514,7 @@ mod tests { #[test] fn test_ws_max_connections() { - let args = vec!["openethereum", "--ws-max-connections", "1"]; + let args = vec![NODE_SOFTWARE_NAME, "--ws-max-connections", "1"]; let conf = parse(&args); assert_eq!( @@ -1570,6 +1572,7 @@ mod tests { no_persistent_txqueue: false, max_round_blocks_to_import: 1, metrics_conf: MetricsConfiguration::default(), + shutdown_on_missing_block_import: None, }; expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); expected.secretstore_conf.http_enabled = cfg!(feature = "secretstore"); @@ -1583,7 +1586,7 @@ mod tests { // when let conf0 = parse(&["openethereum"]); - let conf2 = parse(&["openethereum", "--tx-queue-strategy", "gas_price"]); + let conf2 = 
parse(&[NODE_SOFTWARE_NAME, "--tx-queue-strategy", "gas_price"]); // then assert_eq!(conf0.miner_options().unwrap(), mining_options); @@ -1594,7 +1597,7 @@ mod tests { #[test] fn should_fail_on_force_reseal_and_reseal_min_period() { let conf = parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--chain", "dev", "--force-sealing", @@ -1611,7 +1614,7 @@ mod tests { // when let conf = parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--chain", "goerli", "--identity", @@ -1639,9 +1642,13 @@ mod tests { // when let conf0 = parse(&["openethereum"]); - let conf1 = parse(&["openethereum", "--jsonrpc-hosts", "none"]); - let conf2 = parse(&["openethereum", "--jsonrpc-hosts", "all"]); - let conf3 = parse(&["openethereum", "--jsonrpc-hosts", "parity.io,something.io"]); + let conf1 = parse(&[NODE_SOFTWARE_NAME, "--jsonrpc-hosts", "none"]); + let conf2 = parse(&[NODE_SOFTWARE_NAME, "--jsonrpc-hosts", "all"]); + let conf3 = parse(&[ + NODE_SOFTWARE_NAME, + "--jsonrpc-hosts", + "parity.io,something.io", + ]); // then assert_eq!(conf0.rpc_hosts(), Some(Vec::new())); @@ -1658,7 +1665,7 @@ mod tests { // given // when - let conf0 = parse(&["openethereum", "--ui-path=signer"]); + let conf0 = parse(&[NODE_SOFTWARE_NAME, "--ui-path=signer"]); // then assert_eq!(conf0.directories().signer, "signer".to_owned()); @@ -1673,7 +1680,7 @@ mod tests { .write_all(b" \n\t\n") .unwrap(); let args = vec![ - "openethereum", + NODE_SOFTWARE_NAME, "--reserved-peers", filename.to_str().unwrap(), ]; @@ -1687,7 +1694,7 @@ mod tests { let filename = tempdir.path().join("peers_comments"); File::create(&filename).unwrap().write_all(b"# Sample comment\nenode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@172.0.0.1:30303\n").unwrap(); let args = vec![ - "openethereum", + NODE_SOFTWARE_NAME, "--reserved-peers", filename.to_str().unwrap(), ]; @@ -1699,7 +1706,7 @@ mod tests { #[test] fn test_dev_preset() { - let args = 
vec!["openethereum", "--config", "dev"]; + let args = vec![NODE_SOFTWARE_NAME, "--config", "dev"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -1713,16 +1720,16 @@ mod tests { #[test] fn test_mining_preset() { - let args = vec!["openethereum", "--config", "mining"]; + let args = vec![NODE_SOFTWARE_NAME, "--config", "mining"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 50); assert_eq!(c.net_conf.max_peers, 100); - assert_eq!(c.ipc_conf.enabled, false); - assert_eq!(c.miner_options.force_sealing, true); - assert_eq!(c.miner_options.reseal_on_external_tx, true); - assert_eq!(c.miner_options.reseal_on_own_tx, true); + assert!(!c.ipc_conf.enabled); + assert!(c.miner_options.force_sealing); + assert!(c.miner_options.reseal_on_external_tx); + assert!(c.miner_options.reseal_on_own_tx); assert_eq!( c.miner_options.reseal_min_period, Duration::from_millis(4000) @@ -1737,7 +1744,7 @@ mod tests { #[test] fn test_non_standard_ports_preset() { - let args = vec!["openethereum", "--config", "non-standard-ports"]; + let args = vec![NODE_SOFTWARE_NAME, "--config", "non-standard-ports"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -1750,7 +1757,7 @@ mod tests { #[test] fn test_insecure_preset() { - let args = vec!["openethereum", "--config", "insecure"]; + let args = vec![NODE_SOFTWARE_NAME, "--config", "insecure"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -1768,7 +1775,7 @@ mod tests { #[test] fn test_dev_insecure_preset() { - let args = vec!["openethereum", "--config", "dev-insecure"]; + let args = vec![NODE_SOFTWARE_NAME, "--config", "dev-insecure"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -1789,7 
+1796,7 @@ mod tests { #[test] fn test_override_preset() { - let args = vec!["openethereum", "--config", "mining", "--min-peers=99"]; + let args = vec![NODE_SOFTWARE_NAME, "--config", "mining", "--min-peers=99"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -1801,15 +1808,16 @@ mod tests { #[test] fn test_identity_arg() { - let args = vec!["openethereum", "--identity", "Somebody"]; + let args = vec![NODE_SOFTWARE_NAME, "--identity", "Somebody"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.name, "Somebody"); - assert!(c - .net_conf - .client_version - .starts_with("OpenEthereum/Somebody/")); + assert!( + c.net_conf + .client_version + .starts_with("diamond-node/Somebody/") + ); } _ => panic!("Should be Cmd::Run"), } @@ -1820,9 +1828,9 @@ mod tests { // given // when - let conf0 = parse(&["openethereum", "--ports-shift", "1", "--stratum"]); + let conf0 = parse(&[NODE_SOFTWARE_NAME, "--ports-shift", "1", "--stratum"]); let conf1 = parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--ports-shift", "1", "--jsonrpc-port", @@ -1851,7 +1859,7 @@ mod tests { #[test] fn should_resolve_external_nat_hosts() { // Ip works - let conf = parse(&["openethereum", "--nat", "extip:1.1.1.1"]); + let conf = parse(&[NODE_SOFTWARE_NAME, "--nat", "extip:1.1.1.1"]); assert_eq!( conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "1.1.1.1" @@ -1859,7 +1867,7 @@ mod tests { assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); // Ip with port works, port is discarded - let conf = parse(&["openethereum", "--nat", "extip:192.168.1.1:123"]); + let conf = parse(&[NODE_SOFTWARE_NAME, "--nat", "extip:192.168.1.1:123"]); assert_eq!( conf.net_addresses().unwrap().1.unwrap().ip().to_string(), "192.168.1.1" @@ -1867,13 +1875,13 @@ mod tests { assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); // Hostname works - let 
conf = parse(&["openethereum", "--nat", "extip:ethereum.org"]); + let conf = parse(&[NODE_SOFTWARE_NAME, "--nat", "extip:ethereum.org"]); assert!(conf.net_addresses().unwrap().1.is_some()); assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); // Hostname works, garbage at the end is discarded let conf = parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--nat", "extip:ethereum.org:whatever bla bla 123", ]); @@ -1881,7 +1889,7 @@ mod tests { assert_eq!(conf.net_addresses().unwrap().1.unwrap().port(), 30303); // Garbage is error - let conf = parse(&["openethereum", "--nat", "extip:blabla"]); + let conf = parse(&[NODE_SOFTWARE_NAME, "--nat", "extip:blabla"]); assert!(conf.net_addresses().is_err()); } @@ -1890,7 +1898,7 @@ mod tests { // given // when - let conf0 = parse(&["openethereum", "--unsafe-expose"]); + let conf0 = parse(&[NODE_SOFTWARE_NAME, "--unsafe-expose"]); // then assert_eq!(&conf0.network_settings().unwrap().rpc_interface, "0.0.0.0"); @@ -1908,16 +1916,16 @@ mod tests { #[test] fn allow_ips() { - let all = parse(&["openethereum", "--allow-ips", "all"]); - let private = parse(&["openethereum", "--allow-ips", "private"]); - let block_custom = parse(&["openethereum", "--allow-ips", "-10.0.0.0/8"]); + let all = parse(&[NODE_SOFTWARE_NAME, "--allow-ips", "all"]); + let private = parse(&[NODE_SOFTWARE_NAME, "--allow-ips", "private"]); + let block_custom = parse(&[NODE_SOFTWARE_NAME, "--allow-ips", "-10.0.0.0/8"]); let combo = parse(&[ - "openethereum", + NODE_SOFTWARE_NAME, "--allow-ips", "public 10.0.0.0/8 -1.0.0.0/8", ]); - let ipv6_custom_public = parse(&["openethereum", "--allow-ips", "public fc00::/7"]); - let ipv6_custom_private = parse(&["openethereum", "--allow-ips", "private -fc00::/7"]); + let ipv6_custom_public = parse(&[NODE_SOFTWARE_NAME, "--allow-ips", "public fc00::/7"]); + let ipv6_custom_private = parse(&[NODE_SOFTWARE_NAME, "--allow-ips", "private -fc00::/7"]); assert_eq!( all.ip_filter().unwrap(), @@ -1979,7 +1987,7 @@ mod 
tests { use std::path; let std = parse(&["openethereum"]); - let base = parse(&["openethereum", "--base-path", "/test"]); + let base = parse(&[NODE_SOFTWARE_NAME, "--base-path", "/test"]); let base_path = ::dir::default_data_path(); let local_path = ::dir::default_local_path(); @@ -1995,7 +2003,7 @@ mod tests { #[test] fn should_respect_only_max_peers_and_default() { - let args = vec!["openethereum", "--max-peers=50"]; + let args = vec![NODE_SOFTWARE_NAME, "--max-peers=50"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -2008,7 +2016,7 @@ mod tests { #[test] fn should_respect_only_max_peers_less_than_default() { - let args = vec!["openethereum", "--max-peers=5"]; + let args = vec![NODE_SOFTWARE_NAME, "--max-peers=5"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -2021,7 +2029,7 @@ mod tests { #[test] fn should_respect_only_min_peers_and_default() { - let args = vec!["openethereum", "--min-peers=5"]; + let args = vec![NODE_SOFTWARE_NAME, "--min-peers=5"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -2034,7 +2042,7 @@ mod tests { #[test] fn should_respect_only_min_peers_and_greater_than_default() { - let args = vec!["openethereum", "--min-peers=500"]; + let args = vec![NODE_SOFTWARE_NAME, "--min-peers=500"]; let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { @@ -2044,4 +2052,19 @@ mod tests { _ => panic!("Should be Cmd::Run"), } } + + #[test] + fn should_parse_shutdown_on_missing_block_import() { + let args = vec![ + NODE_SOFTWARE_NAME, + "--shutdown-on-missing-block-import=1234", + ]; + let conf = Configuration::parse_cli(&args).unwrap(); + match conf.into_command().unwrap().cmd { + Cmd::Run(c) => { + assert_eq!(c.shutdown_on_missing_block_import, Some(1234)); + } + _ => panic!("Should be Cmd::Run"), + } 
+ } } diff --git a/bin/oe/db/rocksdb/blooms.rs b/bin/oe/db/rocksdb/blooms.rs index 06c8749d3a..ad5e397654 100644 --- a/bin/oe/db/rocksdb/blooms.rs +++ b/bin/oe/db/rocksdb/blooms.rs @@ -19,7 +19,6 @@ use super::{kvdb_rocksdb::DatabaseConfig, open_database}; use ethcore::error::Error; use ethereum_types::Bloom; -use rlp; use std::path::Path; const LOG_BLOOMS_ELEMENTS_PER_INDEX: u64 = 16; @@ -41,9 +40,9 @@ pub fn migrate_blooms>(path: P, config: &DatabaseConfig) -> Resul .filter(|(key, _)| key.len() == 6) .take_while(|(key, _)| key[0] == 3u8 && key[1] == 0u8) .map(|(key, group)| { - let index = (key[2] as u64) << 24 - | (key[3] as u64) << 16 - | (key[4] as u64) << 8 + let index = ((key[2] as u64) << 24) + | ((key[3] as u64) << 16) + | ((key[4] as u64) << 8) | (key[5] as u64); let number = index * LOG_BLOOMS_ELEMENTS_PER_INDEX; @@ -66,9 +65,9 @@ pub fn migrate_blooms>(path: P, config: &DatabaseConfig) -> Resul .take_while(|(key, _)| key[0] == 1u8 && key[1] == 0u8) .map(|(key, group)| { let index = (key[2] as u64) - | (key[3] as u64) << 8 - | (key[4] as u64) << 16 - | (key[5] as u64) << 24; + | ((key[3] as u64) << 8) + | ((key[4] as u64) << 16) + | ((key[5] as u64) << 24); let number = index * LOG_BLOOMS_ELEMENTS_PER_INDEX; let blooms = rlp::decode_list::(&group); diff --git a/bin/oe/db/rocksdb/helpers.rs b/bin/oe/db/rocksdb/helpers.rs index e75f6efded..a8f3556602 100644 --- a/bin/oe/db/rocksdb/helpers.rs +++ b/bin/oe/db/rocksdb/helpers.rs @@ -23,10 +23,10 @@ pub fn compaction_profile( profile: &DatabaseCompactionProfile, db_path: &Path, ) -> CompactionProfile { - match profile { - &DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path), - &DatabaseCompactionProfile::SSD => CompactionProfile::ssd(), - &DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), + match *profile { + DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path), + DatabaseCompactionProfile::SSD => CompactionProfile::ssd(), + DatabaseCompactionProfile::HDD => 
CompactionProfile::hdd(), } } @@ -34,7 +34,7 @@ pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> Dat let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS); client_db_config.memory_budget = client_config.db_cache_size; - client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path); + client_db_config.compaction = compaction_profile(&client_config.db_compaction, client_path); client_db_config } diff --git a/bin/oe/db/rocksdb/migration.rs b/bin/oe/db/rocksdb/migration.rs index 441927aeeb..7e15895f50 100644 --- a/bin/oe/db/rocksdb/migration.rs +++ b/bin/oe/db/rocksdb/migration.rs @@ -55,7 +55,7 @@ const BLOOMS_DB_VERSION: u32 = 13; /// Defines how many items are migrated to the new version of database at once. const BATCH_SIZE: usize = 1024; /// Version file name. -const VERSION_FILE_NAME: &'static str = "db_version"; +const VERSION_FILE_NAME: &str = "db_version"; /// Migration related erorrs. #[derive(Debug)] @@ -175,12 +175,12 @@ fn migrate_database( return Ok(()); } - let backup_path = backup_database_path(&db_path); + let backup_path = backup_database_path(db_path); // remove the backup dir if it exists let _ = fs::remove_dir_all(&backup_path); // migrate old database to the new one - let temp_path = migrations.execute(&db_path, version)?; + let temp_path = migrations.execute(db_path, version)?; // completely in-place migration leads to the paths being equal. // in that case, no need to shuffle directories. 
@@ -189,12 +189,12 @@ fn migrate_database( } // create backup - fs::rename(&db_path, &backup_path)?; + fs::rename(db_path, &backup_path)?; // replace the old database with the new one - if let Err(err) = fs::rename(&temp_path, &db_path) { + if let Err(err) = fs::rename(&temp_path, db_path) { // if something went wrong, bring back backup - fs::rename(&backup_path, &db_path)?; + fs::rename(&backup_path, db_path)?; return Err(err.into()); } @@ -208,7 +208,7 @@ fn exists(path: &Path) -> bool { /// Migrates the database. pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> Result<(), Error> { - let compaction_profile = helpers::compaction_profile(&compaction_profile, path); + let compaction_profile = helpers::compaction_profile(compaction_profile, path); // read version file. let version = current_version(path)?; diff --git a/bin/oe/db/rocksdb/mod.rs b/bin/oe/db/rocksdb/mod.rs index e5789f89a9..1c59fdbf32 100644 --- a/bin/oe/db/rocksdb/mod.rs +++ b/bin/oe/db/rocksdb/mod.rs @@ -22,7 +22,6 @@ use self::{ ethcore_blockchain::{BlockChainDB, BlockChainDBHandler}, kvdb_rocksdb::{Database, DatabaseConfig}, }; -use blooms_db; use ethcore::client::ClientConfig; use ethcore_db::KeyValueDB; use stats::PrometheusMetrics; @@ -55,7 +54,9 @@ impl BlockChainDB for AppDB { } impl PrometheusMetrics for AppDB { - fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {} + fn prometheus_metrics(&self, r: &mut stats::PrometheusRegistry) { + self.key_value.prometheus_metrics(r); + } } /// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path. 
@@ -106,7 +107,7 @@ pub fn open_database( fs::create_dir_all(&blooms_path)?; fs::create_dir_all(&trace_blooms_path)?; - let db = Database::open(&config, client_path)?; + let db = Database::open(config, client_path)?; let db_with_metrics = ethcore_db::DatabaseWithMetrics::new(db); let db = AppDB { diff --git a/bin/oe/helpers.rs b/bin/oe/helpers.rs index 688ba25e4f..786628d38b 100644 --- a/bin/oe/helpers.rs +++ b/bin/oe/helpers.rs @@ -21,7 +21,7 @@ use crate::{ sync::{self, validate_node_url}, upgrade::{upgrade, upgrade_data_paths}, }; -use dir::{helpers::replace_home, DatabaseDirectories}; +use dir::{DatabaseDirectories, helpers::replace_home}; use ethcore::{ client::{BlockId, ClientConfig, DatabaseCompactionProfile, Mode, VMType, VerifierType}, miner::{Penalization, PendingSet}, @@ -42,8 +42,8 @@ pub fn to_duration(s: &str) -> Result { } fn clean_0x(s: &str) -> &str { - if s.starts_with("0x") { - &s[2..] + if let Some(stripped) = s.strip_prefix("0x") { + stripped } else { s } @@ -263,6 +263,7 @@ pub fn to_client_config( pruning_memory: usize, check_seal: bool, max_round_blocks_to_import: usize, + shutdown_on_missing_block_import: Option, ) -> ClientConfig { let mut client_config = ClientConfig::default(); @@ -301,6 +302,7 @@ pub fn to_client_config( }; client_config.spec_name = spec_name; client_config.max_round_blocks_to_import = max_round_blocks_to_import; + client_config.shutdown_on_missing_block_import = shutdown_on_missing_block_import; client_config } @@ -329,7 +331,7 @@ pub fn execute_upgrades( /// Prompts user asking for password. 
pub fn password_prompt() -> Result { use rpassword::read_password; - const STDIN_ERROR: &'static str = "Unable to ask for password on non-interactive terminal."; + const STDIN_ERROR: &str = "Unable to ask for password on non-interactive terminal."; println!("Please note that password is NOT RECOVERABLE."); print!("Type password: "); @@ -354,8 +356,8 @@ pub fn password_from_file(path: String) -> Result { let passwords = passwords_from_files(&[path])?; // use only first password from the file passwords - .get(0) - .map(Password::clone) + .first() + .cloned() .ok_or_else(|| "Password file seems to be empty.".to_owned()) } @@ -370,7 +372,7 @@ pub fn passwords_from_files(files: &[String]) -> Result, String> { .collect::>(); Ok(lines) }).collect::>, String>>(); - Ok(passwords?.into_iter().flat_map(|x| x).collect()) + Ok(passwords?.into_iter().flatten().collect()) } #[cfg(test)] @@ -401,7 +403,7 @@ mod tests { assert_eq!(to_duration("1second").unwrap(), Duration::from_secs(1)); assert_eq!(to_duration("2seconds").unwrap(), Duration::from_secs(2)); assert_eq!(to_duration("15seconds").unwrap(), Duration::from_secs(15)); - assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(1 * 60)); + assert_eq!(to_duration("1minute").unwrap(), Duration::from_secs(60)); assert_eq!( to_duration("2minutes").unwrap(), Duration::from_secs(2 * 60) @@ -415,10 +417,7 @@ mod tests { to_duration("daily").unwrap(), Duration::from_secs(24 * 60 * 60) ); - assert_eq!( - to_duration("1hour").unwrap(), - Duration::from_secs(1 * 60 * 60) - ); + assert_eq!(to_duration("1hour").unwrap(), Duration::from_secs(60 * 60)); assert_eq!( to_duration("2hours").unwrap(), Duration::from_secs(2 * 60 * 60) @@ -429,7 +428,7 @@ mod tests { ); assert_eq!( to_duration("1day").unwrap(), - Duration::from_secs(1 * 24 * 60 * 60) + Duration::from_secs(24 * 60 * 60) ); assert_eq!( to_duration("2days").unwrap(), @@ -592,9 +591,9 @@ but the first password is trimmed let res = join_set(Some(&test_set)).unwrap(); assert!( - 
res == "0x1111111111111111111111111111111111111111,0x0000000000000000000000000000000000000000" - || - res == "0x0000000000000000000000000000000000000000,0x1111111111111111111111111111111111111111" - ); + res == "0x1111111111111111111111111111111111111111,0x0000000000000000000000000000000000000000" + || res + == "0x0000000000000000000000000000000000000000,0x1111111111111111111111111111111111111111" + ); } } diff --git a/bin/oe/informant.rs b/bin/oe/informant.rs index ae205f6fc5..603ef8f880 100644 --- a/bin/oe/informant.rs +++ b/bin/oe/informant.rs @@ -23,8 +23,8 @@ use self::ansi_term::{ use std::{ sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, }, time::{Duration, Instant}, }; @@ -34,15 +34,14 @@ use crate::{ sync::{ManageNetwork, SyncProvider}, types::BlockNumber, }; -use atty; use ethcore::{ client::{ BlockChainClient, BlockChainInfo, BlockId, BlockInfo, BlockQueueInfo, ChainInfo, ChainNotify, Client, ClientIoMessage, ClientReport, NewBlocks, }, - snapshot::{service::Service as SnapshotService, RestorationStatus, SnapshotService as SS}, + snapshot::{RestorationStatus, SnapshotService as SS, service::Service as SnapshotService}, }; -use number_prefix::{binary_prefix, Prefixed, Standalone}; +use number_prefix::{Prefixed, Standalone, binary_prefix}; use parity_rpc::{informant::RpcStats, is_major_importing_or_waiting}; use parking_lot::{Mutex, RwLock}; @@ -206,10 +205,10 @@ impl Informant { ) -> Self { Informant { last_tick: RwLock::new(Instant::now()), - with_color: with_color, - target: target, - snapshot: snapshot, - rpc_stats: rpc_stats, + with_color, + target, + snapshot, + rpc_stats, last_import: Mutex::new(Instant::now()), skipped: AtomicUsize::new(0), skipped_txs: AtomicUsize::new(0), @@ -252,11 +251,11 @@ impl Informant { } = full_report; let rpc_stats = self.rpc_stats.as_ref(); - let snapshot_sync = sync_info.as_ref().map_or(false, |s| s.snapshot_sync) + let 
snapshot_sync = sync_info.as_ref().is_some_and(|s| s.snapshot_sync) && self .snapshot .as_ref() - .map_or(false, |s| match s.restoration_status() { + .is_some_and(|s| match s.restoration_status() { RestorationStatus::Ongoing { .. } | RestorationStatus::Initializing { .. } => { true } @@ -274,7 +273,8 @@ impl Informant { false => t, }; - info!(target: "import", "{}{} {} {} {}", + info!(target: "import", "#{} {} {} {} {} {}", + chain_info.best_block_number /* Block */, match importing { true => match snapshot_sync { false => format!("Syncing {} {} {} {}+{} Qed", @@ -315,15 +315,13 @@ impl Informant { None => String::new(), }, match sync_info.as_ref() { - Some(ref sync_info) => format!("{}{}/{} peers", + Some(sync_info) => format!("{}{}/{} peers", match importing { - true => format!("{}", - if self.target.executes_transactions() { + true => (if self.target.executes_transactions() { paint(Green.bold(), format!("{:>8} ", format!("LI:#{}", sync_info.last_imported_block_number))) } else { String::new() - } - ), + }).to_string(), false => match sync_info.last_imported_ancient_number { Some(number) => format!("{} ", paint(Yellow.bold(), format!("{:>8}", format!("AB:#{}", number)))), None => String::new(), @@ -334,9 +332,9 @@ impl Informant { ), _ => String::new(), }, - cache_sizes.display(Blue.bold(), &paint), + cache_sizes.display(Blue.bold(), paint), match rpc_stats { - Some(ref rpc_stats) => format!( + Some(rpc_stats) => format!( "RPC: {} conn, {} req/s, {} µs", paint(Blue.bold(), format!("{:2}", rpc_stats.sessions())), paint(Blue.bold(), format!("{:4}", rpc_stats.requests_rate())), diff --git a/bin/oe/lib.rs b/bin/oe/lib.rs index 7202dc0dfb..6be25db33c 100644 --- a/bin/oe/lib.rs +++ b/bin/oe/lib.rs @@ -25,7 +25,6 @@ extern crate atty; extern crate dir; extern crate futures; extern crate jsonrpc_core; -extern crate num_cpus; extern crate number_prefix; extern crate parking_lot; extern crate regex; @@ -51,7 +50,7 @@ extern crate ethcore_miner as miner; extern crate 
ethcore_network as network; extern crate ethcore_service; extern crate ethcore_sync as sync; -extern crate ethereum_types; +use ethereum_types; extern crate ethkey; extern crate ethstore; extern crate fetch; @@ -127,7 +126,8 @@ use crate::{ use std::alloc::System; pub use self::{configuration::Configuration, run::RunningClient}; -pub use ethcore_logger::{setup_log, Config as LoggerConfig, RotatingLogger}; +pub use ethcore::exit::ShutdownManager; +pub use ethcore_logger::{Config as LoggerConfig, RotatingLogger, setup_log}; pub use parity_rpc::PubSubSession; #[cfg(feature = "memory_profiling")] @@ -146,7 +146,18 @@ fn print_hash_of(maybe_file: Option) -> Result { } #[cfg(feature = "deadlock_detection")] -fn run_deadlock_detection_thread() { +#[cfg(feature = "shutdown-on-deadlock")] +fn on_deadlock_detected(shutdown: &Arc) { + warn!("Deadlock detected, trying to shutdown the node software"); + shutdown.demand_shutdown(); +} + +#[cfg(feature = "deadlock_detection")] +#[cfg(not(feature = "shutdown-on-deadlock"))] +fn on_deadlock_detected(_: &Arc) {} + +#[cfg(feature = "deadlock_detection")] +fn run_deadlock_detection_thread(shutdown: Arc) { use ansi_term::Style; use parking_lot::deadlock; use std::{thread, time::Duration}; @@ -155,25 +166,28 @@ fn run_deadlock_detection_thread() { let builder = std::thread::Builder::new().name("DeadlockDetection".to_string()); - // Create a background thread which checks for deadlocks every 10s - let spawned = builder.spawn(move || loop { - thread::sleep(Duration::from_secs(10)); - let deadlocks = deadlock::check_deadlock(); - if deadlocks.is_empty() { - continue; - } + // Create a background thread which checks for deadlocks + let spawned = builder.spawn(move || { + loop { + thread::sleep(Duration::from_secs(10)); + let deadlocks = deadlock::check_deadlock(); + if deadlocks.is_empty() { + continue; + } - warn!( - "{} {} detected", - deadlocks.len(), - Style::new().bold().paint("deadlock(s)") - ); - for (i, threads) in 
deadlocks.iter().enumerate() { - warn!("{} #{}", Style::new().bold().paint("Deadlock"), i); - for t in threads { - warn!("Thread Id {:#?}", t.thread_id()); - warn!("{:#?}", t.backtrace()); + warn!( + "{} {} detected", + deadlocks.len(), + Style::new().bold().paint("deadlock(s)") + ); + for (i, threads) in deadlocks.iter().enumerate() { + warn!("{} #{}", Style::new().bold().paint("Deadlock"), i); + for t in threads { + warn!("Thread Id {:#?}", t.thread_id()); + warn!("{:#?}", t.backtrace()); + } } + on_deadlock_detected(&shutdown); } }); @@ -194,18 +208,24 @@ pub enum ExecutionAction { Instant(Option), /// The client has started running and must be shut down manually by calling `shutdown`. - /// + /// /// If you don't call `shutdown()`, execution will continue in the background. Running(RunningClient), } -fn execute(command: Execute, logger: Arc) -> Result { +fn execute( + command: Execute, + logger: Arc, + shutdown: ShutdownManager, +) -> Result { + let shutdown_arc = Arc::new(shutdown); + #[cfg(feature = "deadlock_detection")] - run_deadlock_detection_thread(); + run_deadlock_detection_thread(shutdown_arc.clone()); match command.cmd { Cmd::Run(run_cmd) => { - let outcome = run::execute(run_cmd, logger)?; + let outcome = run::execute(run_cmd, logger, shutdown_arc)?; Ok(ExecutionAction::Running(outcome)) } Cmd::Version => Ok(ExecutionAction::Instant(Some(Args::print_version()))), @@ -250,6 +270,10 @@ fn execute(command: Execute, logger: Arc) -> Result) -> Result { - execute(conf.into_command()?, logger) +pub fn start( + conf: Configuration, + logger: Arc, + shutdown: ShutdownManager, +) -> Result { + execute(conf.into_command()?, logger, shutdown) } diff --git a/bin/oe/logger/Cargo.toml b/bin/oe/logger/Cargo.toml index 3e9655a252..3772996f50 100644 --- a/bin/oe/logger/Cargo.toml +++ b/bin/oe/logger/Cargo.toml @@ -4,6 +4,7 @@ name = "ethcore-logger" version = "1.12.0" license = "GPL-3.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] log = "0.4" 
@@ -12,6 +13,6 @@ atty = "0.2" lazy_static = "1.0" regex = "1.0" time = "0.1" -parking_lot = "0.11.1" -arrayvec = "0.4" +parking_lot = "0.12" +arrayvec = "0.7" ansi_term = "0.10" diff --git a/bin/oe/logger/src/lib.rs b/bin/oe/logger/src/lib.rs index 11465225a4..2ae056bb60 100644 --- a/bin/oe/logger/src/lib.rs +++ b/bin/oe/logger/src/lib.rs @@ -41,7 +41,7 @@ use std::{ thread, }; -pub use rotating::{init_log, RotatingLogger}; +pub use rotating::{RotatingLogger, init_log}; #[derive(Debug, PartialEq, Clone)] pub struct Config { diff --git a/bin/oe/logger/src/rotating.rs b/bin/oe/logger/src/rotating.rs index 7a00c85c3e..e2b0567ced 100644 --- a/bin/oe/logger/src/rotating.rs +++ b/bin/oe/logger/src/rotating.rs @@ -50,7 +50,7 @@ pub struct RotatingLogger { /// Defined logger levels levels: String, /// Logs array. Latest log is always at index 0 - logs: RwLock>, + logs: RwLock>, } impl RotatingLogger { @@ -59,7 +59,7 @@ impl RotatingLogger { pub fn new(levels: String) -> Self { RotatingLogger { levels: levels, - logs: RwLock::new(ArrayVec::<[_; LOG_SIZE]>::new()), + logs: RwLock::new(ArrayVec::<_, LOG_SIZE>::new()), } } @@ -78,7 +78,7 @@ impl RotatingLogger { } /// Return logs - pub fn logs(&self) -> RwLockReadGuard> { + pub fn logs(&self) -> RwLockReadGuard> { self.logs.read() } } diff --git a/bin/oe/main.rs b/bin/oe/main.rs index e9317e9ba6..c3d84b0ca6 100644 --- a/bin/oe/main.rs +++ b/bin/oe/main.rs @@ -33,31 +33,25 @@ extern crate ethcore_logger; #[cfg(windows)] extern crate winapi; +extern crate ethcore; + use std::{ io::Write, - process, + process::{self}, sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, }; use ansi_term::Colour; -use diamond_node::{start, ExecutionAction}; +use diamond_node::{ExecutionAction, ShutdownManager, start}; +use ethcore::exit::ExitStatus; use ethcore_logger::setup_log; use fdlimit::raise_fd_limit; use parity_daemonize::AsHandle; use parking_lot::{Condvar, Mutex}; -#[derive(Debug)] -/// Status used to exit 
or restart the program. -struct ExitStatus { - /// Whether the program panicked. - panicking: bool, - /// Whether the program should exit. - should_exit: bool, -} - fn main() -> Result<(), i32> { let conf = { let args = std::env::args().collect::>(); @@ -92,20 +86,17 @@ fn main() -> Result<(), i32> { // increase max number of open files raise_fd_limit(); - let exit = Arc::new(( - Mutex::new(ExitStatus { - panicking: false, - should_exit: false, - }), - Condvar::new(), - )); + //let lockMutex = + + let exit = Arc::new((Mutex::new(ExitStatus::new()), Condvar::new())); // Double panic can happen. So when we lock `ExitStatus` after the main thread is notified, it cannot be locked // again. let exiting = Arc::new(AtomicBool::new(false)); trace!(target: "mode", "Not hypervised: not setting exit handlers."); - let exec = start(conf, logger); + let shutdown = ShutdownManager::new(&exit); + let exec = start(conf, logger, shutdown); match exec { Ok(result) => match result { @@ -119,13 +110,10 @@ fn main() -> Result<(), i32> { let e = exit.clone(); let exiting = exiting.clone(); move |panic_msg| { - warn!("Panic occured, see stderr for details"); + warn!("Panic occured! 
{}", panic_msg); eprintln!("{}", panic_msg); if !exiting.swap(true, Ordering::SeqCst) { - *e.0.lock() = ExitStatus { - panicking: true, - should_exit: true, - }; + *e.0.lock() = ExitStatus::new_panicking(); e.1.notify_all(); } } @@ -136,20 +124,14 @@ fn main() -> Result<(), i32> { let exiting = exiting.clone(); move || { if !exiting.swap(true, Ordering::SeqCst) { - *e.0.lock() = ExitStatus { - panicking: false, - should_exit: true, - }; + *e.0.lock() = ExitStatus::new_should_exit(); e.1.notify_all(); } } }); - match res_set_handler { - Err(err) => { - warn!("could not setup ctrl+c handler: {:?}", err) - } - _ => {} + if let Err(err) = res_set_handler { + warn!("could not setup ctrl+c handler: {:?}", err) } // so the client has started successfully @@ -160,13 +142,13 @@ fn main() -> Result<(), i32> { // Wait for signal let mut lock = exit.0.lock(); - if !lock.should_exit { - let _ = exit.1.wait(&mut lock); + if !lock.should_exit() { + exit.1.wait(&mut lock); } client.shutdown(); - if lock.panicking { + if lock.is_panicking() { return Err(1); } } diff --git a/bin/oe/metrics.rs b/bin/oe/metrics.rs index 7c7119bcf7..76b9c1c09d 100644 --- a/bin/oe/metrics.rs +++ b/bin/oe/metrics.rs @@ -4,11 +4,11 @@ use crate::{futures::Future, rpc, rpc_apis}; use parking_lot::Mutex; -use hyper::{service::service_fn_ok, Body, Method, Request, Response, Server, StatusCode}; +use hyper::{Body, Method, Request, Response, Server, StatusCode, service::service_fn_ok}; use stats::{ - prometheus::{self, Encoder}, PrometheusMetrics, PrometheusRegistry, + prometheus::{self, Encoder}, }; #[derive(Debug, Clone, PartialEq)] @@ -59,6 +59,8 @@ fn handle_request( elapsed.as_millis() as i64, ); + reg.register_version(); + let mut buffer = vec![]; let encoder = prometheus::TextEncoder::new(); let metric_families = reg.registry().gather(); diff --git a/bin/oe/modules.rs b/bin/oe/modules.rs index d88a8b51a9..19bd211756 100644 --- a/bin/oe/modules.rs +++ b/bin/oe/modules.rs @@ -14,7 +14,7 @@ // You should 
have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use std::sync::{mpsc, Arc}; +use std::sync::{Arc, mpsc}; use crate::{ sync::{self, ConnectionFilter, NetworkConfiguration, Params, SyncConfig}, diff --git a/bin/oe/params.rs b/bin/oe/params.rs index af2747ab17..e3829d3d03 100644 --- a/bin/oe/params.rs +++ b/bin/oe/params.rs @@ -36,8 +36,9 @@ use parity_version::version_data; use crate::configuration; -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Default)] pub enum SpecType { + #[default] Foundation, Poanet, Xdai, @@ -58,12 +59,6 @@ pub enum SpecType { Custom(String), } -impl Default for SpecType { - fn default() -> Self { - SpecType::Foundation - } -} - impl str::FromStr for SpecType { type Err = String; @@ -155,18 +150,13 @@ impl SpecType { } } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Default)] pub enum Pruning { Specific(Algorithm), + #[default] Auto, } -impl Default for Pruning { - fn default() -> Self { - Pruning::Auto - } -} - impl str::FromStr for Pruning { type Err = String; @@ -214,10 +204,7 @@ impl str::FromStr for ResealPolicy { x => return Err(format!("Invalid reseal value: {}", x)), }; - let reseal = ResealPolicy { - own: own, - external: external, - }; + let reseal = ResealPolicy { own, external }; Ok(reseal) } @@ -276,8 +263,8 @@ impl GasPricerConfig { ref api_endpoint, } => GasPricer::new_calibrated(GasPriceCalibrator::new( GasPriceCalibratorOptions { - usd_per_tx: usd_per_tx, - recalibration_period: recalibration_period, + usd_per_tx, + recalibration_period, }, fetch, p, @@ -311,22 +298,17 @@ impl Default for MinerExtras { } /// 3-value enum. -#[derive(Debug, Clone, Copy, PartialEq)] +#[derive(Debug, Clone, Copy, PartialEq, Default)] pub enum Switch { /// True. On, /// False. Off, /// Auto. 
+ #[default] Auto, } -impl Default for Switch { - fn default() -> Self { - Switch::Auto - } -} - impl str::FromStr for Switch { type Err = String; @@ -357,13 +339,12 @@ pub fn fatdb_switch_to_bool( user_defaults: &UserDefaults, _algorithm: Algorithm, ) -> Result { - let result = match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { + match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { (false, Switch::On, false) => Err("FatDB resync required".into()), (_, Switch::On, _) => Ok(true), (_, Switch::Off, _) => Ok(false), (_, Switch::Auto, def) => Ok(def), - }; - result + } } pub fn mode_switch_to_bool( @@ -375,7 +356,7 @@ pub fn mode_switch_to_bool( #[cfg(test)] mod tests { - use super::{tracing_switch_to_bool, Pruning, ResealPolicy, SpecType, Switch}; + use super::{Pruning, ResealPolicy, SpecType, Switch, tracing_switch_to_bool}; use crate::user_defaults::UserDefaults; use journaldb::Algorithm; diff --git a/bin/oe/presale.rs b/bin/oe/presale.rs index c44c9fd1cf..188bbf89bb 100644 --- a/bin/oe/presale.rs +++ b/bin/oe/presale.rs @@ -15,10 +15,10 @@ // along with OpenEthereum. If not, see . 
use crate::{ + crypto::publickey, helpers::{password_from_file, password_prompt}, params::SpecType, }; -use crypto::publickey; use ethkey::Password; use ethstore::PresaleWallet; @@ -50,8 +50,8 @@ pub fn execute(cmd: ImportWallet) -> Result { #[cfg(feature = "accounts")] pub fn import_account(cmd: &ImportWallet, kp: publickey::KeyPair, password: Password) { - use accounts::{AccountProvider, AccountProviderSettings}; - use ethstore::{accounts_dir::RootDiskDirectory, EthStore}; + use crate::accounts::{AccountProvider, AccountProviderSettings}; + use ethstore::{EthStore, accounts_dir::RootDiskDirectory}; let dir = Box::new(RootDiskDirectory::create(cmd.path.clone()).unwrap()); let secret_store = Box::new(EthStore::open_with_iterations(dir, cmd.iterations).unwrap()); diff --git a/bin/oe/reserved_peer_management.rs b/bin/oe/reserved_peer_management.rs index 2d9ba32d37..54b7ed3b87 100644 --- a/bin/oe/reserved_peer_management.rs +++ b/bin/oe/reserved_peer_management.rs @@ -43,18 +43,18 @@ impl ReservedPeersManagement for ReservedPeersWrapper { // this remove should never fail, because we check just before self.current_reserved_peers.remove(peer); } - return remove_result - .map_err(|_e| format!("remove_reserved_peer failed for peer: {peer}")); + remove_result + .map_err(|_e| format!("remove_reserved_peer failed for peer: {peer}")) } None => { warn!("ManageNetwork instance not available."); - return Err("ManageNetwork instance not available.".to_string()); + Err("ManageNetwork instance not available.".to_string()) } } } else { - return Err(format!( + Err(format!( "Cannot remove reserved Peer: Peer not reserved: {peer}" - )); + )) } } @@ -82,7 +82,7 @@ impl ReservedPeersManagement for ReservedPeersWrapper { } } - return disconnected; + disconnected } /// Returns the devp2p network endpoint IP and Port information that is used to communicate with other peers. 
@@ -100,13 +100,15 @@ impl ReservedPeersManagement for ReservedPeersWrapper { #[cfg(test)] mod tests { use super::*; - use network::{NetworkContext, ProtocolId}; + use crate::{ + network::{NetworkContext, ProtocolId}, + sync::ManageNetwork, + }; use std::{ net::{Ipv4Addr, SocketAddrV4}, ops::RangeInclusive, sync::Arc, }; - use sync::ManageNetwork; pub struct TestManageNetwork; diff --git a/bin/oe/rpc.rs b/bin/oe/rpc.rs index d098b27763..8f49aed808 100644 --- a/bin/oe/rpc.rs +++ b/bin/oe/rpc.rs @@ -23,17 +23,16 @@ use crate::{ use dir::{default_data_path, helpers::replace_home}; use jsonrpc_core::MetaIoHandler; use parity_rpc::{ - self as rpc, + self as rpc, DomainsValidation, Metadata, informant::{Middleware, RpcStats}, - DomainsValidation, Metadata, }; use parity_runtime::Executor; -pub use parity_rpc::{HttpServer, IpcServer, RequestMiddleware}; +pub use parity_rpc::{HttpServer, IpcServer}; //pub use parity_rpc::ws::Server as WsServer; -pub use parity_rpc::ws::{ws, Server as WsServer}; +pub use parity_rpc::ws::{Server as WsServer, ws}; -pub const DAPPS_DOMAIN: &'static str = "web3.site"; +pub const DAPPS_DOMAIN: &str = "web3.site"; #[derive(Debug, Clone, PartialEq)] pub struct HttpConfiguration { @@ -195,8 +194,8 @@ pub fn new_ws( allowed_origins, allowed_hosts, conf.max_connections, - rpc::WsExtractor::new(path.clone()), - rpc::WsExtractor::new(path.clone()), + rpc::WsExtractor::new(path), + rpc::WsExtractor::new(path), rpc::WsStats::new(deps.stats.clone()), conf.max_payload, ); @@ -209,14 +208,16 @@ pub fn new_ws( // Err(e) => Err(format!("WebSockets error: {:?}", e)), // } match start_result { - Ok(server) => Ok(Some(server)), - Err(rpc::ws::Error::WsError(ws::Error { - kind: ws::ErrorKind::Io(ref err), .. 
- })) if err.kind() == io::ErrorKind::AddrInUse => Err( - format!("WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", url) - ), - Err(e) => Err(format!("WebSockets error: {:?}", e)), - } + Ok(server) => Ok(Some(server)), + Err(rpc::ws::Error::WsError(ws::Error { + kind: ws::ErrorKind::Io(ref err), + .. + })) if err.kind() == io::ErrorKind::AddrInUse => Err(format!( + "WebSockets address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --ws-port and --ws-interface options.", + url + )), + Err(e) => Err(format!("WebSockets error: {:?}", e)), + } } pub fn new_http( @@ -253,12 +254,13 @@ pub fn new_http( ); match start_result { - Ok(server) => Ok(Some(server)), - Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err( - format!("{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", id, url, options, options) - ), - Err(e) => Err(format!("{} error: {:?}", id, e)), - } + Ok(server) => Ok(Some(server)), + Err(ref err) if err.kind() == io::ErrorKind::AddrInUse => Err(format!( + "{} address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --{}-port and --{}-interface options.", + id, url, options, options + )), + Err(e) => Err(format!("{} error: {:?}", id, e)), + } } pub fn new_ipc( @@ -275,7 +277,7 @@ pub fn new_ipc( // Windows pipe paths are not on the FS. 
if !cfg!(windows) { if let Some(dir) = path.parent() { - ::std::fs::create_dir_all(&dir).map_err(|err| { + ::std::fs::create_dir_all(dir).map_err(|err| { format!( "Unable to create IPC directory at {}: {}", dir.display(), @@ -314,7 +316,7 @@ fn with_domain( items.insert(host.to_string()); items.insert(host.replace("127.0.0.1", "localhost")); items.insert(format!("http://*.{}", domain)); //proxypac - if let Some(port) = extract_port(&*host) { + if let Some(port) = extract_port(&host) { items.insert(format!("http://*.{}:{}", domain, port)); } } diff --git a/bin/oe/rpc_apis.rs b/bin/oe/rpc_apis.rs index 50db479cb6..a024714e57 100644 --- a/bin/oe/rpc_apis.rs +++ b/bin/oe/rpc_apis.rs @@ -33,9 +33,9 @@ use ethcore_logger::RotatingLogger; use fetch::Client as FetchClient; use jsonrpc_core::{self as core, MetaIoHandler}; use parity_rpc::{ + Host, Metadata, NetworkSettings, dispatch::FullDispatcher, informant::{ActivityNotifier, ClientNotifier}, - Host, Metadata, NetworkSettings, }; use parity_runtime::Executor; use parking_lot::Mutex; @@ -99,9 +99,10 @@ impl FromStr for Api { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub enum ApiSet { // Unsafe context (like jsonrpc over http) + #[default] UnsafeContext, // All possible APIs (safe context like token-protected WS interface) All, @@ -113,12 +114,6 @@ pub enum ApiSet { List(HashSet), } -impl Default for ApiSet { - fn default() -> Self { - ApiSet::UnsafeContext - } -} - impl PartialEq for ApiSet { fn eq(&self, other: &Self) -> bool { self.list_apis() == other.list_apis() @@ -219,6 +214,7 @@ pub struct FullDependencies { pub client: Arc, pub snapshot: Arc, pub sync: Arc, + #[allow(dead_code)] pub net: Arc, pub accounts: Arc, pub miner: Arc, @@ -265,7 +261,7 @@ impl FullDependencies { handler.extend_with(DebugClient::new(self.client.clone()).to_delegate()); } Api::Web3 => { - handler.extend_with(Web3Client::default().to_delegate()); + handler.extend_with(Web3Client.to_delegate()); } Api::Net => { 
handler.extend_with(NetClient::new(&self.sync).to_delegate()); @@ -411,7 +407,7 @@ impl FullDependencies { } Api::Traces => handler.extend_with(TracesClient::new(&self.client).to_delegate()), Api::Rpc => { - let modules = to_modules(&apis); + let modules = to_modules(apis); handler.extend_with(RpcClient::new(modules).to_delegate()); } Api::SecretStore => { diff --git a/bin/oe/run.rs b/bin/oe/run.rs index 23bc9817a3..92dc4feb52 100644 --- a/bin/oe/run.rs +++ b/bin/oe/run.rs @@ -17,7 +17,7 @@ use std::{ any::Any, str::FromStr, - sync::{atomic, Arc, Weak}, + sync::{Arc, Weak, atomic}, thread, time::{Duration, Instant}, }; @@ -28,16 +28,16 @@ use crate::{ db, helpers::{execute_upgrades, passwords_from_files, to_client_config}, informant::{FullNodeInformantData, Informant}, - metrics::{start_prometheus_metrics, MetricsConfiguration}, + metrics::{MetricsConfiguration, start_prometheus_metrics}, miner::{external::ExternalMiner, work_notify::WorkPoster}, modules, params::{ - fatdb_switch_to_bool, mode_switch_to_bool, tracing_switch_to_bool, AccountsConfig, - GasPricerConfig, MinerExtras, Pruning, SpecType, Switch, + AccountsConfig, GasPricerConfig, MinerExtras, Pruning, SpecType, Switch, + fatdb_switch_to_bool, mode_switch_to_bool, tracing_switch_to_bool, }, reserved_peer_management::ReservedPeersWrapper, rpc, rpc_apis, secretstore, signer, - sync::{self, SyncConfig, SyncProvider}, + sync::{self, SyncConfig, SyncProvider, SyncState}, user_defaults::UserDefaults, }; use ansi_term::Colour; @@ -46,7 +46,8 @@ use ethcore::{ client::{ BlockChainClient, BlockInfo, ChainSyncing, Client, DatabaseCompactionProfile, Mode, VMType, }, - miner::{self, stratum, Miner, MinerOptions, MinerService}, + exit::ShutdownManager, + miner::{self, Miner, MinerOptions, MinerService, stratum}, snapshot::{self, SnapshotConfiguration}, verification::queue::VerifierSettings, }; @@ -55,7 +56,7 @@ use ethcore_service::ClientService; use ethereum_types::{H256, U64}; use journaldb::Algorithm; use 
node_filter::NodeFilter; -use parity_rpc::{informant, is_major_importing, NetworkSettings}; +use parity_rpc::{NetworkSettings, informant, is_major_importing}; use parity_runtime::Runtime; use parity_version::version; @@ -112,6 +113,7 @@ pub struct RunCmd { pub no_persistent_txqueue: bool, pub max_round_blocks_to_import: usize, pub metrics_conf: MetricsConfiguration, + pub shutdown_on_missing_block_import: Option, } // node info fetcher for the local store. @@ -152,9 +154,24 @@ impl ChainSyncing for SyncProviderWrapper { Some(client_arc) => { is_major_importing(Some(sync_arc.status().state), client_arc.queue_info()) } - None => true, + None => { + debug!(target: "sync", "is_major_syncing: Client has been destroyed."); + true + } }, // We also indicate the "syncing" state when the SyncProvider has already been destroyed. + None => { + debug!(target: "sync", "is_major_syncing: sync_provider has been destroyed."); + true + } + } + } + + /// are we syncing in any means ? + fn is_syncing(&self) -> bool { + match self.sync_provider.upgrade() { + Some(sync_arc) => sync_arc.status().state != SyncState::Idle, + // We also indicate the "syncing" state when the SyncProvider has already been destroyed. None => true, } } @@ -163,7 +180,11 @@ impl ChainSyncing for SyncProviderWrapper { /// Executes the given run command. /// /// On error, returns what to print on stderr. -pub fn execute(cmd: RunCmd, logger: Arc) -> Result { +pub fn execute( + cmd: RunCmd, + logger: Arc, + shutdown: Arc, +) -> Result { // load spec let spec = cmd.spec.spec(&cmd.dirs.cache)?; @@ -209,7 +230,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result) -> Result) -> Result) -> Result) -> Result) -> Result { info!("Finishing work, please wait..."); + + // this is a backup thread that exits the process after 90 seconds, + // in case the shutdown routine does not finish in time. 
+ std::thread::Builder::new() + .name("diamond-node-force-quit".to_string()) + .spawn(move || { + + let duration_soft = 5; + // we make a force quit if after 90 seconds, if this shutdown routine + std::thread::sleep(Duration::from_secs(duration_soft)); + warn!(target: "shutdown", "shutdown not happened within {duration_soft} seconds, starting force exiting the process."); + std::thread::sleep(Duration::from_secs(1)); + std::process::exit(1); + }) + .expect("Failed to spawn Force shutdown thread"); + // Create a weak reference to the client so that we can wait on shutdown // until it is dropped let weak_client = Arc::downgrade(&client); @@ -738,8 +777,8 @@ fn print_running_environment(data_dir: &str, dirs: &Directories, db_dirs: &Datab fn wait_for_drop(w: Weak) { const SLEEP_DURATION: Duration = Duration::from_secs(1); - const WARN_TIMEOUT: Duration = Duration::from_secs(60); - const MAX_TIMEOUT: Duration = Duration::from_secs(300); + const WARN_TIMEOUT: Duration = Duration::from_secs(30); + const MAX_TIMEOUT: Duration = Duration::from_secs(60); let instant = Instant::now(); let mut warned = false; diff --git a/bin/oe/secretstore.rs b/bin/oe/secretstore.rs index d62cd844f2..c000e5cf89 100644 --- a/bin/oe/secretstore.rs +++ b/bin/oe/secretstore.rs @@ -14,8 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use crate::{account_utils::AccountProvider, sync::SyncProvider}; -use crypto::publickey::{Public, Secret}; +use crate::{ + account_utils::AccountProvider, + crypto::publickey::{Public, Secret}, + sync::SyncProvider, +}; use dir::{default_data_path, helpers::replace_home}; use ethcore::{client::Client, miner::Miner}; use ethereum_types::Address; @@ -84,6 +87,9 @@ pub struct Configuration { } /// Secret store dependencies +/// TODO: The compiler complains that none of the struct members are ever used +/// Remove this struct and all its dependencies +#[allow(dead_code)] pub struct Dependencies<'a> { /// Blockchain client. pub client: Arc, @@ -329,5 +335,5 @@ pub fn start( return Ok(None); } - KeyServer::new(conf, deps, executor).map(|s| Some(s)) + KeyServer::new(conf, deps, executor).map(Some) } diff --git a/bin/oe/signer.rs b/bin/oe/signer.rs index ff30c11f2b..0630a3d221 100644 --- a/bin/oe/signer.rs +++ b/bin/oe/signer.rs @@ -22,12 +22,10 @@ use std::{ use crate::{path::restrict_permissions_owner, rpc, rpc_apis}; use ansi_term::Colour::White; use ethcore_logger::Config as LogConfig; -use parity_rpc; -pub const CODES_FILENAME: &'static str = "authcodes"; +pub const CODES_FILENAME: &str = "authcodes"; pub struct NewToken { - pub token: String, pub message: String, } @@ -70,7 +68,6 @@ pub fn generate_token_and_url( }; Ok(NewToken { - token: code.clone(), message: format!( r#" Generated token: @@ -91,7 +88,7 @@ fn generate_new_token(path: &Path, logger_config_color: bool) -> io::Result format!("{}", White.bold().paint(&code[..])), - false => format!("{}", &code[..]), + false => code[..].to_string(), } ); Ok(code) diff --git a/bin/oe/snapshot.rs b/bin/oe/snapshot.rs index 3ed7f0f24c..e56f34cca1 100644 --- a/bin/oe/snapshot.rs +++ b/bin/oe/snapshot.rs @@ -25,11 +25,12 @@ use std::{ use crate::{hash::keccak, types::ids::BlockId}; use ethcore::{ client::{DatabaseCompactionProfile, Mode, VMType}, + exit::ShutdownManager, miner::Miner, snapshot::{ + Progress, 
RestorationStatus, SnapshotConfiguration, SnapshotService as SS, io::{PackedReader, PackedWriter, SnapshotReader}, service::Service as SnapshotService, - Progress, RestorationStatus, SnapshotConfiguration, SnapshotService as SS, }, }; use ethcore_service::ClientService; @@ -38,7 +39,7 @@ use crate::{ cache::CacheConfig, db, helpers::{execute_upgrades, to_client_config}, - params::{fatdb_switch_to_bool, tracing_switch_to_bool, Pruning, SpecType, Switch}, + params::{Pruning, SpecType, Switch, fatdb_switch_to_bool, tracing_switch_to_bool}, user_defaults::UserDefaults, }; use dir::Directories; @@ -220,6 +221,7 @@ impl SnapshotCommand { self.pruning_memory, true, self.max_round_blocks_to_import, + None, ); client_config.snapshot = self.snapshot_conf; @@ -239,6 +241,7 @@ impl SnapshotCommand { // TODO [ToDr] don't use test miner here // (actually don't require miner at all) Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ShutdownManager::null()), ) .map_err(|e| format!("Client service error: {:?}", e))?; @@ -317,7 +320,7 @@ impl SnapshotCommand { } }); - if let Err(e) = service.client().take_snapshot(writer, block_at, &*progress) { + if let Err(e) = service.client().take_snapshot(writer, block_at, &progress) { let _ = ::std::fs::remove_file(&file_path); return Err(format!( "Encountered fatal error while creating snapshot: {}", diff --git a/bin/oe/upgrade.rs b/bin/oe/upgrade.rs index c1a38d2c26..bc7e413353 100644 --- a/bin/oe/upgrade.rs +++ b/bin/oe/upgrade.rs @@ -16,17 +16,18 @@ //! 
Parity upgrade logic -use dir::{default_data_path, helpers::replace_home, home_dir, DatabaseDirectories}; +use dir::{DatabaseDirectories, default_data_path, helpers::replace_home, home_dir}; use journaldb::Algorithm; use semver::{SemVerError, Version}; use std::{ collections::*, - fs::{self, create_dir_all, File}, + fs::{self, File, create_dir_all}, io, io::{Read, Write}, path::{Path, PathBuf}, }; +#[allow(dead_code)] #[derive(Debug)] pub enum Error { CannotCreateConfigPath(io::Error), @@ -41,7 +42,7 @@ impl From for Error { } } -const CURRENT_VERSION: &'static str = env!("CARGO_PKG_VERSION"); +const CURRENT_VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Hash, PartialEq, Eq)] struct UpgradeKey { @@ -133,11 +134,11 @@ where } pub fn upgrade(db_path: &str) -> Result { - with_locked_version(db_path, |ver| upgrade_from_version(ver)) + with_locked_version(db_path, upgrade_from_version) } fn file_exists(path: &Path) -> bool { - match fs::metadata(&path) { + match fs::metadata(path) { Err(ref e) if e.kind() == io::ErrorKind::NotFound => false, _ => true, } @@ -145,12 +146,12 @@ fn file_exists(path: &Path) -> bool { #[cfg(any(test, feature = "accounts"))] pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) { - match fs::create_dir_all(&to).and_then(|()| fs::read_dir(from)) { + match fs::create_dir_all(to).and_then(|()| fs::read_dir(from)) { Ok(entries) => { let files: Vec<_> = entries .filter_map(|f| { f.ok().and_then(|f| { - if f.file_type().ok().map_or(false, |f| f.is_file()) { + if f.file_type().ok().is_some_and(|f| f.is_file()) { f.file_name().to_str().map(|s| s.to_owned()) } else { None @@ -190,11 +191,11 @@ pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) { } fn upgrade_dir_location(source: &PathBuf, dest: &PathBuf) { - if file_exists(&source) { - if !file_exists(&dest) { + if file_exists(source) { + if !file_exists(dest) { let mut parent = dest.clone(); parent.pop(); - if let Err(e) = fs::create_dir_all(&parent).and_then(|()| fs::rename(&source, 
&dest)) { + if let Err(e) = fs::create_dir_all(&parent).and_then(|()| fs::rename(source, dest)) { debug!("Skipped path {:?} -> {:?} :{:?}", source, dest, e); } else { info!( @@ -242,5 +243,5 @@ pub fn upgrade_data_paths(base_path: &str, dirs: &DatabaseDirectories, pruning: upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning)); upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path()); upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path()); - upgrade_user_defaults(&dirs); + upgrade_user_defaults(dirs); } diff --git a/bin/oe/user_defaults.rs b/bin/oe/user_defaults.rs index 95fa66a78e..33b8362c36 100644 --- a/bin/oe/user_defaults.rs +++ b/bin/oe/user_defaults.rs @@ -41,9 +41,9 @@ impl From for Seconds { } } -impl Into for Seconds { - fn into(self) -> Duration { - self.0 +impl From for Duration { + fn from(val: Seconds) -> Self { + val.0 } } @@ -77,9 +77,9 @@ pub enum Mode { Offline, } -impl Into for Mode { - fn into(self) -> ClientMode { - match self { +impl From for ClientMode { + fn from(val: Mode) -> Self { + match val { Mode::Active => ClientMode::Active, Mode::Passive { timeout, alarm } => ClientMode::Passive(timeout.into(), alarm.into()), Mode::Dark { timeout } => ClientMode::Dark(timeout.into()), @@ -127,7 +127,7 @@ impl UserDefaults { mod algorithm_serde { use journaldb::Algorithm; - use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; + use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; pub fn serialize(algorithm: &Algorithm, serializer: S) -> Result where diff --git a/crates/accounts/Cargo.toml b/crates/accounts/Cargo.toml index a7282ecf8c..4a94992f09 100644 --- a/crates/accounts/Cargo.toml +++ b/crates/accounts/Cargo.toml @@ -1,6 +1,6 @@ [package] -description = "OpenEthereum Account Management" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node Account Management" +homepage = "https://github.com/dmdcoin/diamond-node" 
license = "GPL-3.0" name = "ethcore-accounts" version = "0.1.0" @@ -13,7 +13,7 @@ ethkey = { path = "ethkey" } ethstore = { path = "ethstore" } log = "0.4" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } -parking_lot = "0.11.1" +parking_lot = "0.12" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" diff --git a/crates/accounts/ethkey/Cargo.toml b/crates/accounts/ethkey/Cargo.toml index 5b32bbaa2e..3b2d7d0ce9 100644 --- a/crates/accounts/ethkey/Cargo.toml +++ b/crates/accounts/ethkey/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity Ethereum Keys Generator" name = "ethkey" version = "0.3.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] edit-distance = "2.0" diff --git a/crates/accounts/ethkey/src/brain.rs b/crates/accounts/ethkey/src/brain.rs index 2cb6806fc5..d679809984 100644 --- a/crates/accounts/ethkey/src/brain.rs +++ b/crates/accounts/ethkey/src/brain.rs @@ -15,11 +15,11 @@ // along with OpenEthereum. If not, see . use parity_crypto::{ - publickey::{KeyPair, Secret}, Keccak256, + publickey::{KeyPair, Secret}, }; -use parity_wordlist; +use crate::{WordlistError, parity_wordlist}; /// Simple brainwallet. 
pub struct Brain(String); @@ -28,7 +28,7 @@ impl Brain { Brain(s) } - pub fn validate_phrase(phrase: &str, expected_words: usize) -> Result<(), ::WordlistError> { + pub fn validate_phrase(phrase: &str, expected_words: usize) -> Result<(), WordlistError> { parity_wordlist::validate_phrase(phrase, expected_words) } @@ -57,7 +57,7 @@ impl Brain { #[cfg(test)] mod tests { - use Brain; + use crate::Brain; #[test] fn test_brain() { diff --git a/crates/accounts/ethkey/src/brain_prefix.rs b/crates/accounts/ethkey/src/brain_prefix.rs index 819dce564e..d7226eabcb 100644 --- a/crates/accounts/ethkey/src/brain_prefix.rs +++ b/crates/accounts/ethkey/src/brain_prefix.rs @@ -56,7 +56,7 @@ impl BrainPrefix { #[cfg(test)] mod tests { - use BrainPrefix; + use crate::BrainPrefix; #[test] fn prefix_generator() { diff --git a/crates/accounts/ethkey/src/prefix.rs b/crates/accounts/ethkey/src/prefix.rs index e037c41fe2..4233ec8fc1 100644 --- a/crates/accounts/ethkey/src/prefix.rs +++ b/crates/accounts/ethkey/src/prefix.rs @@ -41,7 +41,7 @@ impl Prefix { #[cfg(test)] mod tests { - use Prefix; + use crate::Prefix; #[test] fn prefix_generator() { diff --git a/crates/accounts/ethstore/Cargo.toml b/crates/accounts/ethstore/Cargo.toml index 5405540592..d526be59d8 100644 --- a/crates/accounts/ethstore/Cargo.toml +++ b/crates/accounts/ethstore/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity Ethereum Key Management" name = "ethstore" version = "0.2.1" authors = ["Parity Technologies "] +edition = "2018" [dependencies] log = "0.4" @@ -15,10 +16,10 @@ serde_derive = "1.0" rustc-hex = "1.0" time = "0.1.34" itertools = "0.5" -parking_lot = "0.11.1" +parking_lot = "0.12" parity-crypto = { version = "0.6.2", features = [ "publickey"] } ethereum-types = "0.9.2" -smallvec = "0.6" +smallvec = "0.6.14" parity-wordlist = "1.3" tempdir = "0.3" lazy_static = "1.2.0" diff --git a/crates/accounts/ethstore/src/account/cipher.rs b/crates/accounts/ethstore/src/account/cipher.rs index ba3c1e7056..2407f10531 100644 
--- a/crates/accounts/ethstore/src/account/cipher.rs +++ b/crates/accounts/ethstore/src/account/cipher.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use json; +use crate::json; #[derive(Debug, PartialEq, Clone)] pub struct Aes128Ctr { diff --git a/crates/accounts/ethstore/src/account/crypto.rs b/crates/accounts/ethstore/src/account/crypto.rs index ea5149c8ff..bb021fbbdf 100644 --- a/crates/accounts/ethstore/src/account/crypto.rs +++ b/crates/accounts/ethstore/src/account/crypto.rs @@ -14,14 +14,16 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use account::{Aes128Ctr, Cipher, Kdf, Pbkdf2, Prf}; +use crate::{ + account::{Aes128Ctr, Cipher, Kdf, Pbkdf2, Prf}, + json, + random::Random, + Error, +}; use crypto::{self, publickey::Secret, Keccak256}; use ethkey::Password; -use json; -use random::Random; use smallvec::SmallVec; use std::{num::NonZeroU32, str}; -use Error; /// Encrypted data #[derive(Debug, PartialEq, Clone)] diff --git a/crates/accounts/ethstore/src/account/kdf.rs b/crates/accounts/ethstore/src/account/kdf.rs index 2841f17767..080dc15f53 100644 --- a/crates/accounts/ethstore/src/account/kdf.rs +++ b/crates/accounts/ethstore/src/account/kdf.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use json; +use crate::json; use std::num::NonZeroU32; #[derive(Debug, PartialEq, Clone)] diff --git a/crates/accounts/ethstore/src/account/mod.rs b/crates/accounts/ethstore/src/account/mod.rs index b1757474f5..ceb8116ac4 100644 --- a/crates/accounts/ethstore/src/account/mod.rs +++ b/crates/accounts/ethstore/src/account/mod.rs @@ -23,7 +23,7 @@ mod version; pub use self::{ cipher::{Aes128Ctr, Cipher}, crypto::Crypto, - kdf::{Kdf, Pbkdf2, Prf, Scrypt}, + kdf::{Kdf, Pbkdf2, Prf}, safe_account::SafeAccount, version::Version, }; diff --git a/crates/accounts/ethstore/src/account/safe_account.rs b/crates/accounts/ethstore/src/account/safe_account.rs index 89b4d17a51..66fa807e33 100644 --- a/crates/accounts/ethstore/src/account/safe_account.rs +++ b/crates/accounts/ethstore/src/account/safe_account.rs @@ -15,15 +15,13 @@ // along with OpenEthereum. If not, see . use super::crypto::Crypto; -use account::Version; +use crate::{account::Version, json, Error}; use crypto::{ self, publickey::{ecdh::agree, sign, Address, KeyPair, Message, Public, Secret, Signature}, }; use ethkey::Password; -use json; use std::num::NonZeroU32; -use Error; /// Account representation. #[derive(Debug, PartialEq, Clone)] diff --git a/crates/accounts/ethstore/src/account/version.rs b/crates/accounts/ethstore/src/account/version.rs index 514873868f..b68e258128 100644 --- a/crates/accounts/ethstore/src/account/version.rs +++ b/crates/accounts/ethstore/src/account/version.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use json; +use crate::json; #[derive(Debug, PartialEq, Clone)] pub enum Version { diff --git a/crates/accounts/ethstore/src/accounts_dir/disk.rs b/crates/accounts/ethstore/src/accounts_dir/disk.rs index 13a8b7c288..78346f715b 100644 --- a/crates/accounts/ethstore/src/accounts_dir/disk.rs +++ b/crates/accounts/ethstore/src/accounts_dir/disk.rs @@ -18,8 +18,11 @@ use super::{ vault::{VaultDiskDirectory, VAULT_FILE_NAME}, KeyDirectory, VaultKey, VaultKeyDirectory, VaultKeyDirectoryProvider, }; +use crate::{ + json::{self, Uuid}, + Error, SafeAccount, +}; use ethkey::Password; -use json::{self, Uuid}; use std::{ collections::HashMap, fs, io, @@ -27,8 +30,6 @@ use std::{ path::{Path, PathBuf}, }; use time; -use Error; -use SafeAccount; const IGNORED_FILES: &'static [&'static str] = &[ "thumbs.db", @@ -59,7 +60,7 @@ pub fn find_unique_filename_using_random_suffix( )); } - let suffix = ::random::random_string(4); + let suffix = crate::random::random_string(4); deduped_filename = format!("{}-{}", original_filename, suffix); path.set_file_name(&deduped_filename); retries += 1; @@ -419,7 +420,7 @@ mod test { use self::tempdir::TempDir; use super::{KeyDirectory, RootDiskDirectory, VaultKey}; - use account::SafeAccount; + use crate::account::SafeAccount; use crypto::publickey::{Generator, Random}; use std::{env, fs, num::NonZeroU32}; diff --git a/crates/accounts/ethstore/src/accounts_dir/memory.rs b/crates/accounts/ethstore/src/accounts_dir/memory.rs index 73f48ef7c3..c03ab5c3ff 100644 --- a/crates/accounts/ethstore/src/accounts_dir/memory.rs +++ b/crates/accounts/ethstore/src/accounts_dir/memory.rs @@ -20,8 +20,7 @@ use parking_lot::RwLock; use std::collections::HashMap; use super::KeyDirectory; -use Error; -use SafeAccount; +use crate::{Error, SafeAccount}; /// Accounts in-memory storage. 
#[derive(Default)] diff --git a/crates/accounts/ethstore/src/accounts_dir/mod.rs b/crates/accounts/ethstore/src/accounts_dir/mod.rs index 29fc50f2b0..d6e4337f35 100644 --- a/crates/accounts/ethstore/src/accounts_dir/mod.rs +++ b/crates/accounts/ethstore/src/accounts_dir/mod.rs @@ -16,10 +16,9 @@ //! Accounts Directory +use crate::{Error, SafeAccount}; use ethkey::Password; use std::{num::NonZeroU32, path::PathBuf}; -use Error; -use SafeAccount; mod disk; mod memory; diff --git a/crates/accounts/ethstore/src/accounts_dir/vault.rs b/crates/accounts/ethstore/src/accounts_dir/vault.rs index 3c4cf3f5ad..b981bf00d2 100644 --- a/crates/accounts/ethstore/src/accounts_dir/vault.rs +++ b/crates/accounts/ethstore/src/accounts_dir/vault.rs @@ -19,15 +19,13 @@ use super::{ disk::{self, DiskDirectory, KeyFileManager}, KeyDirectory, SetKeyError, VaultKey, VaultKeyDirectory, }; +use crate::{json, Error, SafeAccount}; use crypto::Keccak256; -use json; use parking_lot::Mutex; use std::{ fs, io, path::{Path, PathBuf}, }; -use Error; -use SafeAccount; /// Name of vault metadata file pub const VAULT_FILE_NAME: &'static str = "vault.json"; diff --git a/crates/accounts/ethstore/src/ethkey.rs b/crates/accounts/ethstore/src/ethkey.rs index aed0695bb3..3d8671acab 100644 --- a/crates/accounts/ethstore/src/ethkey.rs +++ b/crates/accounts/ethstore/src/ethkey.rs @@ -15,9 +15,9 @@ // along with OpenEthereum. If not, see . //! ethkey reexport to make documentation look pretty. 
+use crate::json; pub use _ethkey::*; pub use crypto::publickey::Address; -use json; impl Into for Address { fn into(self) -> json::H160 { diff --git a/crates/accounts/ethstore/src/ethstore.rs b/crates/accounts/ethstore/src/ethstore.rs index bf674e4581..9621a796e1 100644 --- a/crates/accounts/ethstore/src/ethstore.rs +++ b/crates/accounts/ethstore/src/ethstore.rs @@ -22,22 +22,19 @@ use std::{ time::{Duration, Instant}, }; -use account::SafeAccount; -use accounts_dir::{KeyDirectory, SetKeyError, VaultKey, VaultKeyDirectory}; +use crate::{ + account::SafeAccount, + accounts_dir::{KeyDirectory, SetKeyError, VaultKey, VaultKeyDirectory}, + json::{self, OpaqueKeyFile, Uuid}, + presale::PresaleWallet, + random::Random, + Derivation, Error, OpaqueSecret, SecretStore, SecretVaultRef, SimpleSecretStore, + StoreAccountRef, +}; use crypto::publickey::{ self, Address, ExtendedKeyPair, KeyPair, Message, Public, Secret, Signature, }; use ethkey::Password; -use json::{self, OpaqueKeyFile, Uuid}; -use presale::PresaleWallet; -use random::Random; -use Derivation; -use Error; -use OpaqueSecret; -use SecretStore; -use SecretVaultRef; -use SimpleSecretStore; -use StoreAccountRef; lazy_static! 
{ static ref KEY_ITERATIONS: NonZeroU32 = @@ -898,12 +895,14 @@ mod tests { use self::tempdir::TempDir; use super::{EthMultiStore, EthStore}; - use accounts_dir::{KeyDirectory, MemoryDirectory, RootDiskDirectory}; + use crate::{ + accounts_dir::{KeyDirectory, MemoryDirectory, RootDiskDirectory}, + secret_store::{ + Derivation, SecretStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef, + }, + }; use crypto::publickey::{Generator, KeyPair, Random}; use ethereum_types::H256; - use secret_store::{ - Derivation, SecretStore, SecretVaultRef, SimpleSecretStore, StoreAccountRef, - }; fn keypair() -> KeyPair { Random.generate() diff --git a/crates/accounts/ethstore/src/import.rs b/crates/accounts/ethstore/src/import.rs index 20cbcd787b..ff199167fa 100644 --- a/crates/accounts/ethstore/src/import.rs +++ b/crates/accounts/ethstore/src/import.rs @@ -16,9 +16,11 @@ use std::{collections::HashSet, fs, path::Path}; -use accounts_dir::{DiskKeyFileManager, KeyDirectory, KeyFileManager}; +use crate::{ + accounts_dir::{DiskKeyFileManager, KeyDirectory, KeyFileManager}, + Error, +}; use crypto::publickey::Address; -use Error; /// Import an account from a file. 
pub fn import_account(path: &Path, dst: &dyn KeyDirectory) -> Result { diff --git a/crates/accounts/ethstore/src/json/key_file.rs b/crates/accounts/ethstore/src/json/key_file.rs index 9c2aec51e0..00e9b9548c 100644 --- a/crates/accounts/ethstore/src/json/key_file.rs +++ b/crates/accounts/ethstore/src/json/key_file.rs @@ -206,7 +206,7 @@ impl KeyFile { #[cfg(test)] mod tests { - use json::{Aes128Ctr, Cipher, Crypto, Kdf, KeyFile, Scrypt, Uuid, Version}; + use crate::json::{Aes128Ctr, Cipher, Crypto, Kdf, KeyFile, Scrypt, Uuid, Version}; use serde_json; use std::str::FromStr; diff --git a/crates/accounts/ethstore/src/json/mod.rs b/crates/accounts/ethstore/src/json/mod.rs index 02614eaea8..27d2c994eb 100644 --- a/crates/accounts/ethstore/src/json/mod.rs +++ b/crates/accounts/ethstore/src/json/mod.rs @@ -32,13 +32,13 @@ mod version; pub use self::{ bytes::Bytes, cipher::{Aes128Ctr, Cipher, CipherSer, CipherSerParams}, - crypto::{CipherText, Crypto}, + crypto::Crypto, error::Error, hash::{H128, H160, H256}, id::Uuid, kdf::{Kdf, KdfSer, KdfSerParams, Pbkdf2, Prf, Scrypt}, key_file::{KeyFile, OpaqueKeyFile}, - presale::{Encseed, PresaleWallet}, + presale::PresaleWallet, vault_file::VaultFile, vault_key_file::{ insert_vault_name_to_json_meta, remove_vault_name_from_json_meta, VaultKeyFile, diff --git a/crates/accounts/ethstore/src/json/presale.rs b/crates/accounts/ethstore/src/json/presale.rs index a2c9d6ce2d..84b51f5b68 100644 --- a/crates/accounts/ethstore/src/json/presale.rs +++ b/crates/accounts/ethstore/src/json/presale.rs @@ -38,7 +38,7 @@ impl PresaleWallet { #[cfg(test)] mod tests { - use json::{PresaleWallet, H160}; + use crate::json::{PresaleWallet, H160}; use serde_json; use std::str::FromStr; diff --git a/crates/accounts/ethstore/src/json/vault_file.rs b/crates/accounts/ethstore/src/json/vault_file.rs index cd3de8e156..87f03d737e 100644 --- a/crates/accounts/ethstore/src/json/vault_file.rs +++ b/crates/accounts/ethstore/src/json/vault_file.rs @@ -45,7 +45,7 @@ 
impl VaultFile { #[cfg(test)] mod test { - use json::{Aes128Ctr, Cipher, Crypto, Kdf, Pbkdf2, Prf, VaultFile}; + use crate::json::{Aes128Ctr, Cipher, Crypto, Kdf, Pbkdf2, Prf, VaultFile}; use serde_json; use std::num::NonZeroU32; diff --git a/crates/accounts/ethstore/src/json/vault_key_file.rs b/crates/accounts/ethstore/src/json/vault_key_file.rs index 47b53a2908..f48d52a4a0 100644 --- a/crates/accounts/ethstore/src/json/vault_key_file.rs +++ b/crates/accounts/ethstore/src/json/vault_key_file.rs @@ -117,7 +117,7 @@ impl VaultKeyMeta { #[cfg(test)] mod test { - use json::{ + use crate::json::{ insert_vault_name_to_json_meta, remove_vault_name_from_json_meta, Aes128Ctr, Cipher, Crypto, Kdf, Pbkdf2, Prf, VaultKeyFile, Version, }; diff --git a/crates/accounts/ethstore/src/lib.rs b/crates/accounts/ethstore/src/lib.rs index 5abfdf2ae3..ebe5b0ec14 100644 --- a/crates/accounts/ethstore/src/lib.rs +++ b/crates/accounts/ethstore/src/lib.rs @@ -29,7 +29,6 @@ extern crate smallvec; extern crate tempdir; extern crate time; -extern crate ethereum_types; extern crate ethkey as _ethkey; extern crate parity_crypto as crypto; extern crate parity_wordlist; diff --git a/crates/accounts/ethstore/src/presale.rs b/crates/accounts/ethstore/src/presale.rs index 45cc0de5d6..73118db225 100644 --- a/crates/accounts/ethstore/src/presale.rs +++ b/crates/accounts/ethstore/src/presale.rs @@ -14,15 +14,14 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{json, Error}; use crypto::{ self, pbkdf2, publickey::{Address, KeyPair, Secret}, Keccak256, }; use ethkey::Password; -use json; use std::{fs, num::NonZeroU32, path::Path}; -use Error; /// Pre-sale wallet. 
pub struct PresaleWallet { @@ -87,7 +86,7 @@ impl PresaleWallet { #[cfg(test)] mod tests { use super::PresaleWallet; - use json; + use crate::json; #[test] fn test() { diff --git a/crates/accounts/ethstore/src/secret_store.rs b/crates/accounts/ethstore/src/secret_store.rs index 36c6f9ed57..4876352306 100644 --- a/crates/accounts/ethstore/src/secret_store.rs +++ b/crates/accounts/ethstore/src/secret_store.rs @@ -14,17 +14,18 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{ + json::{OpaqueKeyFile, Uuid}, + Error, OpaqueSecret, +}; use crypto::publickey::{Address, Message, Public, Secret, Signature}; use ethereum_types::H256; use ethkey::Password; -use json::{OpaqueKeyFile, Uuid}; use std::{ cmp::Ordering, hash::{Hash, Hasher}, path::PathBuf, }; -use Error; -use OpaqueSecret; /// Key directory reference #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] diff --git a/crates/accounts/ethstore/tests/api.rs b/crates/accounts/ethstore/tests/api.rs index 1eb2fa7f0e..fde95be71b 100644 --- a/crates/accounts/ethstore/tests/api.rs +++ b/crates/accounts/ethstore/tests/api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-extern crate ethereum_types; +use ethereum_types; extern crate ethstore; extern crate parity_crypto as crypto; extern crate rand; diff --git a/crates/concensus/ethash/Cargo.toml b/crates/concensus/ethash/Cargo.toml index 448c14f49b..e3a1ff054c 100644 --- a/crates/concensus/ethash/Cargo.toml +++ b/crates/concensus/ethash/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity Ethereum Ethash & ProgPoW Implementations" name = "ethash" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] crunchy = "0.1.0" @@ -12,7 +13,7 @@ keccak-hash = "0.5.0" tiny-keccak = "2.0.2" log = "0.4" memmap = "0.6" -parking_lot = "0.11.1" +parking_lot = "0.12" primal = "0.2.3" [dev-dependencies] diff --git a/crates/concensus/ethash/benches/progpow.rs b/crates/concensus/ethash/benches/progpow.rs index 939818e7b0..68fee06906 100644 --- a/crates/concensus/ethash/benches/progpow.rs +++ b/crates/concensus/ethash/benches/progpow.rs @@ -7,7 +7,7 @@ extern crate tempdir; use criterion::Criterion; use ethash::progpow; -use ethash::{compute::light_compute, NodeCacheBuilder, OptimizeFor}; +use ethash::{NodeCacheBuilder, OptimizeFor, compute::light_compute}; use rustc_hex::FromHex; use tempdir::TempDir; diff --git a/crates/concensus/ethash/src/cache.rs b/crates/concensus/ethash/src/cache.rs index 38b2b2de65..bcb087f0f8 100644 --- a/crates/concensus/ethash/src/cache.rs +++ b/crates/concensus/ethash/src/cache.rs @@ -14,14 +14,16 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use compute::Light; +use crate::{ + compute::Light, + keccak::{H256, keccak_512}, + seed_compute::SeedHashCompute, +}; use either::Either; -use keccak::{keccak_512, H256}; use memmap::MmapMut; use parking_lot::Mutex; -use seed_compute::SeedHashCompute; -use shared::{epoch, get_cache_size, to_hex, Node, ETHASH_CACHE_ROUNDS, NODE_BYTES}; +use crate::shared::{ETHASH_CACHE_ROUNDS, NODE_BYTES, Node, epoch, get_cache_size, to_hex}; use std::{ borrow::Cow, @@ -318,39 +320,41 @@ impl AsRef<[Node]> for NodeCache { // out. It counts as a read and causes all writes afterwards to be elided. Yes, really. I know, I // want to refactor this to use less `unsafe` as much as the next rustacean. unsafe fn initialize_memory(memory: *mut Node, num_nodes: usize, ident: &H256) { - // We use raw pointers here, see above - let dst = slice::from_raw_parts_mut(memory as *mut u8, NODE_BYTES); + unsafe { + // We use raw pointers here, see above + let dst = slice::from_raw_parts_mut(memory as *mut u8, NODE_BYTES); - debug_assert_eq!(ident.len(), 32); - keccak_512::write(&ident[..], dst); + debug_assert_eq!(ident.len(), 32); + keccak_512::write(&ident[..], dst); - for i in 1..num_nodes { - // We use raw pointers here, see above - let dst = slice::from_raw_parts_mut(memory.offset(i as _) as *mut u8, NODE_BYTES); - let src = slice::from_raw_parts(memory.offset(i as isize - 1) as *mut u8, NODE_BYTES); - keccak_512::write(src, dst); - } + for i in 1..num_nodes { + // We use raw pointers here, see above + let dst = slice::from_raw_parts_mut(memory.offset(i as _) as *mut u8, NODE_BYTES); + let src = slice::from_raw_parts(memory.offset(i as isize - 1) as *mut u8, NODE_BYTES); + keccak_512::write(src, dst); + } - // Now this is initialized, we can treat it as a slice. - let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes); + // Now this is initialized, we can treat it as a slice. 
+ let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes); - for _ in 0..ETHASH_CACHE_ROUNDS { - for i in 0..num_nodes { - let data_idx = (num_nodes - 1 + i) % num_nodes; - let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes; + for _ in 0..ETHASH_CACHE_ROUNDS { + for i in 0..num_nodes { + let data_idx = (num_nodes - 1 + i) % num_nodes; + let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes; - let data = { - let mut data: Node = nodes.get_unchecked(data_idx).clone(); - let rhs: &Node = nodes.get_unchecked(idx); + let data = { + let mut data: Node = nodes.get_unchecked(data_idx).clone(); + let rhs: &Node = nodes.get_unchecked(idx); - for (a, b) in data.as_dwords_mut().iter_mut().zip(rhs.as_dwords()) { - *a ^= *b; - } + for (a, b) in data.as_dwords_mut().iter_mut().zip(rhs.as_dwords()) { + *a ^= *b; + } - data - }; + data + }; - keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes); + keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes); + } } } } diff --git a/crates/concensus/ethash/src/compute.rs b/crates/concensus/ethash/src/compute.rs index 00bbcd96d8..761f4ada9d 100644 --- a/crates/concensus/ethash/src/compute.rs +++ b/crates/concensus/ethash/src/compute.rs @@ -19,11 +19,13 @@ // TODO: fix endianess for big endian -use cache::{NodeCache, NodeCacheBuilder}; -use keccak::{keccak_256, keccak_512, H256}; -use progpow::{generate_cdag, keccak_f800_long, keccak_f800_short, progpow, CDag}; -use seed_compute::SeedHashCompute; -use shared::*; +use crate::{ + cache::{NodeCache, NodeCacheBuilder}, + keccak::{H256, keccak_256, keccak_512}, + progpow::{CDag, generate_cdag, keccak_f800_long, keccak_f800_short, progpow}, + seed_compute::SeedHashCompute, + shared::*, +}; use std::io; use std::{mem, path::Path}; @@ -180,7 +182,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64) use std::mem; debug_assert_eq!(val.len() * mem::size_of::(), $n * mem::size_of::()); - 
&mut *(val.as_mut_ptr() as *mut [U; $n]) + unsafe { &mut *(val.as_mut_ptr() as *mut [U; $n]) } } make_const_array($value) diff --git a/crates/concensus/ethash/src/keccak.rs b/crates/concensus/ethash/src/keccak.rs index 134a11fb2a..66611562e4 100644 --- a/crates/concensus/ethash/src/keccak.rs +++ b/crates/concensus/ethash/src/keccak.rs @@ -22,14 +22,12 @@ pub mod keccak_512 { use super::hash; pub use self::hash::{ - keccak512 as inplace, keccak512_range as inplace_range, keccak_512 as write, + keccak_512 as write, keccak512 as inplace, keccak512_range as inplace_range, }; } pub mod keccak_256 { use super::hash; - pub use self::hash::{ - keccak256 as inplace, keccak256_range as inplace_range, keccak_256 as write, - }; + pub use self::hash::{keccak_256 as write, keccak256 as inplace}; } diff --git a/crates/concensus/ethash/src/lib.rs b/crates/concensus/ethash/src/lib.rs index 2232d24319..6b695f9e6f 100644 --- a/crates/concensus/ethash/src/lib.rs +++ b/crates/concensus/ethash/src/lib.rs @@ -15,7 +15,7 @@ // along with OpenEthereum. If not, see . extern crate either; -extern crate ethereum_types; +use ethereum_types; extern crate memmap; extern crate parking_lot; extern crate primal; @@ -52,7 +52,7 @@ mod progpow; pub use cache::{NodeCacheBuilder, OptimizeFor}; use compute::Light; -pub use compute::{quick_get_difficulty, slow_hash_block_number, ProofOfWork}; +pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number}; use ethereum_types::{BigEndianHash, U256, U512}; use keccak::H256; use parking_lot::Mutex; diff --git a/crates/concensus/ethash/src/progpow.rs b/crates/concensus/ethash/src/progpow.rs index 131f90c2c0..efce6b970f 100644 --- a/crates/concensus/ethash/src/progpow.rs +++ b/crates/concensus/ethash/src/progpow.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use compute::{calculate_dag_item, FNV_PRIME}; -use keccak::H256; -use shared::{get_data_size, Node, ETHASH_ACCESSES, ETHASH_MIX_BYTES}; +use crate::{ + compute::{FNV_PRIME, calculate_dag_item}, + keccak::H256, + shared::{ETHASH_ACCESSES, ETHASH_MIX_BYTES, Node, get_data_size}, +}; const PROGPOW_CACHE_BYTES: usize = 16 * 1024; const PROGPOW_CACHE_WORDS: usize = PROGPOW_CACHE_BYTES / 4; @@ -394,8 +396,10 @@ mod test { use tempdir::TempDir; use super::*; - use cache::{NodeCacheBuilder, OptimizeFor}; - use keccak::H256; + use crate::{ + cache::{NodeCacheBuilder, OptimizeFor}, + keccak::H256, + }; use rustc_hex::FromHex; use serde_json::{self, Value}; use std::collections::VecDeque; diff --git a/crates/concensus/ethash/src/seed_compute.rs b/crates/concensus/ethash/src/seed_compute.rs index 6ca9aba7e3..95a6c436a8 100644 --- a/crates/concensus/ethash/src/seed_compute.rs +++ b/crates/concensus/ethash/src/seed_compute.rs @@ -14,8 +14,10 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use keccak::{keccak_256, H256}; -use shared; +use crate::{ + keccak::{H256, keccak_256}, + shared, +}; use std::cell::Cell; diff --git a/crates/concensus/ethash/src/shared.rs b/crates/concensus/ethash/src/shared.rs index 4f72b61a8a..f09ed90480 100644 --- a/crates/concensus/ethash/src/shared.rs +++ b/crates/concensus/ethash/src/shared.rs @@ -81,10 +81,11 @@ macro_rules! static_assert_size_eq { } }; (@inner $a:ty, $b:ty) => { - unsafe { - let val: $b = ::mem::MaybeUninit::uninit().assume_init(); - let _: $a = ::std::mem::transmute(val); - } + // Use const assertion instead of trait implementation + const _: () = assert!( + ::std::mem::size_of::<$a>() == ::std::mem::size_of::<$b>(), + concat!("Size mismatch between ", stringify!($a), " and ", stringify!($b)) + ); }; ($($rest:ty),*) => { static_assert_size_eq!(size_eq: $($rest),*); @@ -97,7 +98,7 @@ macro_rules! 
static_assert_size_eq { }; } -static_assert_size_eq!(Node, NodeBytes, NodeWords, NodeDwords); +static_assert_size_eq!(Node, NodeBytes, NodeWords); #[repr(C)] pub union Node { diff --git a/crates/concensus/miner/Cargo.toml b/crates/concensus/miner/Cargo.toml index 10f0de5a17..00bb5461ce 100644 --- a/crates/concensus/miner/Cargo.toml +++ b/crates/concensus/miner/Cargo.toml @@ -1,7 +1,8 @@ [package] -description = "OpenEthereum Miner Interface." +description = "diamond-node Miner Interface." name = "ethcore-miner" -homepage = "https://github.com/openethereum/openethereum" +edition = "2018" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" version = "1.12.0" authors = ["Parity Technologies "] @@ -19,7 +20,7 @@ common-types = { path = "../../ethcore/types" } error-chain = "0.12" ethabi = "12.0.0" ethabi-derive = { git = 'https://github.com/rimrakhimov/ethabi', branch = 'rimrakhimov/remove-syn-export-span' } -ethabi-contract = "11.0.0" +ethabi-contract = "16.0.0" ethcore-call-contract = { path = "../../vm/call-contract" } ethereum-types = "0.9.2" futures = "0.1" @@ -29,7 +30,7 @@ log = "0.4" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } parity-runtime = { path = "../../runtime/runtime" } parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" price-info = { path = "./price-info", optional = true } rlp = { version = "0.4.6" } serde = { version = "1.0", features = ["derive"] } @@ -37,6 +38,7 @@ serde_derive = "1.0" serde_json = "1.0" trace-time = "0.1" txpool = { path = "../../transaction-pool" } +rustc-hex = "1.0" [dev-dependencies] env_logger = "0.5" diff --git a/crates/concensus/miner/local-store/Cargo.toml b/crates/concensus/miner/local-store/Cargo.toml index 554ea93015..ee9137f819 100644 --- a/crates/concensus/miner/local-store/Cargo.toml +++ b/crates/concensus/miner/local-store/Cargo.toml @@ -3,6 +3,7 @@ name = "parity-local-store" description = "Manages persistent local node data." 
version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] common-types = { path = "../../../ethcore/types" } diff --git a/crates/concensus/miner/local-store/src/lib.rs b/crates/concensus/miner/local-store/src/lib.rs index d1aee10651..78b8c53e28 100644 --- a/crates/concensus/miner/local-store/src/lib.rs +++ b/crates/concensus/miner/local-store/src/lib.rs @@ -18,12 +18,14 @@ use std::{fmt, sync::Arc, time::Duration}; -use ethcore_db::KeyValueDB; -use io::IoHandler; -use types::transaction::{ - Condition as TransactionCondition, PendingTransaction, SignedTransaction, TypedTransaction, - UnverifiedTransaction, +use crate::{ + io::IoHandler, + types::transaction::{ + Condition as TransactionCondition, PendingTransaction, SignedTransaction, TypedTransaction, + UnverifiedTransaction, + }, }; +use ethcore_db::KeyValueDB; extern crate common_types as types; extern crate ethcore_db; @@ -239,9 +241,9 @@ impl Drop for LocalDataStore { mod tests { use super::NodeInfo; + use crate::types::transaction::{Condition, PendingTransaction, Transaction, TypedTransaction}; use ethkey::Brain; use std::sync::Arc; - use types::transaction::{Condition, PendingTransaction, Transaction, TypedTransaction}; // we want to test: round-trip of good transactions. 
// failure to roundtrip bad transactions (but that it doesn't panic) diff --git a/crates/concensus/miner/price-info/Cargo.toml b/crates/concensus/miner/price-info/Cargo.toml index af02b90506..8af227ded3 100644 --- a/crates/concensus/miner/price-info/Cargo.toml +++ b/crates/concensus/miner/price-info/Cargo.toml @@ -1,10 +1,11 @@ [package] description = "Fetch current ETH price" -homepage = "https://github.com/openethereum/openethereum" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "price-info" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2018" [dependencies] fetch = { path = "../../../net/fetch" } @@ -14,5 +15,5 @@ log = "0.4" serde_json = "1.0" [dev-dependencies] -parking_lot = "0.11.1" +parking_lot = "0.12" fake-fetch = { path = "../../../net/fake-fetch" } diff --git a/crates/concensus/miner/price-info/src/lib.rs b/crates/concensus/miner/price-info/src/lib.rs index 7cb422724b..17e166350a 100644 --- a/crates/concensus/miner/price-info/src/lib.rs +++ b/crates/concensus/miner/price-info/src/lib.rs @@ -145,13 +145,13 @@ impl Client { #[cfg(test)] mod test { + use crate::Client; use fake_fetch::FakeFetch; use parity_runtime::{Executor, Runtime}; use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; - use Client; fn price_info_ok(response: &str, executor: Executor) -> Client> { Client::new( diff --git a/crates/concensus/miner/src/gas_pricer.rs b/crates/concensus/miner/src/gas_pricer.rs index 6e60c447bf..7cbc61df06 100644 --- a/crates/concensus/miner/src/gas_pricer.rs +++ b/crates/concensus/miner/src/gas_pricer.rs @@ -16,9 +16,9 @@ //! Auto-updates minimal gas price requirement. -use ethereum_types::U256; #[cfg(feature = "price-info")] -use gas_price_calibrator::GasPriceCalibrator; +use crate::gas_price_calibrator::GasPriceCalibrator; +use ethereum_types::U256; /// Struct to look after updating the acceptable gas price of a miner. 
#[derive(Debug, PartialEq)] diff --git a/crates/concensus/miner/src/lib.rs b/crates/concensus/miner/src/lib.rs index bdd0ad67e1..ac408ccb72 100644 --- a/crates/concensus/miner/src/lib.rs +++ b/crates/concensus/miner/src/lib.rs @@ -24,7 +24,6 @@ extern crate common_types as types; extern crate ethabi; extern crate ethabi_derive; extern crate ethcore_call_contract as call_contract; -extern crate ethereum_types; extern crate futures; extern crate keccak_hash as hash; extern crate linked_hash_map; @@ -35,6 +34,7 @@ extern crate parking_lot; #[cfg(feature = "price-info")] extern crate price_info; extern crate rlp; +extern crate rustc_hex; extern crate txpool; #[macro_use] @@ -52,8 +52,6 @@ extern crate trace_time; extern crate env_logger; #[cfg(test)] extern crate ethkey; -#[cfg(test)] -extern crate rustc_hex; pub mod external; #[cfg(feature = "price-info")] diff --git a/crates/concensus/miner/src/pool/client.rs b/crates/concensus/miner/src/pool/client.rs index c85f17f740..8697bd703e 100644 --- a/crates/concensus/miner/src/pool/client.rs +++ b/crates/concensus/miner/src/pool/client.rs @@ -22,8 +22,8 @@ use std::fmt; +use crate::types::transaction; use ethereum_types::{H160 as Address, H256, U256}; -use types::transaction; /// Account Details #[derive(Debug, Clone)] diff --git a/crates/concensus/miner/src/pool/listener.rs b/crates/concensus/miner/src/pool/listener.rs index 5d1b936b99..6ed6fcf6be 100644 --- a/crates/concensus/miner/src/pool/listener.rs +++ b/crates/concensus/miner/src/pool/listener.rs @@ -21,7 +21,7 @@ use std::{fmt, sync::Arc}; use ethereum_types::H256; use txpool::{self, VerifiedTransaction}; -use pool::VerifiedTransaction as Transaction; +use crate::pool::VerifiedTransaction as Transaction; type Listener = Box; @@ -124,10 +124,10 @@ impl txpool::Listener for Logger { #[cfg(test)] mod tests { use super::*; + use crate::types::transaction; use ethereum_types::H160; use parking_lot::Mutex; use txpool::Listener; - use types::transaction; #[test] fn 
should_notify_listeners() { diff --git a/crates/concensus/miner/src/pool/local_transactions.rs b/crates/concensus/miner/src/pool/local_transactions.rs index bb86f76f93..f423a6cd14 100644 --- a/crates/concensus/miner/src/pool/local_transactions.rs +++ b/crates/concensus/miner/src/pool/local_transactions.rs @@ -18,9 +18,9 @@ use std::{fmt, sync::Arc}; +use crate::pool::{ScoredTransaction, VerifiedTransaction as Transaction}; use ethereum_types::H256; use linked_hash_map::LinkedHashMap; -use pool::{ScoredTransaction, VerifiedTransaction as Transaction}; use txpool::{self, VerifiedTransaction}; /// Status of local transaction. @@ -225,7 +225,7 @@ impl txpool::Listener for LocalTransactionsList { return; } - warn!(target: "own_tx", "Transaction canceled (hash {:?})", tx.hash()); + warn!(target: "own_tx", "Transaction canceled (hash {:?}), zero gas price: {}, nonce: {} data: {} ", tx.hash(), tx.has_zero_gas_price(), tx.nonce(), rustc_hex::ToHex::to_hex(tx.transaction.tx().data.as_slice())); self.insert(*tx.hash(), Status::Canceled(tx.clone())); self.clear_old(); } @@ -254,12 +254,12 @@ impl txpool::Listener for LocalTransactionsList { #[cfg(test)] mod tests { use super::*; + use crate::types::transaction; use crypto::publickey::{Generator, Random}; use ethereum_types::U256; use txpool::Listener; - use types::transaction; - use pool; + use crate::pool; #[test] fn should_add_transaction_as_pending() { diff --git a/crates/concensus/miner/src/pool/mod.rs b/crates/concensus/miner/src/pool/mod.rs index 4818aa59de..72cacdb4ad 100644 --- a/crates/concensus/miner/src/pool/mod.rs +++ b/crates/concensus/miner/src/pool/mod.rs @@ -16,10 +16,10 @@ //! 
Transaction Pool +use crate::types::transaction; use ethereum_types::{Address, H256, U256}; use parity_util_mem::MallocSizeOfExt; use txpool; -use types::transaction; mod listener; mod queue; diff --git a/crates/concensus/miner/src/pool/queue.rs b/crates/concensus/miner/src/pool/queue.rs index adede8bf47..3034123d46 100644 --- a/crates/concensus/miner/src/pool/queue.rs +++ b/crates/concensus/miner/src/pool/queue.rs @@ -24,15 +24,16 @@ use std::{ atomic::{self, AtomicUsize}, Arc, }, + time::Duration, }; use self::scoring::ScoringEvent; +use crate::types::transaction; use ethereum_types::{Address, H256, U256}; use parking_lot::RwLock; use txpool::{self, Verifier}; -use types::transaction; -use pool::{ +use crate::pool::{ self, client, listener, local_transactions::LocalTransactionsList, ready, replace, scoring, @@ -117,6 +118,14 @@ impl CachedPending { self.pending = None; } + /// Find transaction by hash in cached pending set. + /// NOTE: Linear lookup, bad performance. + pub fn find(&self, hash: &H256) -> Option> { + self.pending + .as_ref() + .and_then(|pending| pending.iter().find(|tx| tx.hash == *hash).cloned()) + } + /// Returns cached pending set (if any) if it's valid. pub fn pending( &self, @@ -458,6 +467,7 @@ impl TransactionQueue { // Double check after acquiring write lock let mut cached_pending = cached.write(); + if let Some(pending) = cached_pending.pending(block_number, current_timestamp, nonce_cap.as_ref(), max_len) { @@ -536,6 +546,20 @@ impl TransactionQueue { ) } + /// Returns status of a local transaction by its hash. + pub fn local_transaction_status( + &self, + tx_hash: &H256, + ) -> Option { + self.pool + .read() + .listener() + .0 + .all_transactions() + .get(tx_hash) + .cloned() + } + /// Collect pending transactions. /// /// NOTE This is re-computing the pending set and it might be expensive to do so. 
@@ -660,7 +684,34 @@ impl TransactionQueue { /// Given transaction hash looks up that transaction in the pool /// and returns a shared pointer to it or `None` if it's not present. pub fn find(&self, hash: &H256) -> Option> { - self.pool.read().find(hash) + self.cached_enforced_pending + .read() + .find(hash) + .or(self.cached_non_enforced_pending.read().find(hash)) + .or(self.pool.read().find(hash)) + } + + /// Retrieve a transaction from the pool, if the pool is readable. + /// + /// Given transaction hash looks up that transaction in the pool + /// and returns a shared pointer to it or `None` if it's not present, or a readlock could not get acquired. + pub fn find_if_readable( + &self, + hash: &H256, + max_lock_duration: &Duration, + ) -> Option> { + let splitted_duration = max_lock_duration.div_f32(3.0); + self.cached_enforced_pending + .try_read_for(splitted_duration.clone())? + .find(hash) + .or(self + .cached_non_enforced_pending + .try_read_for(splitted_duration.clone())? + .find(hash)) + .or(self + .pool + .try_read_for(splitted_duration.clone())? + .find(hash)) } /// Remove a set of transactions from the pool. @@ -774,12 +825,13 @@ impl TransactionQueue { (pool.listener_mut().1).0.add(f); } - /// Check if pending set is cached. + /// Check if pending set is cached. (enforced) #[cfg(test)] pub fn is_enforced_pending_cached(&self) -> bool { self.cached_enforced_pending.read().pending.is_some() } + /// Check if pending set is cached. 
(non-enforced) #[cfg(test)] pub fn is_non_enforced_pending_cached(&self) -> bool { self.cached_non_enforced_pending.read().pending.is_some() @@ -802,7 +854,7 @@ fn convert_error(err: txpool::Error) -> transa #[cfg(test)] mod tests { use super::*; - use pool::tests::client::TestClient; + use crate::pool::tests::client::TestClient; #[test] fn should_get_pending_transactions() { diff --git a/crates/concensus/miner/src/pool/ready.rs b/crates/concensus/miner/src/pool/ready.rs index d8914be5c9..d833b2e5e7 100644 --- a/crates/concensus/miner/src/pool/ready.rs +++ b/crates/concensus/miner/src/pool/ready.rs @@ -40,9 +40,9 @@ use std::{cmp, collections::HashMap}; +use crate::types::transaction; use ethereum_types::{H160 as Address, U256}; use txpool::{self, VerifiedTransaction as PoolVerifiedTransaction}; -use types::transaction; use super::{client::NonceClient, VerifiedTransaction}; @@ -165,7 +165,7 @@ impl Option> txpool::Ready for Opt #[cfg(test)] mod tests { use super::*; - use pool::tests::{ + use crate::pool::tests::{ client::TestClient, tx::{Tx, TxExt}, }; diff --git a/crates/concensus/miner/src/pool/replace.rs b/crates/concensus/miner/src/pool/replace.rs index ac104580db..67019b3c2e 100644 --- a/crates/concensus/miner/src/pool/replace.rs +++ b/crates/concensus/miner/src/pool/replace.rs @@ -239,8 +239,7 @@ where mod tests { use super::*; - use crypto::publickey::{Generator, KeyPair, Random}; - use pool::{ + use crate::pool::{ scoring::*, tests::{ client::TestClient, @@ -248,12 +247,13 @@ mod tests { }, PrioritizationStrategy, VerifiedTransaction, }; + use crypto::publickey::{Generator, KeyPair, Random}; use std::sync::Arc; use txpool::{scoring::Choice::*, ShouldReplace}; fn local_tx_verified(tx: Tx, keypair: &KeyPair) -> VerifiedTransaction { let mut verified_tx = tx.unsigned().sign(keypair.secret(), None).verified(); - verified_tx.priority = ::pool::Priority::Local; + verified_tx.priority = crate::pool::Priority::Local; verified_tx } @@ -476,7 +476,7 @@ mod tests { 
..Default::default() }; let mut verified_tx = tx.signed().verified(); - verified_tx.priority = ::pool::Priority::Local; + verified_tx.priority = crate::pool::Priority::Local; verified_tx }; let tx_local_high_gas = { @@ -486,7 +486,7 @@ mod tests { ..Default::default() }; let mut verified_tx = tx.signed().verified(); - verified_tx.priority = ::pool::Priority::Local; + verified_tx.priority = crate::pool::Priority::Local; verified_tx }; diff --git a/crates/concensus/miner/src/pool/scoring.rs b/crates/concensus/miner/src/pool/scoring.rs index c8ef7ade52..9f258ea033 100644 --- a/crates/concensus/miner/src/pool/scoring.rs +++ b/crates/concensus/miner/src/pool/scoring.rs @@ -182,7 +182,7 @@ where mod tests { use super::*; - use pool::tests::tx::{Tx, TxExt}; + use crate::pool::tests::tx::{Tx, TxExt}; use std::sync::Arc; use txpool::Scoring; @@ -200,9 +200,9 @@ mod tests { .map(|(i, tx)| { let mut verified = tx.verified(); verified.priority = match i { - 0 => ::pool::Priority::Local, - 1 => ::pool::Priority::Retracted, - _ => ::pool::Priority::Regular, + 0 => crate::pool::Priority::Local, + 1 => crate::pool::Priority::Retracted, + _ => crate::pool::Priority::Regular, }; txpool::Transaction { insertion_id: 0, diff --git a/crates/concensus/miner/src/pool/tests/client.rs b/crates/concensus/miner/src/pool/tests/client.rs index 1d2fd9ede3..c24a2b9426 100644 --- a/crates/concensus/miner/src/pool/tests/client.rs +++ b/crates/concensus/miner/src/pool/tests/client.rs @@ -16,13 +16,13 @@ use std::sync::{atomic, Arc}; -use ethereum_types::{Address, H256, U256}; -use rlp::Rlp; -use types::transaction::{ +use crate::types::transaction::{ self, SignedTransaction, Transaction, TypedTransaction, UnverifiedTransaction, }; +use ethereum_types::{Address, H256, U256}; +use rlp::Rlp; -use pool::{self, client::AccountDetails}; +use crate::pool::{self, client::AccountDetails}; const MAX_TRANSACTION_SIZE: usize = 15 * 1024; diff --git a/crates/concensus/miner/src/pool/tests/mod.rs 
b/crates/concensus/miner/src/pool/tests/mod.rs index a31cefa25f..cd1955e69f 100644 --- a/crates/concensus/miner/src/pool/tests/mod.rs +++ b/crates/concensus/miner/src/pool/tests/mod.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::types::transaction::{self, PendingTransaction}; use ethereum_types::U256; use hash::KECCAK_EMPTY; use txpool; -use types::transaction::{self, PendingTransaction}; -use pool::{ +use crate::pool::{ transaction_filter::TransactionFilter, verifier, PendingOrdering, PendingSettings, PrioritizationStrategy, TransactionQueue, }; diff --git a/crates/concensus/miner/src/pool/tests/tx.rs b/crates/concensus/miner/src/pool/tests/tx.rs index 9a55d8d29e..b129d249e3 100644 --- a/crates/concensus/miner/src/pool/tests/tx.rs +++ b/crates/concensus/miner/src/pool/tests/tx.rs @@ -14,15 +14,15 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use crypto::publickey::{Generator, Random}; -use ethereum_types::{H256, U256}; -use rustc_hex::FromHex; -use types::transaction::{ +use crate::types::transaction::{ self, AccessListTx, EIP1559TransactionTx, SignedTransaction, Transaction, TypedTransaction, UnverifiedTransaction, }; +use crypto::publickey::{Generator, Random}; +use ethereum_types::{H256, U256}; +use rustc_hex::FromHex; -use pool::{verifier, VerifiedTransaction}; +use crate::pool::{verifier, VerifiedTransaction}; #[derive(Clone)] pub struct Tx { diff --git a/crates/concensus/miner/src/pool/transaction_filter.rs b/crates/concensus/miner/src/pool/transaction_filter.rs index b261fd08a8..338d6dad00 100644 --- a/crates/concensus/miner/src/pool/transaction_filter.rs +++ b/crates/concensus/miner/src/pool/transaction_filter.rs @@ -20,8 +20,7 @@ use ethereum_types::{Address, U256}; -use pool::VerifiedTransaction; -use types::transaction::Action; +use crate::{pool::VerifiedTransaction, types::transaction::Action}; #[allow(non_camel_case_types)] #[derive(Debug, Deserialize, Serialize)] diff --git a/crates/concensus/miner/src/pool/verifier.rs b/crates/concensus/miner/src/pool/verifier.rs index 4e06e5d075..7ca19bf435 100644 --- a/crates/concensus/miner/src/pool/verifier.rs +++ b/crates/concensus/miner/src/pool/verifier.rs @@ -30,10 +30,10 @@ use std::{ }, }; +use crate::types::transaction; use ethereum_types::{H256, U256}; use hash::KECCAK_EMPTY; use txpool; -use types::transaction; use super::{ client::{Client, TransactionType}, @@ -206,7 +206,7 @@ impl Verifier { } impl txpool::Verifier - for Verifier + for Verifier { type Error = transaction::Error; type VerifiedTransaction = VerifiedTransaction; diff --git a/crates/concensus/miner/src/service_transaction_checker.rs b/crates/concensus/miner/src/service_transaction_checker.rs index d4ccfc8f6a..d2a2697e35 100644 --- a/crates/concensus/miner/src/service_transaction_checker.rs +++ b/crates/concensus/miner/src/service_transaction_checker.rs @@ -16,20 +16,18 @@ //! 
A service transactions contract checker. +use crate::types::{ids::BlockId, transaction::SignedTransaction}; use call_contract::{CallContract, RegistryInfo}; use ethabi::FunctionOutputDecoder; use ethereum_types::Address; use parking_lot::RwLock; -use std::{collections::HashMap, mem, sync::Arc}; -use types::{ids::BlockId, transaction::SignedTransaction}; +use std::{collections::HashMap, str::FromStr, sync::Arc}; use_contract!( service_transaction, "res/contracts/service_transaction.json" ); -const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker"; - /// Service transactions checker. #[derive(Default, Clone)] pub struct ServiceTransactionChecker { @@ -58,7 +56,7 @@ impl ServiceTransactionChecker { client: &C, sender: Address, ) -> Result { - trace!(target: "txqueue", "Checking service transaction checker contract from {}", sender); + trace!(target: "txqueue", "Checking service transaction checker contract for {}", sender); if let Some(allowed) = self .certified_addresses_cache .try_read() @@ -67,12 +65,11 @@ impl ServiceTransactionChecker { { return Ok(*allowed); } - let contract_address = client - .registry_address( - SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned(), - BlockId::Latest, - ) - .ok_or_else(|| "Certifier contract is not configured")?; + let x = Address::from_str("5000000000000000000000000000000000000001".into()).unwrap(); + let contract_address = x; + + trace!(target: "txfilter", "Checking service transaction from contract for: {}", sender); + self.call_contract(client, contract_address, sender) .and_then(|allowed| { if let Some(mut cache) = self.certified_addresses_cache.try_write() { @@ -85,31 +82,13 @@ impl ServiceTransactionChecker { /// Refresh certified addresses cache pub fn refresh_cache( &self, - client: &C, + _client: &C, ) -> Result { trace!(target: "txqueue", "Refreshing certified addresses cache"); - // replace the cache with an empty list, - // since it's not recent it won't be used anyway. 
- let cache = mem::replace( - &mut *self.certified_addresses_cache.write(), - HashMap::default(), - ); - - if let Some(contract_address) = client.registry_address( - SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME.to_owned(), - BlockId::Latest, - ) { - let addresses: Vec<_> = cache.keys().collect(); - let mut cache: HashMap = HashMap::default(); - for address in addresses { - let allowed = self.call_contract(client, contract_address, *address)?; - cache.insert(*address, allowed); - } - *self.certified_addresses_cache.write() = cache; - Ok(true) - } else { - Ok(false) - } + + self.certified_addresses_cache.write().clear(); + + Ok(true) } fn call_contract( diff --git a/crates/concensus/miner/stratum/Cargo.toml b/crates/concensus/miner/stratum/Cargo.toml index 44ba6a9a42..b36d587d9f 100644 --- a/crates/concensus/miner/stratum/Cargo.toml +++ b/crates/concensus/miner/stratum/Cargo.toml @@ -4,6 +4,7 @@ name = "ethcore-stratum" version = "1.12.0" license = "GPL-3.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] ethereum-types = "0.9.2" @@ -11,7 +12,7 @@ keccak-hash = "0.5.0" jsonrpc-core = "15.0.0" jsonrpc-tcp-server = "15.0.0" log = "0.4" -parking_lot = "0.11.1" +parking_lot = "0.12" [dev-dependencies] env_logger = "0.5" diff --git a/crates/concensus/miner/stratum/src/lib.rs b/crates/concensus/miner/stratum/src/lib.rs index e1e79a548e..ccf5c33a40 100644 --- a/crates/concensus/miner/stratum/src/lib.rs +++ b/crates/concensus/miner/stratum/src/lib.rs @@ -16,7 +16,7 @@ //! 
Stratum protocol implementation for parity ethereum/bitcoin clients -extern crate ethereum_types; +use ethereum_types; extern crate jsonrpc_core; extern crate jsonrpc_tcp_server; extern crate keccak_hash as hash; @@ -36,7 +36,7 @@ mod traits; pub use traits::{Error, JobDispatcher, PushWorkHandler, ServiceConfiguration}; -use jsonrpc_core::{to_value, Compatibility, IoDelegate, MetaIoHandler, Metadata, Params, Value}; +use jsonrpc_core::{Compatibility, IoDelegate, MetaIoHandler, Metadata, Params, Value, to_value}; use jsonrpc_tcp_server::{ Dispatcher, MetaExtractor, PushMessageError, RequestContext, Server as JsonRpcServer, ServerBuilder as JsonRpcServerBuilder, @@ -318,7 +318,7 @@ mod tests { sync::Arc, }; - use jsonrpc_core::futures::{future, Future}; + use jsonrpc_core::futures::{Future, future}; use tokio::{ io, net::TcpStream, @@ -504,8 +504,9 @@ mod tests { .expect("Response should be utf-8"); assert_eq!( - "{ \"id\": 17, \"method\": \"mining.notify\", \"params\": { \"00040008\", \"100500\" } }\n", - response); + "{ \"id\": 17, \"method\": \"mining.notify\", \"params\": { \"00040008\", \"100500\" } }\n", + response + ); } #[test] diff --git a/crates/concensus/miner/using-queue/Cargo.toml b/crates/concensus/miner/using-queue/Cargo.toml index ac581a96dc..bfca910dd2 100644 --- a/crates/concensus/miner/using-queue/Cargo.toml +++ b/crates/concensus/miner/using-queue/Cargo.toml @@ -2,3 +2,4 @@ name = "using_queue" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" \ No newline at end of file diff --git a/crates/concensus/miner/using-queue/src/lib.rs b/crates/concensus/miner/using-queue/src/lib.rs index 78ad918310..a4e77de1b9 100644 --- a/crates/concensus/miner/using-queue/src/lib.rs +++ b/crates/concensus/miner/using-queue/src/lib.rs @@ -127,11 +127,7 @@ impl UsingQueue { { // a bit clumsy - TODO: think about a nicer way of expressing this. 
if let Some(ref x) = self.pending { - if predicate(x) { - Some(x.clone()) - } else { - None - } + if predicate(x) { Some(x.clone()) } else { None } } else { self.in_use .last() diff --git a/crates/db/bloom/Cargo.toml b/crates/db/bloom/Cargo.toml index 8c366a8522..7527fd1562 100644 --- a/crates/db/bloom/Cargo.toml +++ b/crates/db/bloom/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] description = "Journaling bloom filter" license = "GPL3" +edition = "2024" [lib] path = "src/lib.rs" diff --git a/crates/db/blooms-db/Cargo.toml b/crates/db/blooms-db/Cargo.toml index 6d49b0430d..3d8eccf426 100644 --- a/crates/db/blooms-db/Cargo.toml +++ b/crates/db/blooms-db/Cargo.toml @@ -3,11 +3,12 @@ name = "blooms-db" version = "0.1.0" license = "GPL-3.0" authors = ["Parity Technologies "] +edition = "2018" [dependencies] byteorder = "1.2" ethbloom = "0.9.1" -parking_lot = "0.11.1" +parking_lot = "0.12" tiny-keccak = "1.4" [dev-dependencies] diff --git a/crates/db/blooms-db/src/db.rs b/crates/db/blooms-db/src/db.rs index f174ffeb13..ee6ee4b8e4 100644 --- a/crates/db/blooms-db/src/db.rs +++ b/crates/db/blooms-db/src/db.rs @@ -21,7 +21,7 @@ use std::{ use ethbloom; -use file::{File, FileIterator}; +use crate::file::{File, FileIterator}; fn other_io_err(e: E) -> io::Error where diff --git a/crates/db/db/Cargo.toml b/crates/db/db/Cargo.toml index 372c986f3d..3ee1c0fb3e 100644 --- a/crates/db/db/Cargo.toml +++ b/crates/db/db/Cargo.toml @@ -1,6 +1,6 @@ [package] -description = "OpenEthereum DB access utilities" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node DB access utilities" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "ethcore-db" version = "0.1.0" @@ -14,7 +14,7 @@ kvdb = "0.1" kvdb-rocksdb = "0.1.3" kvdb-memorydb = "0.1" parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" rlp = { version = "0.4.6" } -rlp_derive = { path = "../../util/rlp-derive" } 
+rlp-derive = { version = "0.2"} stats = { path = "../../util/stats" } \ No newline at end of file diff --git a/crates/db/journaldb/Cargo.toml b/crates/db/journaldb/Cargo.toml index 5eb8d67608..f13fa9cb36 100644 --- a/crates/db/journaldb/Cargo.toml +++ b/crates/db/journaldb/Cargo.toml @@ -4,6 +4,7 @@ version = "0.2.0" authors = ["Parity Technologies "] description = "A `HashDB` which can manage a short-term journal potentially containing many forks of mutually exclusive actions" license = "GPL3" +edition = "2024" [dependencies] parity-bytes = "0.1" @@ -15,7 +16,7 @@ kvdb = "0.1" log = "0.4" memory-db = { path = "../memory-db" } parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" fastmap = { path = "../../util/fastmap" } rlp = { version = "0.4.6" } diff --git a/crates/db/journaldb/src/archivedb.rs b/crates/db/journaldb/src/archivedb.rs index 65fbf2593a..9a3649c6de 100644 --- a/crates/db/journaldb/src/archivedb.rs +++ b/crates/db/journaldb/src/archivedb.rs @@ -17,22 +17,21 @@ //! Disk-backed `HashDB` implementation. use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, hash_map::Entry}, io, sync::Arc, }; use super::{ - error_key_already_exists, error_negatively_reference_hash, memory_db::*, LATEST_ERA_KEY, + LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash, memory_db::*, }; +use crate::{DB_PREFIX_LEN, traits::JournalDB}; use bytes::Bytes; use ethcore_db::{DBTransaction, DBValue, KeyValueDB}; use ethereum_types::H256; use hash_db::HashDB; use keccak_hasher::KeccakHasher; use rlp::{decode, encode}; -use traits::JournalDB; -use DB_PREFIX_LEN; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. 
@@ -56,7 +55,7 @@ impl ArchiveDB { .expect("Low-level database error.") .map(|val| decode::(&val).expect("decoding db value failed")); ArchiveDB { - overlay: ::new_memory_db(), + overlay: crate::new_memory_db(), backing, latest_era, column, @@ -97,7 +96,7 @@ impl HashDB for ArchiveDB { } } -impl ::traits::KeyedHashDB for ArchiveDB { +impl crate::traits::KeyedHashDB for ArchiveDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self .backing @@ -228,10 +227,10 @@ impl JournalDB for ArchiveDB { mod tests { use super::*; + use JournalDB; use ethcore_db::InMemoryWithMetrics; use hash_db::HashDB; use keccak::keccak; - use JournalDB; #[test] fn insert_same_in_fork() { diff --git a/crates/db/journaldb/src/as_hash_db_impls.rs b/crates/db/journaldb/src/as_hash_db_impls.rs index d9eef2d5bb..c426ecf5a3 100644 --- a/crates/db/journaldb/src/as_hash_db_impls.rs +++ b/crates/db/journaldb/src/as_hash_db_impls.rs @@ -15,15 +15,13 @@ // along with OpenEthereum. If not, see . //! Impls of the `AsHashDB` upcast trait for all different variants of DB -use crate::{AsKeyedHashDB, KeyedHashDB}; -use archivedb::ArchiveDB; -use earlymergedb::EarlyMergeDB; +use crate::{ + AsKeyedHashDB, KeyedHashDB, archivedb::ArchiveDB, earlymergedb::EarlyMergeDB, + overlaydb::OverlayDB, overlayrecentdb::OverlayRecentDB, refcounteddb::RefCountedDB, +}; use hash_db::{AsHashDB, HashDB}; use keccak_hasher::KeccakHasher; use kvdb::DBValue; -use overlaydb::OverlayDB; -use overlayrecentdb::OverlayRecentDB; -use refcounteddb::RefCountedDB; impl AsHashDB for ArchiveDB { fn as_hash_db(&self) -> &dyn HashDB { diff --git a/crates/db/journaldb/src/earlymergedb.rs b/crates/db/journaldb/src/earlymergedb.rs index 80b6e07a48..43a7a1905b 100644 --- a/crates/db/journaldb/src/earlymergedb.rs +++ b/crates/db/journaldb/src/earlymergedb.rs @@ -17,13 +17,17 @@ //! Disk-backed `HashDB` implementation. 
use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, hash_map::Entry}, io, sync::Arc, }; use super::{ - error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, LATEST_ERA_KEY, + LATEST_ERA_KEY, error_key_already_exists, error_negatively_reference_hash, traits::JournalDB, +}; +use crate::{ + DB_PREFIX_LEN, + util::{DatabaseKey, DatabaseValueRef, DatabaseValueView}, }; use bytes::Bytes; use ethcore_db::{DBTransaction, DBValue, KeyValueDB}; @@ -34,8 +38,6 @@ use memory_db::*; use parity_util_mem::MallocSizeOf; use parking_lot::RwLock; use rlp::{decode, encode}; -use util::{DatabaseKey, DatabaseValueRef, DatabaseValueView}; -use DB_PREFIX_LEN; #[derive(Debug, Clone, PartialEq, Eq, MallocSizeOf)] struct RefInfo { @@ -119,7 +121,7 @@ impl EarlyMergeDB { let (latest_era, refs) = EarlyMergeDB::read_refs(&*backing, col); let refs = Some(Arc::new(RwLock::new(refs))); EarlyMergeDB { - overlay: ::new_memory_db(), + overlay: crate::new_memory_db(), backing: backing, refs: refs, latest_era: latest_era, @@ -363,7 +365,7 @@ impl HashDB for EarlyMergeDB { } } -impl ::traits::KeyedHashDB for EarlyMergeDB { +impl crate::traits::KeyedHashDB for EarlyMergeDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self .backing diff --git a/crates/db/journaldb/src/lib.rs b/crates/db/journaldb/src/lib.rs index 4ac6da48c9..a00a7519be 100644 --- a/crates/db/journaldb/src/lib.rs +++ b/crates/db/journaldb/src/lib.rs @@ -20,7 +20,7 @@ extern crate log; extern crate ethcore_db; -extern crate ethereum_types; +use ethereum_types; extern crate fastmap; extern crate hash_db; extern crate keccak_hasher; diff --git a/crates/db/journaldb/src/overlaydb.rs b/crates/db/journaldb/src/overlaydb.rs index c11db5a3b4..3606ecd41c 100644 --- a/crates/db/journaldb/src/overlaydb.rs +++ b/crates/db/journaldb/src/overlaydb.rs @@ -17,7 +17,7 @@ //! Disk-backed `HashDB` implementation. 
use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{HashMap, hash_map::Entry}, io, sync::Arc, }; @@ -28,7 +28,7 @@ use ethereum_types::H256; use hash_db::HashDB; use keccak_hasher::KeccakHasher; use memory_db::*; -use rlp::{decode, encode, Decodable, DecoderError, Encodable, Rlp, RlpStream}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream, decode, encode}; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay. /// @@ -79,7 +79,7 @@ impl OverlayDB { /// Create a new instance of OverlayDB given a `backing` database. pub fn new(backing: Arc, col: Option) -> OverlayDB { OverlayDB { - overlay: ::new_memory_db(), + overlay: crate::new_memory_db(), backing: backing, column: col, } diff --git a/crates/db/journaldb/src/overlayrecentdb.rs b/crates/db/journaldb/src/overlayrecentdb.rs index 60c88957c0..973c88acd1 100644 --- a/crates/db/journaldb/src/overlayrecentdb.rs +++ b/crates/db/journaldb/src/overlayrecentdb.rs @@ -17,12 +17,13 @@ //! `JournalDB` over in-memory overlay use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, hash_map::Entry}, io, sync::Arc, }; -use super::{error_negatively_reference_hash, JournalDB, DB_PREFIX_LEN, LATEST_ERA_KEY}; +use super::{DB_PREFIX_LEN, JournalDB, LATEST_ERA_KEY, error_negatively_reference_hash}; +use crate::util::DatabaseKey; use bytes::Bytes; use ethcore_db::{DBTransaction, DBValue, KeyValueDB}; use ethereum_types::H256; @@ -32,8 +33,7 @@ use keccak_hasher::KeccakHasher; use memory_db::*; use parity_util_mem::MallocSizeOf; use parking_lot::RwLock; -use rlp::{decode, encode, Decodable, DecoderError, Encodable, Rlp, RlpStream}; -use util::DatabaseKey; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream, decode, encode}; /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. 
@@ -156,7 +156,7 @@ impl OverlayRecentDB { pub fn new(backing: Arc, col: Option) -> OverlayRecentDB { let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&*backing, col))); OverlayRecentDB { - transaction_overlay: ::new_memory_db(), + transaction_overlay: crate::new_memory_db(), backing: backing, journal_overlay: journal_overlay, column: col, @@ -182,7 +182,7 @@ impl OverlayRecentDB { fn read_overlay(db: &dyn KeyValueDB, col: Option) -> JournalOverlay { let mut journal = HashMap::new(); - let mut overlay = ::new_memory_db(); + let mut overlay = crate::new_memory_db(); let mut count = 0; let mut latest_era = None; let mut earliest_era = None; @@ -256,7 +256,7 @@ fn to_short_key(key: &H256) -> H256 { k } -impl ::traits::KeyedHashDB for OverlayRecentDB { +impl crate::traits::KeyedHashDB for OverlayRecentDB { fn keys(&self) -> HashMap { let mut ret: HashMap = self .backing @@ -569,9 +569,9 @@ impl HashDB for OverlayRecentDB { mod tests { use super::*; + use JournalDB; use hash_db::HashDB; use keccak::keccak; - use JournalDB; fn new_db() -> OverlayRecentDB { let backing = Arc::new(ethcore_db::InMemoryWithMetrics::create(0)); diff --git a/crates/db/journaldb/src/refcounteddb.rs b/crates/db/journaldb/src/refcounteddb.rs index 3ba75e7174..dd61c59a03 100644 --- a/crates/db/journaldb/src/refcounteddb.rs +++ b/crates/db/journaldb/src/refcounteddb.rs @@ -22,18 +22,20 @@ use std::{ sync::Arc, }; -use super::{traits::JournalDB, LATEST_ERA_KEY}; +use super::{LATEST_ERA_KEY, traits::JournalDB}; +use crate::{ + DB_PREFIX_LEN, + overlaydb::OverlayDB, + util::{DatabaseKey, DatabaseValueRef, DatabaseValueView}, +}; use bytes::Bytes; use ethcore_db::{DBTransaction, DBValue, KeyValueDB}; use ethereum_types::H256; use hash_db::HashDB; use keccak_hasher::KeccakHasher; use memory_db::MemoryDB; -use overlaydb::OverlayDB; -use parity_util_mem::{allocators::new_malloc_size_ops, MallocSizeOf}; +use parity_util_mem::{MallocSizeOf, allocators::new_malloc_size_ops}; use 
rlp::{decode, encode}; -use util::{DatabaseKey, DatabaseValueRef, DatabaseValueView}; -use DB_PREFIX_LEN; /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay /// and latent-removal semantics. @@ -104,7 +106,7 @@ impl HashDB for RefCountedDB { } } -impl ::traits::KeyedHashDB for RefCountedDB { +impl crate::traits::KeyedHashDB for RefCountedDB { fn keys(&self) -> HashMap { self.forward.keys() } @@ -259,9 +261,9 @@ impl JournalDB for RefCountedDB { mod tests { use super::*; + use JournalDB; use hash_db::HashDB; use keccak::keccak; - use JournalDB; fn new_db() -> RefCountedDB { let backing = Arc::new(ethcore_db::InMemoryWithMetrics::create(0)); diff --git a/crates/db/journaldb/src/traits.rs b/crates/db/journaldb/src/traits.rs index 7e8edd8152..6325f5ffd4 100644 --- a/crates/db/journaldb/src/traits.rs +++ b/crates/db/journaldb/src/traits.rs @@ -70,7 +70,7 @@ pub trait JournalDB: KeyedHashDB { /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out. fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256) - -> io::Result; + -> io::Result; /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. 
diff --git a/crates/db/memory-db/Cargo.toml b/crates/db/memory-db/Cargo.toml index 90c3cd8954..f19e4248a3 100644 --- a/crates/db/memory-db/Cargo.toml +++ b/crates/db/memory-db/Cargo.toml @@ -17,6 +17,7 @@ authors = ["Parity Technologies "] description = "In-memory implementation of hash-db, useful for tests" license = "Apache-2.0" repository = "https://github.com/paritytech/parity-common" +edition = "2024" [[bench]] name = "bench" diff --git a/crates/db/memory-db/benches/bench.rs b/crates/db/memory-db/benches/bench.rs index a0212f435d..769ff118d4 100644 --- a/crates/db/memory-db/benches/bench.rs +++ b/crates/db/memory-db/benches/bench.rs @@ -14,7 +14,7 @@ #[macro_use] extern crate criterion; -use criterion::{black_box, Criterion}; +use criterion::{Criterion, black_box}; criterion_group!( benches, instantiation, @@ -44,7 +44,7 @@ fn instantiation(b: &mut Criterion) { fn compare_to_null_embedded_in_struct(b: &mut Criterion) { struct X { a_hash: ::Out, - }; + } let x = X { a_hash: KeccakHasher::hash(&[0u8][..]), }; diff --git a/crates/db/memory-db/src/lib.rs b/crates/db/memory-db/src/lib.rs index 7baa58cabc..87f93a640a 100644 --- a/crates/db/memory-db/src/lib.rs +++ b/crates/db/memory-db/src/lib.rs @@ -23,7 +23,7 @@ extern crate keccak_hasher; use hash_db::{AsHashDB, AsPlainDB, HashDB, HashDBRef, Hasher as KeyHasher, PlainDB, PlainDBRef}; use parity_util_mem::MallocSizeOf; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{HashMap, hash_map::Entry}, hash, mem, }; diff --git a/crates/db/migration-rocksdb/Cargo.toml b/crates/db/migration-rocksdb/Cargo.toml index 025ed98cc9..85c8b29806 100644 --- a/crates/db/migration-rocksdb/Cargo.toml +++ b/crates/db/migration-rocksdb/Cargo.toml @@ -2,6 +2,7 @@ name = "migration-rocksdb" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] log = "0.4" diff --git a/crates/db/patricia-trie-ethereum/Cargo.toml b/crates/db/patricia-trie-ethereum/Cargo.toml index 1064bdb303..966fd548b0 
100644 --- a/crates/db/patricia-trie-ethereum/Cargo.toml +++ b/crates/db/patricia-trie-ethereum/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] description = "Merkle-Patricia Trie (Ethereum Style)" license = "GPL-3.0" +edition = "2018" [dependencies] trie-db = "0.11.0" diff --git a/crates/db/patricia-trie-ethereum/src/lib.rs b/crates/db/patricia-trie-ethereum/src/lib.rs index d6828ee701..59cade7a9a 100644 --- a/crates/db/patricia-trie-ethereum/src/lib.rs +++ b/crates/db/patricia-trie-ethereum/src/lib.rs @@ -17,7 +17,7 @@ //! Façade crate for `patricia_trie` for Ethereum specific impls extern crate elastic_array; -extern crate ethereum_types; +use ethereum_types; extern crate hash_db; extern crate keccak_hasher; extern crate parity_bytes; @@ -47,7 +47,7 @@ pub type RlpCodec = RlpNodeCodec; /// extern crate hash_db; /// extern crate keccak_hasher; /// extern crate memory_db; -/// extern crate ethereum_types; +/// use ethereum_types; /// extern crate elastic_array; /// extern crate journaldb; /// @@ -92,7 +92,7 @@ pub type FatDB<'db> = trie::FatDB<'db, KeccakHasher, RlpCodec>; /// extern crate keccak_hash; /// extern crate keccak_hasher; /// extern crate memory_db; -/// extern crate ethereum_types; +/// use ethereum_types; /// extern crate elastic_array; /// extern crate journaldb; /// diff --git a/crates/ethcore/Cargo.toml b/crates/ethcore/Cargo.toml index 58f97beb9b..f7cfcd6fb9 100644 --- a/crates/ethcore/Cargo.toml +++ b/crates/ethcore/Cargo.toml @@ -1,10 +1,11 @@ [package] -description = "OpenEthereum (EthCore) Library" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node (EthCore) Library" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "ethcore" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] ansi_term = "0.10" @@ -17,7 +18,7 @@ eip-152 = { version = "0.1", path = "../util/EIP-152" } env_logger = { version = "0.5", optional = 
true } error-chain = { version = "0.12", default-features = false } ethabi = "12.0.0" -ethabi-contract = "11.0.0" +ethabi-contract = "16.0.0" ethabi-derive = { git = 'https://github.com/rimrakhimov/ethabi', branch = 'rimrakhimov/remove-syn-export-span' } ethash = { path = "../concensus/ethash" } ethcore-blockchain = { path = "./blockchain" } @@ -34,8 +35,8 @@ ethkey = { path = "../accounts/ethkey" } evm = { path = "../vm/evm" } globset = "0.4" hash-db = "0.11.0" -hbbft = { git = "https://github.com/surfingnerd/hbbft", rev = "cf0c45aa669b9c10abab1a0f4f2b33595879b60b" } -hbbft_testing = { git = "https://github.com/poanetwork/hbbft" } +hbbft = { git = "https://github.com/DMDcoin/hbbft.git", rev = "4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074" } +hbbft_testing = { git = "https://github.com/DMDcoin/hbbft.git", rev = "4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074" } hex_fmt = "0.3.0" itertools = "0.5" journaldb = { path = "../db/journaldb" } @@ -57,7 +58,7 @@ parity-bytes = "0.1" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } parity-snappy = "0.1" parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" trie-db = "0.11.0" patricia-trie-ethereum = { path = "../db/patricia-trie-ethereum" } rand_065 = { package = "rand", version = "0.6.5" } @@ -67,7 +68,7 @@ rayon = "1.1" regex = "1.3.9" rmp-serde = "1.1.0" rlp = { version = "0.4.6" } -rlp_derive = { path = "../util/rlp-derive" } +rlp-derive = { version = "0.2" } rustc-hex = "1.0" serde = "1.0" serde_derive = "1.0" @@ -87,6 +88,7 @@ walkdir = "2.3" wasm = { path = "../vm/wasm" } derive_more = "0.99" scopeguard = "1.1.0" +fastmap = { path = "../util/fastmap"} [dev-dependencies] blooms-db = { path = "../db/blooms-db" } diff --git a/crates/ethcore/benches/builtin.rs b/crates/ethcore/benches/builtin.rs index 47c6cd568f..9c51235111 100644 --- a/crates/ethcore/benches/builtin.rs +++ b/crates/ethcore/benches/builtin.rs @@ -21,7 +21,7 @@ extern crate criterion; extern crate lazy_static; extern crate ethcore; 
extern crate ethcore_builtin; -extern crate ethereum_types; +use ethereum_types; extern crate parity_bytes as bytes; extern crate rustc_hex; @@ -50,8 +50,7 @@ impl<'a> BuiltinBenchmark<'a> { let builtin = builtins .get(&H160::from_str(builtin_address).unwrap()) - .unwrap() - .clone(); + .unwrap(); let input = FromHex::from_hex(input).unwrap(); let expected = FromHex::from_hex(expected).unwrap(); @@ -144,492 +143,492 @@ criterion_main!(builtin); fn ecrecover(b: &mut Criterion) { bench( - "ecrecover", - "0000000000000000000000000000000000000001", // ecrecover - "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", - "000000000000000000000000ceaccac640adf55b2028469bd36ba501f28b699d", - b, - ); + "ecrecover", + "0000000000000000000000000000000000000001", // ecrecover + "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", + "000000000000000000000000ceaccac640adf55b2028469bd36ba501f28b699d", + b, + ); } fn sha256(b: &mut Criterion) { bench( - "sha256", - "0000000000000000000000000000000000000002", // sha256 - "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", - "811c7003375852fabd0d362e40e68607a12bdabae61a7d068fe5fdd1dbbf2a5d", - b, - ); + "sha256", + "0000000000000000000000000000000000000002", // sha256 + 
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", + "811c7003375852fabd0d362e40e68607a12bdabae61a7d068fe5fdd1dbbf2a5d", + b, + ); } fn ripemd(b: &mut Criterion) { bench( - "ripemd", - "0000000000000000000000000000000000000003", // ripemd - "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", - "0000000000000000000000009215b8d9882ff46f0dfde6684d78e831467f65e6", - b, - ); + "ripemd", + "0000000000000000000000000000000000000003", // ripemd + "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", + "0000000000000000000000009215b8d9882ff46f0dfde6684d78e831467f65e6", + b, + ); } fn identity(b: &mut Criterion) { bench( - "identity", - "0000000000000000000000000000000000000004", // identity - "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", - "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", - b, - ); + "identity", + "0000000000000000000000000000000000000004", // identity + 
"38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", + "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02", + b, + ); } fn modexp_eip_example1(b: &mut Criterion) { bench( - "modexp_eip_example1", - "0000000000000000000000000000000000000005", // modexp - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002003fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "modexp_eip_example1", + "0000000000000000000000000000000000000005", // modexp + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002003fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn modexp_eip_example2(b: &mut Criterion) { bench( - "modexp_eip_example2", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - "0000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "modexp_eip_example2", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "0000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn modexp_nagydani_1_square(b: &mut Criterion) { bench( - "modexp_nagydani_1_square", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", - "60008f1614cc01dcfb6bfb09c625cf90b47d4468db81b5f8b7a39d42f332eab9b2da8f2d95311648a8f243f4bb13cfb3d8f7f2a3c014122ebb3ed41b02783adc", - b, - ); + "modexp_nagydani_1_square", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", + "60008f1614cc01dcfb6bfb09c625cf90b47d4468db81b5f8b7a39d42f332eab9b2da8f2d95311648a8f243f4bb13cfb3d8f7f2a3c014122ebb3ed41b02783adc", + b, + ); } fn modexp_nagydani_1_qube(b: &mut Criterion) { bench( - "modexp_nagydani_1_qube", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", - "4834a46ba565db27903b1c720c9d593e84e4cbd6ad2e64b31885d944f68cd801f92225a8961c952ddf2797fa4701b330c85c4b363798100b921a1a22a46a7fec", - b, - ); + "modexp_nagydani_1_qube", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", + 
"4834a46ba565db27903b1c720c9d593e84e4cbd6ad2e64b31885d944f68cd801f92225a8961c952ddf2797fa4701b330c85c4b363798100b921a1a22a46a7fec", + b, + ); } fn modexp_nagydani_1_pow0x10001(b: &mut Criterion) { bench( - "modexp_nagydani_1_pow0x10001", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", - "c36d804180c35d4426b57b50c5bfcca5c01856d104564cd513b461d3c8b8409128a5573e416d0ebe38f5f736766d9dc27143e4da981dfa4d67f7dc474cbee6d2", - b, - ); + "modexp_nagydani_1_pow0x10001", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", + "c36d804180c35d4426b57b50c5bfcca5c01856d104564cd513b461d3c8b8409128a5573e416d0ebe38f5f736766d9dc27143e4da981dfa4d67f7dc474cbee6d2", + b, + ); } fn modexp_nagydani_2_square(b: &mut Criterion) { bench( - "modexp_nagydani_2_square", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", - "981dd99c3b113fae3e3eaa9435c0dc96779a23c12a53d1084b4f67b0b053a27560f627b873e3f16ad78f28c94f14b6392def26e4d8896c5e3c984e50fa0b3aa44f1da78b913187c6128baa9340b1e9c9a0fd02cb78885e72576da4a8f7e5a113e173a7a2889fde9d407bd9f06eb05bc8fc7b4229377a32941a02bf4edcc06d70", - b, - ); + "modexp_nagydani_2_square", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", + 
"981dd99c3b113fae3e3eaa9435c0dc96779a23c12a53d1084b4f67b0b053a27560f627b873e3f16ad78f28c94f14b6392def26e4d8896c5e3c984e50fa0b3aa44f1da78b913187c6128baa9340b1e9c9a0fd02cb78885e72576da4a8f7e5a113e173a7a2889fde9d407bd9f06eb05bc8fc7b4229377a32941a02bf4edcc06d70", + b, + ); } fn modexp_nagydani_2_qube(b: &mut Criterion) { bench( - "modexp_nagydani_2_qube", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", - "d89ceb68c32da4f6364978d62aaa40d7b09b59ec61eb3c0159c87ec3a91037f7dc6967594e530a69d049b64adfa39c8fa208ea970cfe4b7bcd359d345744405afe1cbf761647e32b3184c7fbe87cee8c6c7ff3b378faba6c68b83b6889cb40f1603ee68c56b4c03d48c595c826c041112dc941878f8c5be828154afd4a16311f", - b, - ); + "modexp_nagydani_2_qube", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", + "d89ceb68c32da4f6364978d62aaa40d7b09b59ec61eb3c0159c87ec3a91037f7dc6967594e530a69d049b64adfa39c8fa208ea970cfe4b7bcd359d345744405afe1cbf761647e32b3184c7fbe87cee8c6c7ff3b378faba6c68b83b6889cb40f1603ee68c56b4c03d48c595c826c041112dc941878f8c5be828154afd4a16311f", + b, + ); } fn modexp_nagydani_2_pow0x10001(b: &mut Criterion) { bench( - "modexp_nagydani_2_pow0x10001", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", - 
"ad85e8ef13fd1dd46eae44af8b91ad1ccae5b7a1c92944f92a19f21b0b658139e0cabe9c1f679507c2de354bf2c91ebd965d1e633978a830d517d2f6f8dd5fd58065d58559de7e2334a878f8ec6992d9b9e77430d4764e863d77c0f87beede8f2f7f2ab2e7222f85cc9d98b8467f4bb72e87ef2882423ebdb6daf02dddac6db2", - b, - ); + "modexp_nagydani_2_pow0x10001", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", + "ad85e8ef13fd1dd46eae44af8b91ad1ccae5b7a1c92944f92a19f21b0b658139e0cabe9c1f679507c2de354bf2c91ebd965d1e633978a830d517d2f6f8dd5fd58065d58559de7e2334a878f8ec6992d9b9e77430d4764e863d77c0f87beede8f2f7f2ab2e7222f85cc9d98b8467f4bb72e87ef2882423ebdb6daf02dddac6db2", + b, + ); } fn modexp_nagydani_3_square(b: &mut Criterion) { bench( - "modexp_nagydani_3_square", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", - "affc7507ea6d84751ec6b3f0d7b99dbcc263f33330e450d1b3ff0bc3d0874320bf4edd57debd587306988157958cb3cfd369cc0c9c198706f635c9e0f15d047df5cb44d03e2727f26b083c4ad8485080e1293f171c1ed52aef5993a5815c35108e848c951cf1e334490b4a539a139e57b68f44fee583306f5b85ffa57206b3ee5660458858534e5386b9584af3c7f67806e84c189d695e5eb96e1272d06ec2df5dc5fabc6e94b793718c60c36be0a4d031fc84cd658aa72294b2e16fc240aef70cb9e591248e38bd49c5a554d1afa01f38dab72733092f7555334bbef6c8c430119840492380aa95fa025dcf699f0a39669d812b0c6946b6091e6e235337b6f8", - b, - ); + "modexp_nagydani_3_square", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", + "affc7507ea6d84751ec6b3f0d7b99dbcc263f33330e450d1b3ff0bc3d0874320bf4edd57debd587306988157958cb3cfd369cc0c9c198706f635c9e0f15d047df5cb44d03e2727f26b083c4ad8485080e1293f171c1ed52aef5993a5815c35108e848c951cf1e334490b4a539a139e57b68f44fee583306f5b85ffa57206b3ee5660458858534e5386b9584af3c7f67806e84c189d695e5eb96e1272d06ec2df5dc5fabc6e94b793718c60c36be0a4d031fc84cd658aa72294b2e16fc240aef70cb9e591248e38bd49c5a554d1afa01f38dab72733092f7555334bbef6c8c430119840492380aa95fa025dcf699f0a39669d812b0c6946b6091e6e235337b6f8", + b, + ); } fn modexp_nagydani_3_qube(b: &mut Criterion) { bench( - "modexp_nagydani_3_qube", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", - "1b280ecd6a6bf906b806d527c2a831e23b238f89da48449003a88ac3ac7150d6a5e9e6b3be4054c7da11dd1e470ec29a606f5115801b5bf53bc1900271d7c3ff3cd5ed790d1c219a9800437a689f2388ba1a11d68f6a8e5b74e9a3b1fac6ee85fc6afbac599f93c391f5dc82a759e3c6c0ab45ce3f5d25d9b0c1bf94cf701ea6466fc9a478dacc5754e593172b5111eeba88557048bceae401337cd4c1182ad9f700852bc8c99933a193f0b94cf1aedbefc48be3bc93ef5cb276d7c2d5462ac8bb0c8fe8923a1db2afe1c6b90d59c534994a6a633f0ead1d638fdc293486bb634ff2c8ec9e7297c04241a61c37e3ae95b11d53343d4ba2b4cc33d2cfa7eb705e", - b, - ); + "modexp_nagydani_3_qube", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", + "1b280ecd6a6bf906b806d527c2a831e23b238f89da48449003a88ac3ac7150d6a5e9e6b3be4054c7da11dd1e470ec29a606f5115801b5bf53bc1900271d7c3ff3cd5ed790d1c219a9800437a689f2388ba1a11d68f6a8e5b74e9a3b1fac6ee85fc6afbac599f93c391f5dc82a759e3c6c0ab45ce3f5d25d9b0c1bf94cf701ea6466fc9a478dacc5754e593172b5111eeba88557048bceae401337cd4c1182ad9f700852bc8c99933a193f0b94cf1aedbefc48be3bc93ef5cb276d7c2d5462ac8bb0c8fe8923a1db2afe1c6b90d59c534994a6a633f0ead1d638fdc293486bb634ff2c8ec9e7297c04241a61c37e3ae95b11d53343d4ba2b4cc33d2cfa7eb705e", + b, + ); } fn modexp_nagydani_3_pow0x10001(b: &mut Criterion) { bench( - "modexp_nagydani_3_pow0x10001", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", - "37843d7c67920b5f177372fa56e2a09117df585f81df8b300fba245b1175f488c99476019857198ed459ed8d9799c377330e49f4180c4bf8e8f66240c64f65ede93d601f957b95b83efdee1e1bfde74169ff77002eaf078c71815a9220c80b2e3b3ff22c2f358111d816ebf83c2999026b6de50bfc711ff68705d2f40b753424aefc9f70f08d908b5a20276ad613b4ab4309a3ea72f0c17ea9df6b3367d44fb3acab11c333909e02e81ea2ed404a712d3ea96bba87461720e2d98723e7acd0520ac1a5212dbedcd8dc0c1abf61d4719e319ff4758a774790b8d463cdfe131d1b2dcfee52d002694e98e720cb6ae7ccea353bc503269ba35f0f63bf8d7b672a76", - b, - ); + "modexp_nagydani_3_pow0x10001", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", + "37843d7c67920b5f177372fa56e2a09117df585f81df8b300fba245b1175f488c99476019857198ed459ed8d9799c377330e49f4180c4bf8e8f66240c64f65ede93d601f957b95b83efdee1e1bfde74169ff77002eaf078c71815a9220c80b2e3b3ff22c2f358111d816ebf83c2999026b6de50bfc711ff68705d2f40b753424aefc9f70f08d908b5a20276ad613b4ab4309a3ea72f0c17ea9df6b3367d44fb3acab11c333909e02e81ea2ed404a712d3ea96bba87461720e2d98723e7acd0520ac1a5212dbedcd8dc0c1abf61d4719e319ff4758a774790b8d463cdfe131d1b2dcfee52d002694e98e720cb6ae7ccea353bc503269ba35f0f63bf8d7b672a76", + b, + ); } fn modexp_nagydani_4_square(b: &mut Criterion) { bench( - "modexp_nagydani_4_square", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b94
4462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", - "8a5aea5f50dcc03dc7a7a272b5aeebc040554dbc1ffe36753c4fc75f7ed5f6c2cc0de3a922bf96c78bf0643a73025ad21f45a4a5cadd717612c511ab2bff1190fe5f1ae05ba9f8fe3624de1de2a817da6072ddcdb933b50216811dbe6a9ca79d3a3c6b3a476b079fd0d05f04fb154e2dd3e5cb83b148a006f2bcbf0042efb2ae7b916ea81b27aac25c3bf9a8b6d35440062ad8eae34a83f3ffa2cc7b40346b62174a4422584f72f95316f6b2bee9ff232ba9739301c97c99a9ded26c45d72676eb856ad6ecc81d36a6de36d7f9dafafee11baa43a4b0d5e4ecffa7b9b7dcefd58c397dd373e6db4acd2b2c02717712e6289bed7c813b670c4a0c6735aa7f3b0f1ce556eae9fcc94b501b2c8781ba50a8c6220e8246371c3c7359fe4ef9da786ca7d98256754ca4e496be0a9174bedbecb384bdf470779186d6a833f068d2838a88d90ef3ad48ff963b67c39cc5a3ee123baf7bf3125f64e77af7f30e105d72c4b9b5b237ed251e4c122c6d8c1405e736299c3afd6db16a28c6a9cfa68241e53de4cd388271fe534a6a9b0dbea6171d170db1b89858468885d08fecbd54c8e471c3e25d48e97ba450b96d0d87e00ac732aaa0d3ce4309c1064bd8a4c0808a97e0143e43a24cfa847635125cd41c13e0574487963e9d725c01375db99c31da67b4cf65eff555f0c0ac416c727ff8d438ad7c42030551d68c2e7adda0abb1ca7c10", - b, - ); + "modexp_nagydani_4_square", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b94
4462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", + "8a5aea5f50dcc03dc7a7a272b5aeebc040554dbc1ffe36753c4fc75f7ed5f6c2cc0de3a922bf96c78bf0643a73025ad21f45a4a5cadd717612c511ab2bff1190fe5f1ae05ba9f8fe3624de1de2a817da6072ddcdb933b50216811dbe6a9ca79d3a3c6b3a476b079fd0d05f04fb154e2dd3e5cb83b148a006f2bcbf0042efb2ae7b916ea81b27aac25c3bf9a8b6d35440062ad8eae34a83f3ffa2cc7b40346b62174a4422584f72f95316f6b2bee9ff232ba9739301c97c99a9ded26c45d72676eb856ad6ecc81d36a6de36d7f9dafafee11baa43a4b0d5e4ecffa7b9b7dcefd58c397dd373e6db4acd2b2c02717712e6289bed7c813b670c4a0c6735aa7f3b0f1ce556eae9fcc94b501b2c8781ba50a8c6220e8246371c3c7359fe4ef9da786ca7d98256754ca4e496be0a9174bedbecb384bdf470779186d6a833f068d2838a88d90ef3ad48ff963b67c39cc5a3ee123baf7bf3125f64e77af7f30e105d72c4b9b5b237ed251e4c122c6d8c1405e736299c3afd6db16a28c6a9cfa68241e53de4cd388271fe534a6a9b0dbea6171d170db1b89858468885d08fecbd54c8e471c3e25d48e97ba450b96d0d87e00ac732aaa0d3ce4309c1064bd8a4c0808a97e0143e43a24cfa847635125cd41c13e0574487963e9d725c01375db99c31da67b4cf65eff555f0c0ac416c727ff8d438ad7c42030551d68c2e7adda0abb1ca7c10", + b, + ); } fn modexp_nagydani_4_qube(b: &mut Criterion) { bench( - "modexp_nagydani_4_qube", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b94
4462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", - "5a2664252aba2d6e19d9600da582cdd1f09d7a890ac48e6b8da15ae7c6ff1856fc67a841ac2314d283ffa3ca81a0ecf7c27d89ef91a5a893297928f5da0245c99645676b481b7e20a566ee6a4f2481942bee191deec5544600bb2441fd0fb19e2ee7d801ad8911c6b7750affec367a4b29a22942c0f5f4744a4e77a8b654da2a82571037099e9c6d930794efe5cdca73c7b6c0844e386bdca8ea01b3d7807146bb81365e2cdc6475f8c23e0ff84463126189dc9789f72bbce2e3d2d114d728a272f1345122de23df54c922ec7a16e5c2a8f84da8871482bd258c20a7c09bbcd64c7a96a51029bbfe848736a6ba7bf9d931a9b7de0bcaf3635034d4958b20ae9ab3a95a147b0421dd5f7ebff46c971010ebfc4adbbe0ad94d5498c853e7142c450d8c71de4b2f84edbf8acd2e16d00c8115b150b1c30e553dbb82635e781379fe2a56360420ff7e9f70cc64c00aba7e26ed13c7c19622865ae07248daced36416080f35f8cc157a857ed70ea4f347f17d1bee80fa038abd6e39b1ba06b97264388b21364f7c56e192d4b62d9b161405f32ab1e2594e86243e56fcf2cb30d21adef15b9940f91af681da24328c883d892670c6aa47940867a81830a82b82716895db810df1b834640abefb7db2092dd92912cb9a735175bc447be40a503cf22dfe565b4ed7a3293ca0dfd63a507430b323ee248ec82e843b673c97ad730728cebc", - b, - ); + "modexp_nagydani_4_qube", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b94
4462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", + "5a2664252aba2d6e19d9600da582cdd1f09d7a890ac48e6b8da15ae7c6ff1856fc67a841ac2314d283ffa3ca81a0ecf7c27d89ef91a5a893297928f5da0245c99645676b481b7e20a566ee6a4f2481942bee191deec5544600bb2441fd0fb19e2ee7d801ad8911c6b7750affec367a4b29a22942c0f5f4744a4e77a8b654da2a82571037099e9c6d930794efe5cdca73c7b6c0844e386bdca8ea01b3d7807146bb81365e2cdc6475f8c23e0ff84463126189dc9789f72bbce2e3d2d114d728a272f1345122de23df54c922ec7a16e5c2a8f84da8871482bd258c20a7c09bbcd64c7a96a51029bbfe848736a6ba7bf9d931a9b7de0bcaf3635034d4958b20ae9ab3a95a147b0421dd5f7ebff46c971010ebfc4adbbe0ad94d5498c853e7142c450d8c71de4b2f84edbf8acd2e16d00c8115b150b1c30e553dbb82635e781379fe2a56360420ff7e9f70cc64c00aba7e26ed13c7c19622865ae07248daced36416080f35f8cc157a857ed70ea4f347f17d1bee80fa038abd6e39b1ba06b97264388b21364f7c56e192d4b62d9b161405f32ab1e2594e86243e56fcf2cb30d21adef15b9940f91af681da24328c883d892670c6aa47940867a81830a82b82716895db810df1b834640abefb7db2092dd92912cb9a735175bc447be40a503cf22dfe565b4ed7a3293ca0dfd63a507430b323ee248ec82e843b673c97ad730728cebc", + b, + ); } fn modexp_nagydani_4_pow0x10001(b: &mut Criterion) { bench( - "modexp_nagydani_4_pow0x10001", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d130262
2b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", - "bed8b970c4a34849fc6926b08e40e20b21c15ed68d18f228904878d4370b56322d0da5789da0318768a374758e6375bfe4641fca5285ec7171828922160f48f5ca7efbfee4d5148612c38ad683ae4e3c3a053d2b7c098cf2b34f2cb19146eadd53c86b2d7ccf3d83b2c370bfb840913ee3879b1057a6b4e07e110b6bcd5e958bc71a14798c91d518cc70abee264b0d25a4110962a764b364ac0b0dd1ee8abc8426d775ec0f22b7e47b32576afaf1b5a48f64573ed1c5c29f50ab412188d9685307323d990802b81dacc06c6e05a1e901830ba9fcc67688dc29c5e27bde0a6e845ca925f5454b6fb3747edfaa2a5820838fb759eadf57f7cb5cec57fc213ddd8a4298fa079c3c0f472b07fb15aa6a7f0a3780bd296ff6a62e58ef443870b02260bd4fd2bbc98255674b8e1f1f9f8d33c7170b0ebbea4523b695911abbf26e41885344823bd0587115fdd83b721a4e8457a31c9a84b3d3520a07e0e35df7f48e5a9d534d0ec7feef1ff74de6a11e7f93eab95175b6ce22c68d78a642ad642837897ec11349205d8593ac19300207572c38d29ca5dfa03bc14cdbc32153c80e5cc3e739403d34c75915e49beb43094cc6dcafb3665b305ddec9286934ae66ec6b777ca528728c851318eb0f207b39f1caaf96db6eeead6b55ed08f451939314577d42bcc9f97c0b52d0234f88fd07e4c1d7780fdebc025cfffcb572cb27a8c33963", - b, - ); + "modexp_nagydani_4_pow0x10001", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d130262
2b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", + "bed8b970c4a34849fc6926b08e40e20b21c15ed68d18f228904878d4370b56322d0da5789da0318768a374758e6375bfe4641fca5285ec7171828922160f48f5ca7efbfee4d5148612c38ad683ae4e3c3a053d2b7c098cf2b34f2cb19146eadd53c86b2d7ccf3d83b2c370bfb840913ee3879b1057a6b4e07e110b6bcd5e958bc71a14798c91d518cc70abee264b0d25a4110962a764b364ac0b0dd1ee8abc8426d775ec0f22b7e47b32576afaf1b5a48f64573ed1c5c29f50ab412188d9685307323d990802b81dacc06c6e05a1e901830ba9fcc67688dc29c5e27bde0a6e845ca925f5454b6fb3747edfaa2a5820838fb759eadf57f7cb5cec57fc213ddd8a4298fa079c3c0f472b07fb15aa6a7f0a3780bd296ff6a62e58ef443870b02260bd4fd2bbc98255674b8e1f1f9f8d33c7170b0ebbea4523b695911abbf26e41885344823bd0587115fdd83b721a4e8457a31c9a84b3d3520a07e0e35df7f48e5a9d534d0ec7feef1ff74de6a11e7f93eab95175b6ce22c68d78a642ad642837897ec11349205d8593ac19300207572c38d29ca5dfa03bc14cdbc32153c80e5cc3e739403d34c75915e49beb43094cc6dcafb3665b305ddec9286934ae66ec6b777ca528728c851318eb0f207b39f1caaf96db6eeead6b55ed08f451939314577d42bcc9f97c0b52d0234f88fd07e4c1d7780fdebc025cfffcb572cb27a8c33963", + b, + ); } fn modexp_nagydani_5_square(b: &mut Criterion) { bench( - "modexp_nagydani_5_square", - "0000000000000000000000000000000000000005", // modexp - 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf6
8acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e2874
6293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", - "d61fe4e3f32ac260915b5b03b78a86d11bfc41d973fce5b0cc59035cf8289a8a2e3878ea15fa46565b0d806e2f85b53873ea20ed653869b688adf83f3ef444535bf91598ff7e80f334fb782539b92f39f55310cc4b35349ab7b278346eda9bc37c0d8acd3557fae38197f412f8d9e57ce6a76b7205c23564cab06e5615be7c6f05c3d05ec690cba91da5e89d55b152ff8dd2157dc5458190025cf94b1ad98f7cbe64e9482faba95e6b33844afc640892872b44a9932096508f4a782a4805323808f23e54b6ff9b841dbfa87db3505ae4f687972c18ea0f0d0af89d36c1c2a5b14560c153c3fee406f5cf15cfd1c0bb45d767426d465f2f14c158495069d0c5955a00150707862ecaae30624ebacdd8ac33e4e6aab3ff90b6ba445a84689386b9e945d01823a65874444316e83767290fcff630d2477f49d5d8ffdd200e08ee1274270f86ed14c687895f6caf5ce528bd970c20d2408a9ba66216324c6a011ac4999098362dbd98a038129a2d40c8da6ab88318aa3046cb660327cc44236d9e5d2163bd0959062195c51ed93d0088b6f92051fc99050ece2538749165976233697ab4b610385366e5ce0b02ad6b61c168ecfbedcdf74278a38de340fd7a5fead8e588e294795f9b011e2e60377a89e25c90e145397cdeabc60fd32444a6b7642a611a83c464d8b8976666351b4865c37b02e6dc21dbcdf5f930341707b618cc0f03c3122646b3385c9df9f2ec730eec9d49e7dfc9153b6e6289da8c4f0ebea9ccc1b751948e3bb7171c9e4d57423b0eeeb79095c030cb52677b3f7e0b45c30f645391f3f9c957afa549c4e0b2465b03c67993cd200b1af01035962edbc4c9e89b31c82ac121987d6529dafdeef67a132dc04b6dc68e77f22862040b75e2ceb9ff16da0fca534e6db7bd12fa7b7f51b6c08c1e23dfcdb7acbd2da0b51c87ffbced065a612e9b1c8bba9b7e2d8d7a2f04fcc4aaf355b60d764879a76b5e16762d5f2f55d585d0c8e82df6940960cddfb72c91dfa71f6b4e1c6ca25dfc39a878e998a663c04fe29d5e83b9586d047b4d7ff70a9f0d44f127e7d741685ca75f11629128d916a0ffef4be586a30c4b70389cc746e84ebf177c01ee8a4511cfbb9d1ecf7f7b33c7dd8177896e10bbc82f838dcd6db7ac67de62bf46b6a640fb580c5d1d2708f3862e3d2b645d0d18e49ef088053e3a220
adc0e033c2afcfe61c90e32151152eb3caaf746c5e377d541cafc6cbb0cc0fa48b5caf1728f2e1957f5addfc234f1a9d89e40d49356c9172d0561a695fce6dab1d412321bbf407f63766ffd7b6b3d79bcfa07991c5a9709849c1008689e3b47c50d613980bec239fb64185249d055b30375ccb4354d71fe4d05648fbf6c80634dfc3575f2f24abb714c1e4c95e8896763bf4316e954c7ad19e5780ab7a040ca6fb9271f90a8b22ae738daf6cb", - b, - ); + "modexp_nagydani_5_square", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627d
ea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bb
e0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", + "d61fe4e3f32ac260915b5b03b78a86d11bfc41d973fce5b0cc59035cf8289a8a2e3878ea15fa46565b0d806e2f85b53873ea20ed653869b688adf83f3ef444535bf91598ff7e80f334fb782539b92f39f55310cc4b35349ab7b278346eda9bc37c0d8acd3557fae38197f412f8d9e57ce6a76b7205c23564cab06e5615be7c6f05c3d05ec690cba91da5e89d55b152ff8dd2157dc5458190025cf94b1ad98f7cbe64e9482faba95e6b33844afc640892872b44a9932096508f4a782a4805323808f23e54b6ff9b841dbfa87db3505ae4f687972c18ea0f0d0af89d36c1c2a5b14560c153c3fee406f5cf15cfd1c0bb45d767426d465f2f14c158495069d0c5955a00150707862ecaae30624ebacdd8ac33e4e6aab3ff90b6ba445a84689386b9e945d01823a65874444316e83767290fcff630d2477f49d5d8ffdd200e08ee1274270f86ed14c687895f6caf5ce528bd970c20d2408a9ba66216324c6a011ac4999098362dbd98a038129a2d40c8da6ab88318aa3046cb660327cc44236d9e5d2163bd0959062195c51ed93d0088b6f92051fc99050ece2538749165976233697ab4b610385366e5ce0b02ad6b61c168ecfbedcdf74278a38de340fd7a5fead8e588e294795f9b011e2e60377a89e25c90e145397cdeabc60fd32444a6b7642a611a83c464d8b8976666351b4865c37b02e6dc21dbcdf5f930341707b618cc0f03c3122646b3385c9df9f2ec730eec9d49e7dfc9153b6e6289da8c4f0ebea9ccc1b751948e3bb7171c9e4d57423b0eeeb79095c030cb52677b3f7e0b45c30f645391f3f9c957afa549c4e0b2465b03c67993cd200b1af01035962edbc4c9e89b31c82ac121987d6529dafdeef67a132dc04b6dc68
e77f22862040b75e2ceb9ff16da0fca534e6db7bd12fa7b7f51b6c08c1e23dfcdb7acbd2da0b51c87ffbced065a612e9b1c8bba9b7e2d8d7a2f04fcc4aaf355b60d764879a76b5e16762d5f2f55d585d0c8e82df6940960cddfb72c91dfa71f6b4e1c6ca25dfc39a878e998a663c04fe29d5e83b9586d047b4d7ff70a9f0d44f127e7d741685ca75f11629128d916a0ffef4be586a30c4b70389cc746e84ebf177c01ee8a4511cfbb9d1ecf7f7b33c7dd8177896e10bbc82f838dcd6db7ac67de62bf46b6a640fb580c5d1d2708f3862e3d2b645d0d18e49ef088053e3a220adc0e033c2afcfe61c90e32151152eb3caaf746c5e377d541cafc6cbb0cc0fa48b5caf1728f2e1957f5addfc234f1a9d89e40d49356c9172d0561a695fce6dab1d412321bbf407f63766ffd7b6b3d79bcfa07991c5a9709849c1008689e3b47c50d613980bec239fb64185249d055b30375ccb4354d71fe4d05648fbf6c80634dfc3575f2f24abb714c1e4c95e8896763bf4316e954c7ad19e5780ab7a040ca6fb9271f90a8b22ae738daf6cb", + b, + ); } fn modexp_nagydani_5_qube(b: &mut Criterion) { bench( - "modexp_nagydani_5_qube", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d
590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd
66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", - 
"5f9c70ec884926a89461056ad20ac4c30155e817f807e4d3f5bb743d789c83386762435c3627773fa77da5144451f2a8aad8adba88e0b669f5377c5e9bad70e45c86fe952b613f015a9953b8a5de5eaee4566acf98d41e327d93a35bd5cef4607d025e58951167957df4ff9b1627649d3943805472e5e293d3efb687cfd1e503faafeb2840a3e3b3f85d016051a58e1c9498aab72e63b748d834b31eb05d85dcde65e27834e266b85c75cc4ec0135135e0601cb93eeeb6e0010c8ceb65c4c319623c5e573a2c8c9fbbf7df68a930beb412d3f4dfd146175484f45d7afaa0d2e60684af9b34730f7c8438465ad3e1d0c3237336722f2aa51095bd5759f4b8ab4dda111b684aa3dac62a761722e7ae43495b7709933512c81c4e3c9133a51f7ce9f2b51fcec064f65779666960b4e45df3900f54311f5613e8012dd1b8efd359eda31a778264c72aa8bb419d862734d769076bce2810011989a45374e5c5d8729fec21427f0bf397eacbb4220f603cf463a4b0c94efd858ffd9768cd60d6ce68d755e0fbad007ce5c2223d70c7018345a102e4ab3c60a13a9e7794303156d4c2063e919f2153c13961fb324c80b240742f47773a7a8e25b3e3fb19b00ce839346c6eb3c732fbc6b888df0b1fe0a3d07b053a2e9402c267b2d62f794d8a2840526e3ade15ce2264496ccd7519571dfde47f7a4bb16292241c20b2be59f3f8fb4f6383f232d838c5a22d8c95b6834d9d2ca493f5a505ebe8899503b0e8f9b19e6e2dd81c1628b80016d02097e0134de51054c4e7674824d4d758760fc52377d2cad145e259aa2ffaf54139e1a66b1e0c1c191e32ac59474c6b526f5b3ba07d3e5ec286eddf531fcd5292869be58c9f22ef91026159f7cf9d05ef66b4299f4da48cc1635bf2243051d342d378a22c83390553e873713c0454ce5f3234397111ac3fe3207b86f0ed9fc025c81903e1748103692074f83824fda6341be4f95ff00b0a9a208c267e12fa01825054cc0513629bf3dbb56dc5b90d4316f87654a8be18227978ea0a8a522760cad620d0d14fd38920fb7321314062914275a5f99f677145a6979b156bd82ecd36f23f8e1273cc2759ecc0b2c69d94dad5211d1bed939dd87ed9e07b91d49713a6e16ade0a98aea789f04994e318e4ff2c8a188cd8d43aeb52c6daa3bc29b4af50ea82a247c5cd67b573b34cbadcc0a376d3bbd530d50367b42705d870f2e27a8197ef46070528bfe408360faa2ebb8bf76e9f388572842bcb119f4d84ee34ae31f5cc594f23705a49197b181fb78ed1ec99499c690f843a4d0cf2e226d118e9372271054fbabdcc5c92ae9fefaef0589cd0e722eaf30c1703ec4289c7fd81beaa8a455ccee5298e31e2080c10c366a6fcf56f7d13582ad0bcad037c612b710fc
595b70fbefaaca23623b60c6c39b11beb8e5843b6b3dac60f", - b, - ); + "modexp_nagydani_5_qube", + "0000000000000000000000000000000000000005", // modexp + "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804
ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c
857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", + "5f9c70ec884926a89461056ad20ac4c30155e817f807e4d3f5bb743d789c83386762435c3627773fa77da5144451f2a8aad8adba88e0b669f5377c5e9bad70e45c86fe952b613f015a9953b8a5de5eaee4566acf98d41e327d93a35bd5cef4607d025e58951167957df4ff9b1627649d3943805472e5e293d3efb687cfd1e503faafeb2840a3e3b3f85d016051a58e1c9498aab72e63b748d834b31eb05d85dcde65e27834e266b85c75cc4ec0135135e0601cb93eeeb6e0010c8ceb65c4c319623c5e573a2c8c9fbbf7df68a930beb412d3f4dfd146175484f45d7afaa0d2e60684af9b34730f7c8438465ad3e1d0c3237336722f2aa51095bd5759f4b8ab4dda111b684aa3dac62a761722e7ae43495b7709933512c81c4e3c9133a51f7ce9f2b51fcec064f65779666960b4e45df3900f54311f5613e8012dd1b8efd359eda31a778264c72aa8bb419d862734d769076bce2810011989a45374e5c5d8729fec21427f0bf397eacbb4220f603cf463a4b0c94efd858ffd9768cd60d6ce68d755e0fbad007ce5c2223d70c7018345a102e4ab3c60a13a9e7794303156d4c2063e919f2153c13961fb324c80b240742f47773a7a8e25b3e3fb19b00ce839346c6eb3c732fbc6b888df0b1fe0a3d07b053a2e9402c267b2d62f794d8a2840526e3ade15ce2264496ccd7519571dfde47f7a4bb16292241c20b2be59f3f8fb4f6383f232d838c5a22d8c95b6834d9d2ca493f5a505ebe8899503b0e8f9b19e6e2dd81c1628b80016d02097e0134de51054c4e7674824d4d758760fc52377d2cad145e259aa2ffaf54139e1a66b1e0c1c191e32ac59474c6b526f5b3ba07d3e5ec286eddf531fcd5292869be58c9f22ef91026159f7cf9d05ef66b4299f4da48cc1635bf2243051d342d378a22c83390553e873713c0454ce5f3234397111ac3fe3207b86f0ed9fc025c81903e1748103692074f83824fda6341be4f95ff00b0a9a208c267e12fa01825054cc0513629bf3dbb56dc5b90d4316f87654a8be18227978ea0a8a522760cad620d0d14fd38920fb7321314062914275a5f99f677145a6979b156bd82ecd36f23
f8e1273cc2759ecc0b2c69d94dad5211d1bed939dd87ed9e07b91d49713a6e16ade0a98aea789f04994e318e4ff2c8a188cd8d43aeb52c6daa3bc29b4af50ea82a247c5cd67b573b34cbadcc0a376d3bbd530d50367b42705d870f2e27a8197ef46070528bfe408360faa2ebb8bf76e9f388572842bcb119f4d84ee34ae31f5cc594f23705a49197b181fb78ed1ec99499c690f843a4d0cf2e226d118e9372271054fbabdcc5c92ae9fefaef0589cd0e722eaf30c1703ec4289c7fd81beaa8a455ccee5298e31e2080c10c366a6fcf56f7d13582ad0bcad037c612b710fc595b70fbefaaca23623b60c6c39b11beb8e5843b6b3dac60f", + b, + ); } fn modexp_nagydani_5_pow0x10001(b: &mut Criterion) { bench( - "modexp_nagydani_5_pow0x10001", - "0000000000000000000000000000000000000005", // modexp - "000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16
535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e6
1e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", - "5a0eb2bdf0ac1cae8e586689fa16cd4b07dfdedaec8a110ea1fdb059dd5253231b6132987598dfc6e11f86780428982d50cf68f67ae452622c3b336b537ef3298ca645e8f89ee39a26758206a5a3f6409afc709582f95274b57b71fae5c6b74619ae6f089a5393c5b79235d9caf699d23d88fb873f78379690ad8405e34c19f5257d596580c7a6a7206a3712825afe630c76b31cdb4a23e7f0632e10f14f4e282c81a66451a26f8df2a352b5b9f607a7198449d1b926e27036810368e691a74b91c61afa73d9d3b99453e7c8b50fd4f09c039a2f2feb5c419206694c31b92df1d9586140cb3417b38d0c503c7b508cc2ed12e813a1c795e9829eb39ee78eeaf360a169b491a1d4e419574e712402de9d48d54c1ae5e03739b7156615e8267e1fb0a897f067afd11fb33f6e24182d7aaaaa18fe5bc1982f20d6b871e5a398f0f6f718181d31ec225cfa9a0a70124ed9a70031bdf0c1c7829f708b6e17d50419ef361cf77d99c85f44607186c8d683106b8bd38a49b5d0fb503b397a83388c5678dcfcc737499d84512690701ed621a6f0172aecf037184ddf0f2453e4053024018e5ab2e30d6d5363b56e8b41509317c99042f517247474ab3abc848e00a07f69c254f46f2a05cf6ed84e5cc906a518fdcfdf2c61ce731f24c5264f1a25fc04934dc28aec112134dd523f70115074ca34e3807aa4cb925147f3a0ce152d323bd8c675ace446d0fd1
ae30c4b57f0eb2c23884bc18f0964c0114796c5b6d080c3d89175665fbf63a6381a6a9da39ad070b645c8bb1779506da14439a9f5b5d481954764ea114fac688930bc68534d403cff4210673b6a6ff7ae416b7cd41404c3d3f282fcd193b86d0f54d0006c2a503b40d5c3930da980565b8f9630e9493a79d1c03e74e5f93ac8e4dc1a901ec5e3b3e57049124c7b72ea345aa359e782285d9e6a5c144a378111dd02c40855ff9c2be9b48425cb0b2fd62dc8678fd151121cf26a65e917d65d8e0dacfae108eb5508b601fb8ffa370be1f9a8b749a2d12eeab81f41079de87e2d777994fa4d28188c579ad327f9957fb7bdecec5c680844dd43cb57cf87aeb763c003e65011f73f8c63442df39a92b946a6bd968a1c1e4d5fa7d88476a68bd8e20e5b70a99259c7d3f85fb1b65cd2e93972e6264e74ebf289b8b6979b9b68a85cd5b360c1987f87235c3c845d62489e33acf85d53fa3561fe3a3aee18924588d9c6eba4edb7a4d106b31173e42929f6f0c48c80ce6a72d54eca7c0fe870068b7a7c89c63cdda593f5b32d3cb4ea8a32c39f00ab449155757172d66763ed9527019d6de6c9f2416aa6203f4d11c9ebee1e1d3845099e55504446448027212616167eb36035726daa7698b075286f5379cd3e93cb3e0cf4f9cb8d017facbb5550ed32d5ec5400ae57e47e2bf78d1eaeff9480cc765ceff39db500", - b, - ); + "modexp_nagydani_5_pow0x10001", + "0000000000000000000000000000000000000005", // modexp + 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf6
8acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e
28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", + "5a0eb2bdf0ac1cae8e586689fa16cd4b07dfdedaec8a110ea1fdb059dd5253231b6132987598dfc6e11f86780428982d50cf68f67ae452622c3b336b537ef3298ca645e8f89ee39a26758206a5a3f6409afc709582f95274b57b71fae5c6b74619ae6f089a5393c5b79235d9caf699d23d88fb873f78379690ad8405e34c19f5257d596580c7a6a7206a3712825afe630c76b31cdb4a23e7f0632e10f14f4e282c81a66451a26f8df2a352b5b9f607a7198449d1b926e27036810368e691a74b91c61afa73d9d3b99453e7c8b50fd4f09c039a2f2feb5c419206694c31b92df1d9586140cb3417b38d0c503c7b508cc2ed12e813a1c795e9829eb39ee78eeaf360a169b491a1d4e419574e712402de9d48d54c1ae5e03739b7156615e8267e1fb0a897f067afd11fb33f6e24182d7aaaaa18fe5bc1982f20d6b871e5a398f0f6f718181d31ec225cfa9a0a70124ed9a70031bdf0c1c7829f708b6e17d50419ef361cf77d99c85f44607186c8d683106b8bd38a49b5d0fb503b397a83388c5678dcfcc737499d84512690701ed621a6f0172aecf037184ddf0f2453e4053024018e5ab2e30d6d5363b56e8b41509317c99042f517247474ab3abc848e00a07f69c254f46f2a05cf6ed84e5cc906a518fdcfdf2c61ce731f24c5264f1a25fc04934dc28aec112134dd523f70115074ca34e3807aa4cb925147f3a0ce152d323bd8c675ace446d0fd1ae30c4b57f0eb2c23884bc18f0964c0114796c5b6d080c3d89175665fbf63a6381a6a9da39ad070b645c8bb1779506da14439a9f5b5d481954764ea114fac688930bc68534d403cff4210673b6a6ff7ae416b7cd41404c3d3f282fcd193b86d0f54d0006c2a503b40d5c3930da980565b8f9630e9493a79d1c03e74e5f93ac8e4dc1a901ec5e3b3e57049124c7b72ea345aa359e782285d9e6a5c144a378111dd02c40855ff9c2be9b48425cb0b2fd62dc8678fd151121cf26a65e917d65d8e0dacfae108eb5508b601fb8ffa370be1f9a8b749a2d12eeab81f41079de87e2d777994fa4d28188c579ad327f9957fb7bdecec5c680844dd43cb57cf87aeb763c003e65011f73f8c63442df39a92b946a6bd968a1c1e4d5fa7d88476a68bd8e20e5b70a99259c7d3f85fb1b65cd2e93972e6264e74ebf289b8b6979b9b68a85cd5b360c1987f87235c3c8
45d62489e33acf85d53fa3561fe3a3aee18924588d9c6eba4edb7a4d106b31173e42929f6f0c48c80ce6a72d54eca7c0fe870068b7a7c89c63cdda593f5b32d3cb4ea8a32c39f00ab449155757172d66763ed9527019d6de6c9f2416aa6203f4d11c9ebee1e1d3845099e55504446448027212616167eb36035726daa7698b075286f5379cd3e93cb3e0cf4f9cb8d017facbb5550ed32d5ec5400ae57e47e2bf78d1eaeff9480cc765ceff39db500", + b, + ); } fn alt_bn128_add_chfast1(b: &mut Criterion) { bench( - "alt_bn128_add_chfast1", - "0000000000000000000000000000000000000006", // alt_bn128_add - "18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f3726607c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7", - "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c915", - b, - ); + "alt_bn128_add_chfast1", + "0000000000000000000000000000000000000006", // alt_bn128_add + "18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f3726607c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7", + "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c915", + b, + ); } fn alt_bn128_add_chfast2(b: &mut Criterion) { bench( - "alt_bn128_add_chfast2", - "0000000000000000000000000000000000000006", // alt_bn128_add - "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c91518b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f37266", - "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb204", - b, - ); + "alt_bn128_add_chfast2", + 
"0000000000000000000000000000000000000006", // alt_bn128_add + "2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c91518b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f37266", + "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb204", + b, + ); } fn alt_bn128_add_cdetrio1(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio1", - "0000000000000000000000000000000000000006", // alt_bn128_add - "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_add_cdetrio1", + "0000000000000000000000000000000000000006", // alt_bn128_add + "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_add_cdetrio2(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio2", - "0000000000000000000000000000000000000006", // alt_bn128_add - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_add_cdetrio2", + 
"0000000000000000000000000000000000000006", // alt_bn128_add + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_add_cdetrio3(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio3", - "0000000000000000000000000000000000000006", // alt_bn128_add - "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_add_cdetrio3", + "0000000000000000000000000000000000000006", // alt_bn128_add + "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_add_cdetrio4(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio4", - "0000000000000000000000000000000000000006", // alt_bn128_add - "", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_add_cdetrio4", + "0000000000000000000000000000000000000006", // alt_bn128_add + "", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_add_cdetrio5(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio5", - "0000000000000000000000000000000000000006", // alt_bn128_add - 
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_add_cdetrio5", + "0000000000000000000000000000000000000006", // alt_bn128_add + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_add_cdetrio6(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio6", - "0000000000000000000000000000000000000006", // alt_bn128_add - "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - b, - ); + "alt_bn128_add_cdetrio6", + "0000000000000000000000000000000000000006", // alt_bn128_add + 
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + b, + ); } fn alt_bn128_add_cdetrio7(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio7", - "0000000000000000000000000000000000000006", // alt_bn128_add - "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - b, - ); + "alt_bn128_add_cdetrio7", + "0000000000000000000000000000000000000006", // alt_bn128_add + "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + b, + ); } fn alt_bn128_add_cdetrio8(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio8", - "0000000000000000000000000000000000000006", // alt_bn128_add - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - b, - ); + "alt_bn128_add_cdetrio8", + "0000000000000000000000000000000000000006", // alt_bn128_add + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + b, + ); } fn alt_bn128_add_cdetrio9(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio9", - "0000000000000000000000000000000000000006", // alt_bn128_add - "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - b, - ); + "alt_bn128_add_cdetrio9", + "0000000000000000000000000000000000000006", // alt_bn128_add + "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + b, + ); } fn alt_bn128_add_cdetrio10(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio10", - "0000000000000000000000000000000000000006", // alt_bn128_add - 
"000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - b, - ); + "alt_bn128_add_cdetrio10", + "0000000000000000000000000000000000000006", // alt_bn128_add + "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + b, + ); } fn alt_bn128_add_cdetrio11(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio11", - "0000000000000000000000000000000000000006", // alt_bn128_add - "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", - "030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd315ed738c0e0a7c92e7845f96b2ae9c0a68a6a449e3538fc7ff3ebf7a5a18a2c4", - b, - ); + "alt_bn128_add_cdetrio11", + "0000000000000000000000000000000000000006", // alt_bn128_add + 
"0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", + "030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd315ed738c0e0a7c92e7845f96b2ae9c0a68a6a449e3538fc7ff3ebf7a5a18a2c4", + b, + ); } fn alt_bn128_add_cdetrio12(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio12", - "0000000000000000000000000000000000000006", // alt_bn128_add - "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd315ed738c0e0a7c92e7845f96b2ae9c0a68a6a449e3538fc7ff3ebf7a5a18a2c4", - b, - ); + "alt_bn128_add_cdetrio12", + "0000000000000000000000000000000000000006", // alt_bn128_add + "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd315ed738c0e0a7c92e7845f96b2ae9c0a68a6a449e3538fc7ff3ebf7a5a18a2c4", + b, + ); } fn alt_bn128_add_cdetrio13(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio13", - "0000000000000000000000000000000000000006", // alt_bn128_add - 
"17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98", - "15bf2bb17880144b5d1cd2b1f46eff9d617bffd1ca57c37fb5a49bd84e53cf66049c797f9ce0d17083deb32b5e36f2ea2a212ee036598dd7624c168993d1355f", - b, - ); + "alt_bn128_add_cdetrio13", + "0000000000000000000000000000000000000006", // alt_bn128_add + "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98", + "15bf2bb17880144b5d1cd2b1f46eff9d617bffd1ca57c37fb5a49bd84e53cf66049c797f9ce0d17083deb32b5e36f2ea2a212ee036598dd7624c168993d1355f", + b, + ); } fn alt_bn128_add_cdetrio14(b: &mut Criterion) { bench( - "alt_bn128_add_cdetrio14", - "0000000000000000000000000000000000000006", // alt_bn128_add - "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa92e83f8d734803fc370eba25ed1f6b8768bd6d83887b87165fc2434fe11a830cb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_add_cdetrio14", + "0000000000000000000000000000000000000006", // alt_bn128_add + 
"17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7c17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa92e83f8d734803fc370eba25ed1f6b8768bd6d83887b87165fc2434fe11a830cb00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_mul_chfast1(b: &mut Criterion) { bench( - "alt_bn128_mul_chfast1", - "0000000000000000000000000000000000000007", // alt_bn128_mul - "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb20400000000000000000000000000000000000000000000000011138ce750fa15c2", - "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc", - b, - ); + "alt_bn128_mul_chfast1", + "0000000000000000000000000000000000000007", // alt_bn128_mul + "2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb20400000000000000000000000000000000000000000000000011138ce750fa15c2", + "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc", + b, + ); } fn alt_bn128_mul_chfast2(b: &mut Criterion) { bench( - "alt_bn128_mul_chfast2", - "0000000000000000000000000000000000000007", // alt_bn128_mul - "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46", - "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e", - b, - ); + "alt_bn128_mul_chfast2", + 
"0000000000000000000000000000000000000007", // alt_bn128_mul + "070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd46", + "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e", + b, + ); } fn alt_bn128_mul_chfast3(b: &mut Criterion) { bench( - "alt_bn128_mul_chfast3", - "0000000000000000000000000000000000000007", // alt_bn128_mul - "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e183227397098d014dc2822db40c0ac2ecbc0b548b438e5469e10460b6c3e7ea3", - "14789d0d4a730b354403b5fac948113739e276c23e0258d8596ee72f9cd9d3230af18a63153e0ec25ff9f2951dd3fa90ed0197bfef6e2a1a62b5095b9d2b4a27", - b, - ); + "alt_bn128_mul_chfast3", + "0000000000000000000000000000000000000007", // alt_bn128_mul + "025a6f4181d2b4ea8b724290ffb40156eb0adb514c688556eb79cdea0752c2bb2eff3f31dea215f1eb86023a133a996eb6300b44da664d64251d05381bb8a02e183227397098d014dc2822db40c0ac2ecbc0b548b438e5469e10460b6c3e7ea3", + "14789d0d4a730b354403b5fac948113739e276c23e0258d8596ee72f9cd9d3230af18a63153e0ec25ff9f2951dd3fa90ed0197bfef6e2a1a62b5095b9d2b4a27", + b, + ); } fn alt_bn128_mul_cdetrio1(b: &mut Criterion) { bench( - "alt_bn128_mul_cdetrio1", - "0000000000000000000000000000000000000007", // alt_bn128_mul - "1a87b0584ce92f4593d161480614f2989035225609f08058ccfa3d0f940febe31a2f3c951f6dadcc7ee9007dff81504b0fcd6d7cf59996efdc33d92bf7f9f8f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "2cde5879ba6f13c0b5aa4ef627f159a3347df9722efce88a9afbb20b763b4c411aa7e43076f6aee272755a7f9b84832e71559ba0d2e0b17d5f9f01755e5b0d11", - b, - ); + "alt_bn128_mul_cdetrio1", + "0000000000000000000000000000000000000007", // alt_bn128_mul + 
"1a87b0584ce92f4593d161480614f2989035225609f08058ccfa3d0f940febe31a2f3c951f6dadcc7ee9007dff81504b0fcd6d7cf59996efdc33d92bf7f9f8f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "2cde5879ba6f13c0b5aa4ef627f159a3347df9722efce88a9afbb20b763b4c411aa7e43076f6aee272755a7f9b84832e71559ba0d2e0b17d5f9f01755e5b0d11", + b, + ); } fn alt_bn128_mul_cdetrio6(b: &mut Criterion) { bench( - "alt_bn128_mul_cdetrio6", - "0000000000000000000000000000000000000007", // alt_bn128_mul - "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "29e587aadd7c06722aabba753017c093f70ba7eb1f1c0104ec0564e7e3e21f6022b1143f6a41008e7755c71c3d00b6b915d386de21783ef590486d8afa8453b1", - b, - ); + "alt_bn128_mul_cdetrio6", + "0000000000000000000000000000000000000007", // alt_bn128_mul + "17c139df0efee0f766bc0204762b774362e4ded88953a39ce849a8a7fa163fa901e0559bacb160664764a357af8a9fe70baa9258e0b959273ffc5718c6d4cc7cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "29e587aadd7c06722aabba753017c093f70ba7eb1f1c0104ec0564e7e3e21f6022b1143f6a41008e7755c71c3d00b6b915d386de21783ef590486d8afa8453b1", + b, + ); } fn alt_bn128_mul_cdetrio11(b: &mut Criterion) { bench( - "alt_bn128_mul_cdetrio11", - "0000000000000000000000000000000000000007", // alt_bn128_mul - "039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "00a1a234d08efaa2616607e31eca1980128b00b415c845ff25bba3afcb81dc00242077290ed33906aeb8e42fd98c41bcb9057ba03421af3f2d08cfc441186024", - b, - ); + "alt_bn128_mul_cdetrio11", + "0000000000000000000000000000000000000007", // alt_bn128_mul + 
"039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d98ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "00a1a234d08efaa2616607e31eca1980128b00b415c845ff25bba3afcb81dc00242077290ed33906aeb8e42fd98c41bcb9057ba03421af3f2d08cfc441186024", + b, + ); } fn alt_bn128_pairing_jeff1(b: &mut Criterion) { bench( - "alt_bn128_pairing_jeff1", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_jeff1", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + 
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_jeff2(b: &mut Criterion) { bench( - "alt_bn128_pairing_jeff2", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "2eca0c7238bf16e83e7a1e6c5d49540685ff51380f309842a98561558019fc0203d3260361bb8451de5ff5ecd17f010ff22f5c31cdf184e9020b06fa5997db841213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f06967a1237ebfeca9aaae0d6d0bab8e28c198c5a339ef8a2407e31cdac516db922160fa257a5fd5b280642ff47b65eca77e626cb685c84fa6d3b6882a283ddd1198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_jeff2", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + 
"2eca0c7238bf16e83e7a1e6c5d49540685ff51380f309842a98561558019fc0203d3260361bb8451de5ff5ecd17f010ff22f5c31cdf184e9020b06fa5997db841213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f06967a1237ebfeca9aaae0d6d0bab8e28c198c5a339ef8a2407e31cdac516db922160fa257a5fd5b280642ff47b65eca77e626cb685c84fa6d3b6882a283ddd1198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_jeff3(b: &mut Criterion) { bench( - "alt_bn128_pairing_jeff3", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "0f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd216da2f5cb6be7a0aa72c440c53c9bbdfec6c36c7d515536431b3a865468acbba2e89718ad33c8bed92e210e81d1853435399a271913a6520736a4729cf0d51eb01a9e2ffa2e92599b68e44de5bcf354fa2642bd4f26b259daa6f7ce3ed57aeb314a9a87b789a58af499b314e13c3d65bede56c07ea2d418d6874857b70763713178fb49a2d6cd347dc58973ff49613a20757d0fcc22079f9abd10c3baee245901b9e027bd5cfc2cb5db82d4dc9677ac795ec500ecd47deee3b5da006d6d049b811d7511c78158de484232fc68daf8a45cf217d1c2fae693ff5871e8752d73b21198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_jeff3", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + 
"0f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd216da2f5cb6be7a0aa72c440c53c9bbdfec6c36c7d515536431b3a865468acbba2e89718ad33c8bed92e210e81d1853435399a271913a6520736a4729cf0d51eb01a9e2ffa2e92599b68e44de5bcf354fa2642bd4f26b259daa6f7ce3ed57aeb314a9a87b789a58af499b314e13c3d65bede56c07ea2d418d6874857b70763713178fb49a2d6cd347dc58973ff49613a20757d0fcc22079f9abd10c3baee245901b9e027bd5cfc2cb5db82d4dc9677ac795ec500ecd47deee3b5da006d6d049b811d7511c78158de484232fc68daf8a45cf217d1c2fae693ff5871e8752d73b21198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_jeff4(b: &mut Criterion) { bench( - "alt_bn128_pairing_jeff4", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"2f2ea0b3da1e8ef11914acf8b2e1b32d99df51f5f4f206fc6b947eae860eddb6068134ddb33dc888ef446b648d72338684d678d2eb2371c61a50734d78da4b7225f83c8b6ab9de74e7da488ef02645c5a16a6652c3c71a15dc37fe3a5dcb7cb122acdedd6308e3bb230d226d16a105295f523a8a02bfc5e8bd2da135ac4c245d065bbad92e7c4e31bf3757f1fe7362a63fbfee50e7dc68da116e67d600d9bf6806d302580dc0661002994e7cd3a7f224e7ddc27802777486bf80f40e4ca3cfdb186bac5188a98c45e6016873d107f5cd131f3a3e339d0375e58bd6219347b008122ae2b09e539e152ec5364e7e2204b03d11d3caa038bfc7cd499f8176aacbee1f39e4e4afc4bc74790a4a028aff2c3d2538731fb755edefd8cb48d6ea589b5e283f150794b6736f670d6a1033f9b46c6f5204f50813eb85c8dc4b59db1c5d39140d97ee4d2b36d99bc49974d18ecca3e7ad51011956051b464d9e27d46cc25e0764bb98575bd466d32db7b15f582b2d5c452b36aa394b789366e5e3ca5aabd415794ab061441e51d01e94640b7e3084a07e02c78cf3103c542bc5b298669f211b88da1679b0b64a63b7e0e7bfe52aae524f73a55be7fe70c7e9bfc94b4cf0da1213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_jeff4", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + 
"2f2ea0b3da1e8ef11914acf8b2e1b32d99df51f5f4f206fc6b947eae860eddb6068134ddb33dc888ef446b648d72338684d678d2eb2371c61a50734d78da4b7225f83c8b6ab9de74e7da488ef02645c5a16a6652c3c71a15dc37fe3a5dcb7cb122acdedd6308e3bb230d226d16a105295f523a8a02bfc5e8bd2da135ac4c245d065bbad92e7c4e31bf3757f1fe7362a63fbfee50e7dc68da116e67d600d9bf6806d302580dc0661002994e7cd3a7f224e7ddc27802777486bf80f40e4ca3cfdb186bac5188a98c45e6016873d107f5cd131f3a3e339d0375e58bd6219347b008122ae2b09e539e152ec5364e7e2204b03d11d3caa038bfc7cd499f8176aacbee1f39e4e4afc4bc74790a4a028aff2c3d2538731fb755edefd8cb48d6ea589b5e283f150794b6736f670d6a1033f9b46c6f5204f50813eb85c8dc4b59db1c5d39140d97ee4d2b36d99bc49974d18ecca3e7ad51011956051b464d9e27d46cc25e0764bb98575bd466d32db7b15f582b2d5c452b36aa394b789366e5e3ca5aabd415794ab061441e51d01e94640b7e3084a07e02c78cf3103c542bc5b298669f211b88da1679b0b64a63b7e0e7bfe52aae524f73a55be7fe70c7e9bfc94b4cf0da1213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_jeff5(b: &mut Criterion) { bench( - "alt_bn128_pairing_jeff5", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"20a754d2071d4d53903e3b31a7e98ad6882d58aec240ef981fdf0a9d22c5926a29c853fcea789887315916bbeb89ca37edb355b4f980c9a12a94f30deeed30211213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f1abb4a25eb9379ae96c84fff9f0540abcfc0a0d11aeda02d4f37e4baf74cb0c11073b3ff2cdbb38755f8691ea59e9606696b3ff278acfc098fa8226470d03869217cee0a9ad79a4493b5253e2e4e3a39fc2df38419f230d341f60cb064a0ac290a3d76f140db8418ba512272381446eb73958670f00cf46f1d9e64cba057b53c26f64a8ec70387a13e41430ed3ee4a7db2059cc5fc13c067194bcc0cb49a98552fd72bd9edb657346127da132e5b82ab908f5816c826acb499e22f2412d1a2d70f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd2198a1f162a73261f112401aa2db79c7dab1533c9935c77290a6ce3b191f2318d198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_jeff5", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + 
"20a754d2071d4d53903e3b31a7e98ad6882d58aec240ef981fdf0a9d22c5926a29c853fcea789887315916bbeb89ca37edb355b4f980c9a12a94f30deeed30211213d2149b006137fcfb23036606f848d638d576a120ca981b5b1a5f9300b3ee2276cf730cf493cd95d64677bbb75fc42db72513a4c1e387b476d056f80aa75f21ee6226d31426322afcda621464d0611d226783262e21bb3bc86b537e986237096df1f82dff337dd5972e32a8ad43e28a78a96a823ef1cd4debe12b6552ea5f1abb4a25eb9379ae96c84fff9f0540abcfc0a0d11aeda02d4f37e4baf74cb0c11073b3ff2cdbb38755f8691ea59e9606696b3ff278acfc098fa8226470d03869217cee0a9ad79a4493b5253e2e4e3a39fc2df38419f230d341f60cb064a0ac290a3d76f140db8418ba512272381446eb73958670f00cf46f1d9e64cba057b53c26f64a8ec70387a13e41430ed3ee4a7db2059cc5fc13c067194bcc0cb49a98552fd72bd9edb657346127da132e5b82ab908f5816c826acb499e22f2412d1a2d70f25929bcb43d5a57391564615c9e70a992b10eafa4db109709649cf48c50dd2198a1f162a73261f112401aa2db79c7dab1533c9935c77290a6ce3b191f2318d198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_jeff6(b: &mut Criterion) { bench( - "alt_bn128_pairing_jeff6", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c103188585e2364128fe25c70558f1560f4f9350baf3959e603cc91486e110936198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_pairing_jeff6", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c103188585e2364128fe25c70558f1560f4f9350baf3959e603cc91486e110936198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_pairing_empty_data(b: &mut Criterion) { @@ -644,70 +643,70 @@ fn alt_bn128_pairing_empty_data(b: &mut Criterion) { fn alt_bn128_pairing_one_point(b: &mut Criterion) { bench( - 
"alt_bn128_pairing_one_point", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000000", - b, - ); + "alt_bn128_pairing_one_point", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000000", + b, + ); } fn alt_bn128_pairing_two_point_match_2(b: &mut Criterion) { bench( - "alt_bn128_pairing_two_point_match_2", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_two_point_match_2", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_two_point_match_3(b: &mut Criterion) { bench( - "alt_bn128_pairing_two_point_match_3", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_two_point_match_3", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_two_point_match_4(b: &mut Criterion) { bench( - "alt_bn128_pairing_two_point_match_4", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_two_point_match_4", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_ten_point_match_1(b: &mut Criterion) { bench( - "alt_bn128_pairing_ten_point_match_1", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - 
"00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa0000000000000000000000000000000000000000000000000000000000000001000000000000000
0000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_ten_point_match_1", + 
"0000000000000000000000000000000000000008", // alt_bn128_pairing + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa000000000000
00000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed275dc4a288d1afb3cbb1ac09187524c7db36395df7be3b99e673b13a075a65ec1d9befcd05a5323e6da4d435f3b617cdb3af83285c2df711ef39c01571827f9d", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn 
alt_bn128_pairing_ten_point_match_2(b: &mut Criterion) { bench( - "alt_bn128_pairing_ten_point_match_2", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3
505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4
ce6cc0166fa7daa", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_ten_point_match_2", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b
9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002203e205db4f19b37b60121b83a7333706db86431c6d835849957ed8c3928ad7927dc7234fd11d3e8c36c59277c3e6f149d5cd3cfa9a62aee49f8130962b4b3b9195e8aa5b7827463722b8c153931579d3505566b4edf48d498e185f0509de15204bb53b8977e5f92a0bc372742c4830944a59b4fe6b1c0466e2a6dad122b5d2e030644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd31a76dae6d3272396d0cbe61fced2bc532edac647851e3ac53ce1cc9c7e645a83198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6de
b4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } fn alt_bn128_pairing_ten_point_match_3(b: &mut Criterion) { bench( - "alt_bn128_pairing_ten_point_match_3", - "0000000000000000000000000000000000000008", // alt_bn128_pairing - "105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", - "0000000000000000000000000000000000000000000000000000000000000001", - b, - ); + "alt_bn128_pairing_ten_point_match_3", + "0000000000000000000000000000000000000008", // alt_bn128_pairing + 
"105456a333e6d636854f987ea7bb713dfd0ae8371a72aea313ae0c32c0bf10160cf031d41b41557f3e7e3ba0c51bebe5da8e6ecd855ec50fc87efcdeac168bcc0476be093a6d2b4bbf907172049874af11e1b6267606e00804d3ff0037ec57fd3010c68cb50161b7d1d96bb71edfec9880171954e56871abf3d93cc94d745fa114c059d74e5b6c4ec14ae5864ebe23a71781d86c29fb8fb6cce94f70d3de7a2101b33461f39d9e887dbb100f170a2345dde3c07e256d1dfa2b657ba5cd030427000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000021a2c3013d2ea92e13c800cde68ef56a294b883f6ac35d25f587c09b1b3c635f7290158a80cd3d66530f74dc94c94adb88f5cdb481acca997b6e60071f08a115f2f997f3dbd66a7afe07fe7862ce239edba9e05c5afff7f8a1259c9733b2dfbb929d1691530ca701b4a106054688728c9972c8512e9789e9567aae23e302ccd75", + "0000000000000000000000000000000000000000000000000000000000000001", + b, + ); } diff --git a/crates/ethcore/blockchain/Cargo.toml b/crates/ethcore/blockchain/Cargo.toml index 9f79c3ea1c..f9eec5ec58 100644 --- a/crates/ethcore/blockchain/Cargo.toml +++ b/crates/ethcore/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] -description = "OpenEthereum Blockchain Database, Test Generator, Configuration, Caching, Importing Blocks, and Block Information" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node Blockchain Database, Test Generator, Configuration, Caching, Importing Blocks, and Block Information" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "ethcore-blockchain" version = "0.1.0" @@ -20,12 +20,12 @@ log = "0.4" parity-bytes = "0.1" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" rand = "0.7.3" rayon = "1.1" rlp = { version = "0.4.6" } rlp_compress = { path = "../../util/rlp-compress" } -rlp_derive = { path = "../../util/rlp-derive" } +rlp-derive = { version = "0.2" } triehash-ethereum = { version = "0.2", path = "../../util/triehash-ethereum" } stats = 
{ path = "../../util/stats" } diff --git a/crates/ethcore/blockchain/src/blockchain.rs b/crates/ethcore/blockchain/src/blockchain.rs index 548cb64787..3176f49500 100644 --- a/crates/ethcore/blockchain/src/blockchain.rs +++ b/crates/ethcore/blockchain/src/blockchain.rs @@ -1573,7 +1573,7 @@ impl BlockChain { } /// Iterator that lists `first` and then all of `first`'s ancestors, by extended header. - pub fn ancestry_with_metadata_iter<'a>(&'a self, first: H256) -> AncestryWithMetadataIter { + pub fn ancestry_with_metadata_iter<'a>(&'a self, first: H256) -> AncestryWithMetadataIter<'a> { AncestryWithMetadataIter { current: if self.is_known(&first) { first diff --git a/crates/ethcore/res/contracts/hbbft_connectivity_tracker.json b/crates/ethcore/res/contracts/hbbft_connectivity_tracker.json new file mode 100644 index 0000000000..c67daf202e --- /dev/null +++ b/crates/ethcore/res/contracts/hbbft_connectivity_tracker.json @@ -0,0 +1,213 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "caller", + "type": "address" + }, + { + "internalType": "address", + "name": "validator", + "type": "address" + }, + { + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + } + ], + "name": "checkReportReconnectCallable", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "currentEpoch", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "earlyEpochEndThreshold", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "earlyEpochEndToleranceLevel", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": 
"function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "validator", + "type": "address" + } + ], + "name": "getCurrentConnectionStatus", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "getFlaggedValidators", + "outputs": [ + { + "internalType": "address[]", + "name": "", + "type": "address[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "minReportAgeBlocks", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "validator", + "type": "address" + }, + { + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + } + ], + "name": "reportMissingConnectivity", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "validator", + "type": "address" + }, + { + "internalType": "uint256", + "name": "blockNumber", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "blockHash", + "type": "bytes32" + } + ], + "name": "reportReconnect", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "validatorConnectivityScore", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "epoch", + "type": "uint256" + }, + { + "internalType": "address", + "name": "validator", + "type": "address" + }, + { + 
"internalType": "address", + "name": "reporter", + "type": "address" + } + ], + "name": "isReported", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } + ] \ No newline at end of file diff --git a/crates/ethcore/res/local_tests/hbbft/hbbft_test_fork.json b/crates/ethcore/res/local_tests/hbbft/hbbft_test_fork.json new file mode 100644 index 0000000000..ec136e3c01 --- /dev/null +++ b/crates/ethcore/res/local_tests/hbbft/hbbft_test_fork.json @@ -0,0 +1,22 @@ +{ + "blockNumberStart": 10, + "blockNumberEnd": 100, + "validators": [ + "091d8eba3fedbd073d07d5da39fc42f61a2f4e7100018329f5b845aa86f64d6636de27aaa4c9872942811750a3db8cb8be03584fb50aaa557f8f13d4187db686", + "bf4ee1d6d3e72fe8ab1c8ccdeef3aff73665ca0dd2ef5497cd8152e329a9ebd283eba076c369977390f4697026209a9fdfa78c2d98e76c4bafb3f8fb669ee4b0" + ], + "parts": [ + "00000000000000000100000000000000b8192b72d83b24d9d13229521f74e6664135cd9dd3d5c4fee6b46244c7fb637e099a7cf83ff84f7da56f119391a5be45020000000000000099000000000000000410da71b86de8a64eda03139cb4aa2c86d7dc30b3a8125658d25454e31b6aaadc21d8a16290cd9a9367f7c81933e517f314481fe43f0385829e843a4ff5f614c0329f8725ba757f0f05543255b5ccf70e204af8ad430a8dc7be7ba923be1e3efbcd0c2852ad29e2516972dedbe8d1cfba4f2d3af8a08caeee33cd60d294b95ecd70d60e5a2b0929106eea34d457b95487f8864ea926e9b04b990000000000000004dbb6269a2ffe83a197d9687060e1a509affa81b88b8b0b5ec37b6323abd6a4220d8d445273b5f0b937b01fb900369add18030953e62239353599d6005081526d12567acda069033fb2c82109e8b8294d9f68e49fa461a81886d7162d37d8090c5e4ce633acd0ced42200a09eb9dacfb8c9dc0dcb5d5fa9b5d645318ed856fec6bdd146367d1258d5975681084122833c910f9c2c5f6ca8bf", + 
"00000000000000000100000000000000b38755bce7abb9935778b1803b7a4351f68c367e20a38d7208a5c24e3af245b13ded32434b982f15dfd0de99fb5188eb0200000000000000990000000000000004634fd14e7ce463dde2973d53ea2f9638239c22a7f22ee776f900f72ea093dd64c1b9747df1348a1c9293aafc5a99abbf511be721fc898d549dc9ffe94b3f4b6348eedc44322c1c1a6e62ecccc0b266fc44901bd777bddd512c351e56e6b6e3516c12cf3713edff7df201914d071f9a80cbcaea0fc1d228dc2485653d44d8829c9036040a184ac838e80074b2b6ee56f34690b6f26b85aa7a99000000000000000451456684fa30515c8d0ef1244ac419791fb0ff5d52bf2229cbef4fcf4928961d386f9c20c75fb530b1fce6e7e9ad5f0b0cd09944e8598d20e55f0208703e37ba191261e0d302824202eda3e0150731715b88e165f17203f1406023f9cd3d898e10fef65d99fc3f5e3891ce1001fda7bcbea95ac74d030f0a369c0b8cce8bdaac1d7b682649f644677953d1dc3f076bd3befa2c4aaa9d5ea3" + ], + "acks": [ + [ + "0000000000000000020000000000000091000000000000000490a193eab4de97351348833b933705dfb903c7f5875559722a25de970a7bf58a434a9c4d251fa8412ada1e11d8b38e1530a765ef4d3f5d89264967ad75d9e0559318ea05314b8317d686ac51c4bd66ce09de4f4dc6cc64efde35efc1394d5371e968592551401240d901ee9bcde99de77bbdcfa6a65d64dc9f814ca0b4f17a0b5e35562e01c21c850903c55a2ab442699100000000000000042d9c2e94e9a5d2283fdf0dae7f0671fd9a81787ecb2908d1fccee00c57c43022e4f42f2bbf3059ce2c0bebc12d7b4fcae2c87ff1072e111bb7b67839266e23a778272003f3ec618082b750c3b7fdb5800ce76312154fd8ba33a6aaf3aa6c6aaa5880f9516c6f5094d09a69f3c7918f0e2ec92542d91a0e2ba9f65722e3a374ab01c73bff571f114b7bdd0e9298eba220", + 
"010000000000000002000000000000009100000000000000047eee4f119edac1a8f78f0b44426ffbb26b03ea36234678b287711d4e3a47a0215fc917d83c64ff1adfa3c4e985dcaddf550953768ab3725680f8cb4046e0b4338f351cbe49e69c4fcfd3df3285f7b1deed4e1fe41a0ca2d6962de6348db84a690e018a170b3df8a1283a16514ff58be083668fc4226a4921c35a15ac59f8c2e6e3e00adb9da20df161013beacef2330c9100000000000000049d03818e04ff4631844a94e73976653117fa97d88d029159af7cfadf71ed285202f17332cef132f3bb0ca6f5ddfdb3b9cafcc6a9e5b4b86e493d9946b3a6ab5b8e981fe70792b2ec3f63ed9b682eb5b8667398191a16fa308bf1b600541418d6673ba3902d4b58f60875e117984b970dc0d646f721285cacb759e07938ee5a8dbb2bfc70c1b5d6168446571531cccf64" + ], + [ + "00000000000000000200000000000000910000000000000004a87bbba9d5bed1aaa03a2f761213b7bcc8e9b7ae4fb9704562504a07d17f058dfc8725686682e31a12795822f748bd7925085106f2632aecd1e8f2aba231c02c619d8f9263a2683e0cc87be5bad052873cad313550e4c3509ee5cd410cf1aa3350e6ac24c3741781ffa3c5f4a64676cb533f73470e56c7679caef649785494c12c999f6bfb9053c31627ca112cffac1c9100000000000000046a88660f1c401aabfc2e565105eb6ccdaa7f403f897e63512beee5b2a8f5b5a3a63f002ee74ace2778da41935a5fc44a1f5dc5e5a58484f662edd3dbb9f5a6088a7b53b0626b5c42fd2a1ec9f78374288bab74bd0e8398d7203aa294d1c78733365b646b0d38b72447f36e3908e9f2ab31a682b4f62c37ca3240b7e607f264b63917eff09c6fbaaef0331b17260b2074", + "010000000000000002000000000000009100000000000000040a911f3a00d13a252002b042e65052016a1426e05464246145247c72b3825459d92903a1dbf8381a475f38d89b79d6c3b8ea4213a379e5c0dfd78540b633ad3aac019c199c6f22c7c484e2c405bff32d55bc650ae88088b8d930f9a0b066f4576a0c781c80feeb6fc8c220b11964807aa611dcf4dafb5386cbaab7a14f210d78438472b1f427e5e85b49bfbf97e03f7a91000000000000000406c4f04455ad55cecdecd1969ec7ad7684602056eb17b7cd656adc82e741c873eab437d54d8c9e191bc51246e25d0b5c77b2440e866d6c38b6d82a408bd02b64c1bf0584231a4201b9916ed2be1282ab1640866c347ec017a5c92a924e5012334b59d82359544177b762568ea96bb3f7ce2dd6ff60cdfa6fa52c7a78cc6a3f2ec230bddfa6974934a0f6899797493f85" + ] + ] +} \ No newline at end of file diff 
--git a/crates/ethcore/service/Cargo.toml b/crates/ethcore/service/Cargo.toml index bab7124834..6d496afadc 100644 --- a/crates/ethcore/service/Cargo.toml +++ b/crates/ethcore/service/Cargo.toml @@ -3,6 +3,7 @@ description = "Parity Ethereum (EthCore) Client & Network Service Creation & Reg name = "ethcore-service" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] ansi_term = "0.10" diff --git a/crates/ethcore/service/src/error.rs b/crates/ethcore/service/src/error.rs index 1c1840344c..be8f51db3c 100644 --- a/crates/ethcore/service/src/error.rs +++ b/crates/ethcore/service/src/error.rs @@ -18,8 +18,8 @@ // https://github.com/openethereum/openethereum/issues/10302 #![allow(deprecated)] +use crate::io; use ethcore; -use io; error_chain! { foreign_links { diff --git a/crates/ethcore/service/src/lib.rs b/crates/ethcore/service/src/lib.rs index 289ded738a..56941eb5c8 100644 --- a/crates/ethcore/service/src/lib.rs +++ b/crates/ethcore/service/src/lib.rs @@ -19,7 +19,6 @@ extern crate ethcore; extern crate ethcore_blockchain as blockchain; extern crate ethcore_io as io; extern crate ethcore_sync as sync; -extern crate ethereum_types; extern crate kvdb; #[macro_use] @@ -41,5 +40,5 @@ mod stop_guard; #[cfg(test)] extern crate kvdb_rocksdb; -pub use error::{Error, ErrorKind}; +pub use crate::error::{Error, ErrorKind}; pub use service::ClientService; diff --git a/crates/ethcore/service/src/service.rs b/crates/ethcore/service/src/service.rs index 7e24113fd7..9885ea874b 100644 --- a/crates/ethcore/service/src/service.rs +++ b/crates/ethcore/service/src/service.rs @@ -18,23 +18,26 @@ use std::{path::Path, sync::Arc, time::Duration}; +use crate::{ + io::{IoContext, IoError, IoHandler, IoService, TimerToken}, + stop_guard::StopGuard, +}; use ansi_term::Colour; -use io::{IoContext, IoError, IoHandler, IoService, TimerToken}; -use stop_guard::StopGuard; -use blockchain::{BlockChainDB, BlockChainDBHandler}; +use crate::blockchain::{BlockChainDB, 
BlockChainDBHandler}; use ethcore::{ client::{ChainNotify, Client, ClientConfig, ClientIoMessage}, error::{Error as EthcoreError, ErrorKind}, + exit::ShutdownManager, miner::Miner, snapshot::{ - service::{Service as SnapshotService, ServiceParams as SnapServiceParams}, Error as SnapshotError, RestorationStatus, SnapshotService as _SnapshotService, + service::{Service as SnapshotService, ServiceParams as SnapServiceParams}, }, spec::Spec, }; -use Error; +use crate::Error; /// Client service setup. Creates and registers client and network services with the IO subsystem. pub struct ClientService { @@ -55,8 +58,9 @@ impl ClientService { restoration_db_handler: Box, _ipc_path: &Path, miner: Arc, + shutdown: Arc, ) -> Result { - let io_service = IoService::::start("Client")?; + let io_service = IoService::::start("Client", 4)?; info!( "Configured for {} using {} engine", @@ -71,6 +75,7 @@ impl ClientService { blockchain_db.clone(), miner.clone(), io_service.channel(), + shutdown, )?; miner.set_io_channel(io_service.channel()); miner.set_in_chain_checker(&client.clone()); @@ -269,6 +274,7 @@ mod tests { restoration_db_handler, tempdir.path(), Arc::new(Miner::new_for_tests(&spec, None)), + Arc::new(ShutdownManager::null()), ); assert!(service.is_ok()); drop(service.unwrap()); diff --git a/crates/ethcore/service/src/stop_guard.rs b/crates/ethcore/service/src/stop_guard.rs index ce662ea5d5..18c91e0740 100644 --- a/crates/ethcore/service/src/stop_guard.rs +++ b/crates/ethcore/service/src/stop_guard.rs @@ -16,7 +16,7 @@ //! Stop guard mod -use std::sync::{atomic::*, Arc}; +use std::sync::{Arc, atomic::*}; /// Stop guard that will set a stop flag on drop pub struct StopGuard { diff --git a/crates/ethcore/src/account_db.rs b/crates/ethcore/src/account_db.rs index b3e50b1635..83b5f4861d 100644 --- a/crates/ethcore/src/account_db.rs +++ b/crates/ethcore/src/account_db.rs @@ -16,7 +16,7 @@ //! 
DB backend wrapper for Account trie use ethereum_types::H256; -use hash::{keccak, KECCAK_NULL_RLP}; +use hash::{KECCAK_NULL_RLP, keccak}; use hash_db::{AsHashDB, HashDB}; use keccak_hasher::KeccakHasher; use kvdb::DBValue; diff --git a/crates/ethcore/src/block.rs b/crates/ethcore/src/block.rs index 678b4eb24f..e20819b9aa 100644 --- a/crates/ethcore/src/block.rs +++ b/crates/ethcore/src/block.rs @@ -36,24 +36,26 @@ use std::{cmp, collections::HashSet, ops, sync::Arc}; use bytes::Bytes; use ethereum_types::{Address, Bloom, H256, U256}; -use engines::EthEngine; -use error::{BlockError, Error}; -use factory::Factories; -use state::State; -use state_db::StateDB; -use trace::Tracing; +use crate::{ + engines::EthEngine, + error::{BlockError, Error}, + factory::Factories, + state::State, + state_db::StateDB, + trace::Tracing, + verification::PreverifiedBlock, +}; use triehash::ordered_trie_root; use unexpected::{Mismatch, OutOfBounds}; -use verification::PreverifiedBlock; use vm::{EnvInfo, LastHashes}; -use hash::keccak; -use rlp::{encode_list, RlpStream}; -use types::{ +use crate::types::{ header::{ExtendedHeader, Header}, receipt::{TransactionOutcome, TypedReceipt}, transaction::{Error as TransactionError, SignedTransaction}, }; +use hash::keccak; +use rlp::{RlpStream, encode_list}; /// Block that is ready for transactions to be added. 
/// @@ -627,15 +629,17 @@ pub fn enact_verified( #[cfg(test)] mod tests { use super::*; - use engines::EthEngine; - use error::Error; + use crate::{ + engines::EthEngine, + error::Error, + factory::Factories, + state_db::StateDB, + test_helpers::get_temp_state_db, + types::{header::Header, transaction::SignedTransaction, view, views::BlockView}, + verification::queue::kind::blocks::Unverified, + }; use ethereum_types::Address; - use factory::Factories; - use state_db::StateDB; use std::sync::Arc; - use test_helpers::get_temp_state_db; - use types::{header::Header, transaction::SignedTransaction, view, views::BlockView}; - use verification::queue::kind::blocks::Unverified; use vm::LastHashes; /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header @@ -721,7 +725,7 @@ mod tests { #[test] fn open_block() { - use spec::*; + use crate::spec::*; let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); let db = spec @@ -748,7 +752,7 @@ mod tests { #[test] fn enact_block() { - use spec::*; + use crate::spec::*; let spec = Spec::new_test(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); @@ -809,7 +813,7 @@ mod tests { #[test] fn enact_block_with_uncle() { - use spec::*; + use crate::spec::*; let spec = Spec::new_test(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); diff --git a/crates/ethcore/src/client/ancient_import.rs b/crates/ethcore/src/client/ancient_import.rs index c5ff56aa35..e5c2a49611 100644 --- a/crates/ethcore/src/client/ancient_import.rs +++ b/crates/ethcore/src/client/ancient_import.rs @@ -18,13 +18,14 @@ use std::sync::Arc; -use engines::{EpochVerifier, EthEngine}; -use machine::EthereumMachine; +use crate::{ + engines::{EpochVerifier, EthEngine}, + machine::EthereumMachine, +}; -use blockchain::BlockChain; +use crate::{blockchain::BlockChain, types::header::Header}; use parking_lot::RwLock; use rand::Rng; -use types::header::Header; // 
do "heavy" verification on ~1/50 blocks, randomly sampled. const HEAVY_VERIFY_RATE: f32 = 0.02; @@ -52,10 +53,10 @@ impl AncientVerifier { rng: &mut R, header: &Header, chain: &BlockChain, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { // perform verification let verified = if let Some(ref cur_verifier) = *self.cur_verifier.read() { - match rng.gen::() <= HEAVY_VERIFY_RATE { + match rng.r#gen::() <= HEAVY_VERIFY_RATE { true => cur_verifier.verify_heavy(header)?, false => cur_verifier.verify_light(header)?, } @@ -93,7 +94,7 @@ impl AncientVerifier { &self, header: &Header, chain: &BlockChain, - ) -> Result>, ::error::Error> { + ) -> Result>, crate::error::Error> { trace!(target: "client", "Initializing ancient block restoration."); let current_epoch_data = chain .epoch_transitions() diff --git a/crates/ethcore/src/client/bad_blocks.rs b/crates/ethcore/src/client/bad_blocks.rs index c938529359..6863db91e7 100644 --- a/crates/ethcore/src/client/bad_blocks.rs +++ b/crates/ethcore/src/client/bad_blocks.rs @@ -16,13 +16,12 @@ //! Stores recently seen bad blocks. +use crate::{types::BlockNumber, verification::queue::kind::blocks::Unverified}; use bytes::{Bytes, ToPretty}; use ethereum_types::H256; use itertools::Itertools; use memory_cache::MemoryLruCache; use parking_lot::RwLock; -use types::BlockNumber; -use verification::queue::kind::blocks::Unverified; /// Recently seen bad blocks. pub struct BadBlocks { diff --git a/crates/ethcore/src/client/chain_notify.rs b/crates/ethcore/src/client/chain_notify.rs index 53fde6719d..0834b76ab4 100644 --- a/crates/ethcore/src/client/chain_notify.rs +++ b/crates/ethcore/src/client/chain_notify.rs @@ -14,16 +14,15 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use blockchain::ImportRoute; +use crate::{blockchain::ImportRoute, types::transaction::UnverifiedTransaction}; use bytes::Bytes; use ethereum_types::{H256, H512, U256}; use std::{collections::HashMap, time::Duration}; -use types::transaction::UnverifiedTransaction; /// Messages to broadcast via chain pub enum ChainMessageType { /// Consensus message - Consensus(Vec), + Consensus(u64, Vec), } /// Route type to indicate whether it is enacted or retracted. @@ -185,7 +184,7 @@ pub trait ChainNotify: Send + Sync { } /// fires when chain sends a message to a specific peer - fn send(&self, _message_type: ChainMessageType, _node_id: Option) { + fn send(&self, _message_type: ChainMessageType, _node_id: &H512) { // does nothing by default } diff --git a/crates/ethcore/src/client/client.rs b/crates/ethcore/src/client/client.rs index 0502d8ae03..e4cc85d3f1 100644 --- a/crates/ethcore/src/client/client.rs +++ b/crates/ethcore/src/client/client.rs @@ -15,26 +15,81 @@ // along with OpenEthereum. If not, see . 
use std::{ - cmp, + cmp::{self}, collections::{BTreeMap, HashSet, VecDeque}, convert::TryFrom, io::{BufRead, BufReader}, - str::{from_utf8, FromStr}, + str::{FromStr, from_utf8}, sync::{ - atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering as AtomicOrdering}, Arc, Weak, + atomic::{AtomicBool, AtomicI64, AtomicU64, Ordering as AtomicOrdering}, }, time::{Duration, Instant}, }; -use blockchain::{ - BlockChain, BlockChainDB, BlockNumberKey, BlockProvider, BlockReceipts, ExtrasInsert, - ImportRoute, TransactionAddress, TreeRoute, +use crate::{ + block::{ClosedBlock, Drain, LockedBlock, OpenBlock, SealedBlock, enact_verified}, + blockchain::{ + BlockChain, BlockChainDB, BlockNumberKey, BlockProvider, BlockReceipts, ExtrasInsert, + ImportRoute, TransactionAddress, TreeRoute, + }, + client::{ + AccountData, BadBlocks, Balance, BlockChain as BlockChainTrait, BlockChainClient, + BlockChainReset, BlockId, BlockInfo, BlockProducer, BroadcastProposalBlock, Call, + CallAnalytics, ChainInfo, ChainMessageType, ChainNotify, ChainRoute, ClientConfig, + ClientIoMessage, EngineInfo, ImportBlock, ImportExportBlocks, ImportSealedBlock, IoClient, + Mode, NewBlocks, Nonce, PrepareOpenBlock, ProvingBlockChainClient, PruningInfo, + ReopenBlock, ScheduleInfo, SealedBlockImporter, StateClient, StateInfo, StateOrBlock, + TraceFilter, TraceId, TransactionId, TransactionInfo, UncleId, + ancient_import::AncientVerifier, + bad_blocks, + traits::{ChainSyncing, ForceUpdateSealing, ReservedPeersManagement, TransactionRequest}, + }, + engines::{ + EngineError, EpochTransition, EthEngine, ForkChoice, MAX_UNCLE_AGE, SealingState, + epoch::PendingTransition, + }, + error::{ + BlockError, CallError, Error, Error as EthcoreError, ErrorKind as EthcoreErrorKind, + EthcoreResult, ExecutionError, ImportErrorKind, QueueErrorKind, + }, + executive::{Executed, Executive, TransactOptions, contract_address}, + factory::{Factories, VmFactory}, + io::IoChannel, + miner::{Miner, MinerService}, + snapshot::{self, 
SnapshotClient, io as snapshot_io}, + spec::Spec, + state::{self, State}, + state_db::StateDB, + trace::{ + self, Database as TraceDatabase, ImportRequest as TraceImportRequest, LocalizedTrace, + TraceDB, + }, + transaction_ext::Transaction, + types::{ + BlockNumber, + ancestry_action::AncestryAction, + data_format::DataFormat, + encoded, + filter::Filter, + header::{ExtendedHeader, Header}, + log_entry::LocalizedLogEntry, + receipt::{LocalizedReceipt, TypedReceipt}, + transaction::{ + self, Action, LocalizedTransaction, SignedTransaction, TypedTransaction, + UnverifiedTransaction, + }, + }, + verification::{ + self, BlockQueue, PreverifiedBlock, Verifier, + queue::kind::{BlockLike, blocks::Unverified}, + }, }; +use ansi_term::Colour; use bytes::{Bytes, ToPretty}; -use call_contract::CallContract; +use call_contract::{CallContract, RegistryInfo}; use db::{DBTransaction, DBValue, KeyValueDB}; -use ethcore_miner::pool::VerifiedTransaction; +use ethcore_miner::pool::{VerifiedTransaction, local_transactions::Status}; use ethereum_types::{Address, H256, H264, H512, U256}; use hash::keccak; use itertools::Itertools; @@ -42,71 +97,19 @@ use parking_lot::{Mutex, RwLock}; use rand::rngs::OsRng; use rlp::{PayloadInfo, Rlp}; use rustc_hex::FromHex; -use trie::{Trie, TrieFactory, TrieSpec}; -use types::{ - ancestry_action::AncestryAction, - data_format::DataFormat, - encoded, - filter::Filter, - header::{ExtendedHeader, Header}, - log_entry::LocalizedLogEntry, - receipt::{LocalizedReceipt, TypedReceipt}, - transaction::{ - self, Action, LocalizedTransaction, SignedTransaction, TypedTransaction, - UnverifiedTransaction, - }, - BlockNumber, -}; -use vm::{EnvInfo, LastHashes}; - -use ansi_term::Colour; -use block::{enact_verified, ClosedBlock, Drain, LockedBlock, OpenBlock, SealedBlock}; -use call_contract::RegistryInfo; -use client::{ - ancient_import::AncientVerifier, - bad_blocks, - traits::{ChainSyncing, ForceUpdateSealing, ReservedPeersManagement, TransactionRequest}, - 
AccountData, BadBlocks, Balance, BlockChain as BlockChainTrait, BlockChainClient, - BlockChainReset, BlockId, BlockInfo, BlockProducer, BroadcastProposalBlock, Call, - CallAnalytics, ChainInfo, ChainMessageType, ChainNotify, ChainRoute, ClientConfig, - ClientIoMessage, EngineInfo, ImportBlock, ImportExportBlocks, ImportSealedBlock, IoClient, - Mode, NewBlocks, Nonce, PrepareOpenBlock, ProvingBlockChainClient, PruningInfo, ReopenBlock, - ScheduleInfo, SealedBlockImporter, StateClient, StateInfo, StateOrBlock, TraceFilter, TraceId, - TransactionId, TransactionInfo, UncleId, -}; -use engines::{ - epoch::PendingTransition, EngineError, EpochTransition, EthEngine, ForkChoice, SealingState, - MAX_UNCLE_AGE, -}; -use error::{ - BlockError, CallError, Error, Error as EthcoreError, ErrorKind as EthcoreErrorKind, - EthcoreResult, ExecutionError, ImportErrorKind, QueueErrorKind, -}; -use executive::{contract_address, Executed, Executive, TransactOptions}; -use factory::{Factories, VmFactory}; -use io::IoChannel; -use miner::{Miner, MinerService}; -use snapshot::{self, io as snapshot_io, SnapshotClient}; -use spec::Spec; -use state::{self, State}; -use state_db::StateDB; use stats::{PrometheusMetrics, PrometheusRegistry}; -use trace::{ - self, Database as TraceDatabase, ImportRequest as TraceImportRequest, LocalizedTrace, TraceDB, -}; -use transaction_ext::Transaction; -use verification::{ - self, - queue::kind::{blocks::Unverified, BlockLike}, - BlockQueue, PreverifiedBlock, Verifier, -}; -use vm::Schedule; +use trie::{Trie, TrieFactory, TrieSpec}; +use vm::{EnvInfo, LastHashes, Schedule}; // re-export -pub use blockchain::CacheSize as BlockChainCacheSize; -use db::{keys::BlockDetails, Readable, Writable}; +pub use crate::{ + blockchain::CacheSize as BlockChainCacheSize, + types::{block_status::BlockStatus, blockchain_info::BlockChainInfo}, + verification::QueueInfo as BlockQueueInfo, +}; +use db::{Readable, Writable, keys::BlockDetails}; pub use 
reth_util::queue::ExecutionQueue; -pub use types::{block_status::BlockStatus, blockchain_info::BlockChainInfo}; -pub use verification::QueueInfo as BlockQueueInfo; + +use crate::exit::ShutdownManager; use_contract!(registry, "res/contracts/registrar.json"); const ANCIENT_BLOCKS_QUEUE_SIZE: usize = 4096; @@ -192,6 +195,46 @@ struct Importer { pub bad_blocks: bad_blocks::BadBlocks, } +#[derive(Default)] +struct ClientStatistics { + logging_enabled: bool, + broadcasted_consensus_messages: AtomicU64, + broadcasted_consensus_messages_bytes: AtomicU64, + sent_consensus_messages: AtomicU64, + sent_consensus_messages_bytes: AtomicU64, +} + +impl PrometheusMetrics for ClientStatistics { + fn prometheus_metrics(&self, r: &mut PrometheusRegistry) { + if self.logging_enabled { + r.register_counter( + "consens_messages_sent", + "count", + self.sent_consensus_messages + .load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + r.register_counter( + "consens_messages_sent_bytes", + "bytes", + self.sent_consensus_messages_bytes + .load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + r.register_counter( + "consens_messages_broadcasted", + "count", + self.broadcasted_consensus_messages + .load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + r.register_counter( + "consens_messages_broadcasted_bytes", + "bytes", + self.broadcasted_consensus_messages_bytes + .load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + } + } +} + /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. 
pub struct Client { @@ -260,6 +303,10 @@ pub struct Client { reserved_peers_management: Mutex>>, importer: Importer, + + shutdown: Arc, + + statistics: ClientStatistics, } impl Importer { @@ -520,7 +567,7 @@ impl Importer { warn!(target: "client", "Service tx checker error: {:?}", e); bail!(e); } - Some(ref checker) => match checker.check(client, &t) { + Some(checker) => match checker.check(client, &t) { Ok(true) => {} Ok(false) => { let e = format!( @@ -578,10 +625,10 @@ impl Importer { { let best_block_number = client.chain.read().best_block_number(); if best_block_number >= header.number() { - warn!(target: "client", "Stage 5 verification failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number); + warn!(target: "client", "Stage 5 verification failed for #{} ({}) Block is ancient (current best block: #{}). Error: {:?}", header.number(), header.hash(), best_block_number, e); bail!("Block is ancient"); } else { - warn!(target: "client", "Stage 5 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + warn!(target: "client", "Stage 5 block verification failed for #{} ({}) Error: {:?}", header.number(), header.hash(), e); bail!(e); } } @@ -808,17 +855,17 @@ impl Importer { state_db: &StateDB, client: &Client, ) -> EthcoreResult> { - use engines::EpochChange; + use crate::engines::EpochChange; let hash = header.hash(); - let auxiliary = ::machine::AuxiliaryData { + let auxiliary = crate::machine::AuxiliaryData { bytes: Some(block_bytes), receipts: Some(&receipts), }; match self.engine.signals_epoch_end(header, auxiliary) { EpochChange::Yes(proof) => { - use engines::Proof; + use crate::engines::Proof; let proof = match proof { Proof::Known(proof) => proof, @@ -836,7 +883,8 @@ impl Importer { let call = move |addr, data| { let mut state_db = state_db.boxed_clone(); - let backend = ::state::backend::Proving::new(state_db.as_hash_db_mut()); + let backend = + 
crate::state::backend::Proving::new(state_db.as_hash_db_mut()); let transaction = client.contract_call_tx( BlockId::Hash(*header.parent_hash()), @@ -946,7 +994,8 @@ impl Client { db: Arc, miner: Arc, message_channel: IoChannel, - ) -> Result, ::error::Error> { + shutdown: Arc, + ) -> Result, crate::error::Error> { let trie_spec = match config.fat_db { true => TrieSpec::Fat, false => TrieSpec::Secure, @@ -1025,6 +1074,9 @@ impl Client { trace!(target: "client", "Found registrar at {}", addr); } + let mut statistics = ClientStatistics::default(); + statistics.logging_enabled = true; + let client = Arc::new(Client { enabled: AtomicBool::new(true), sleep_state: Mutex::new(SleepState::new(awake)), @@ -1054,6 +1106,8 @@ impl Client { reserved_peers_management: Mutex::new(None), importer, config, + shutdown, + statistics, }); let exec_client = client.clone(); @@ -1271,7 +1325,7 @@ impl Client { // use a state-proving closure for the given block. fn with_proving_caller(&self, id: BlockId, with_call: F) -> T where - F: FnOnce(&::machine::Call) -> T, + F: FnOnce(&crate::machine::Call) -> T, { let call = |a, d| { let tx = self.contract_call_tx(id, a, d); @@ -1291,7 +1345,7 @@ impl Client { &self, mut state_db: StateDB, chain: &BlockChain, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { let latest_era = match state_db.journal_db().latest_era() { Some(n) => n, None => return Ok(()), @@ -1317,6 +1371,14 @@ impl Client { freeze_at, earliest_era, latest_era, state_db.journal_db().journal_size()); break; } + + // if the engine still needs that block, we are not going to prune it. + if let Some(protected_block) = self.engine.pruning_protection_block_number() { + if earliest_era > protected_block { + debug!(target: "pruning", "Detected attempt from pruning ancient block that is still required by the engine. 
protected block: {protected_block}, earliest_era: {earliest_era}"); + break; + } + } trace!(target: "client", "Pruning state for ancient era {}", earliest_era); match chain.block_hash(earliest_era) { Some(ancient_hash) => { @@ -1348,17 +1410,29 @@ impl Client { } } + /// Get local transactions from the miner. + pub fn local_transactions(&self) -> BTreeMap { + self.importer.miner.local_transactions() + } + + /// Get local transactions from the miner. + pub fn local_transaction_status(&self, tx_hash: &H256) -> Option { + self.importer.miner.local_transaction_status(tx_hash) + } + /// Get shared miner reference. #[cfg(test)] pub fn miner(&self) -> Arc { self.importer.miner.clone() } + /// Provides read-only access to the `state_db` instance. #[cfg(test)] pub fn state_db(&self) -> ::parking_lot::RwLockReadGuard { self.state_db.read() } + /// Retrieves a cloned instance of the blockchain. #[cfg(test)] pub fn chain(&self) -> Arc { self.chain.read().clone() @@ -1641,7 +1715,7 @@ impl Client { } fn do_virtual_call( - machine: &::machine::EthereumMachine, + machine: &crate::machine::EthereumMachine, env_info: &EnvInfo, state: &mut State, t: &SignedTransaction, @@ -1650,7 +1724,7 @@ impl Client { fn call( state: &mut State, env_info: &EnvInfo, - machine: &::machine::EthereumMachine, + machine: &crate::machine::EthereumMachine, state_diff: bool, transaction: &SignedTransaction, options: TransactOptions, @@ -1911,11 +1985,7 @@ impl RegistryInfo for Client { let value = decoder .decode(&self.call_contract(block, address, data).ok()?) 
.ok()?; - if value.is_zero() { - None - } else { - Some(value) - } + if value.is_zero() { None } else { Some(value) } } } @@ -1994,7 +2064,7 @@ impl ImportBlock for Client { } impl StateClient for Client { - type State = State<::state_db::StateDB>; + type State = State; fn latest_state_and_header(&self) -> (Self::State, Header) { Client::latest_state_and_header(self) @@ -2012,7 +2082,7 @@ impl Drop for Client { } impl Call for Client { - type State = State<::state_db::StateDB>; + type State = State; fn call( &self, @@ -2366,6 +2436,16 @@ impl BlockChainClient for Client { self.importer.miner.transaction(&hash) } + fn transaction_if_readable( + &self, + hash: &H256, + max_lock_duration: &Duration, + ) -> Option> { + self.importer + .miner + .transaction_if_readable(&hash, max_lock_duration) + } + fn uncle(&self, id: UncleId) -> Option { let index = id.position; self.block_body(id.block) @@ -2374,7 +2454,7 @@ impl BlockChainClient for Client { } fn transaction_receipt(&self, id: TransactionId) -> Option { - // NOTE Don't use block_receipts here for performance reasons + // NOTE Don't use crate::block_receipts here for performance reasons let address = self.transaction_address(id)?; let hash = address.block_hash; let chain = self.chain.read(); @@ -2713,9 +2793,11 @@ impl BlockChainClient for Client { .as_u64() as usize, ) }; - self.importer - .miner - .ready_transactions(self, max_len, ::miner::PendingOrdering::Priority) + self.importer.miner.ready_transactions( + self, + max_len, + crate::miner::PendingOrdering::Priority, + ) } fn transaction(&self, tx_hash: &H256) -> Option> { @@ -2855,11 +2937,16 @@ impl BlockChainClient for Client { .import_own_transaction(self, signed.into(), false) } - fn transact_silently(&self, tx_request: TransactionRequest) -> Result<(), transaction::Error> { + fn transact_silently( + &self, + tx_request: TransactionRequest, + ) -> Result { let signed = self.create_transaction(tx_request)?; + let tx_hash = signed.hash(); self.importer .miner 
.import_own_transaction(self, signed.into(), true) + .map(|_| tx_hash) } fn is_major_syncing(&self) -> bool { @@ -2872,6 +2959,16 @@ impl BlockChainClient for Client { } } + fn is_syncing(&self) -> bool { + // so far we know, this lock cannot result into a deadlock. + match &*self.sync_provider.lock() { + Some(sync_provider) => sync_provider.is_syncing(), + // We also indicate the "syncing" state when the SyncProvider has not been set, + // which usually only happens when the client is not fully configured yet. + None => true, + } + } + fn next_nonce(&self, address: &Address) -> U256 { self.importer.miner.next_nonce(self, address) } @@ -3181,9 +3278,9 @@ impl BroadcastProposalBlock for Client { impl SealedBlockImporter for Client {} -impl ::miner::TransactionVerifierClient for Client {} +impl crate::miner::TransactionVerifierClient for Client {} -impl ::miner::BlockChainClient for Client {} +impl crate::miner::BlockChainClient for Client {} impl super::traits::EngineClient for Client { fn update_sealing(&self, force: ForceUpdateSealing) { @@ -3201,15 +3298,41 @@ impl super::traits::EngineClient for Client { } } - fn broadcast_consensus_message(&self, message: Bytes) { - self.notify(|notify| notify.broadcast(ChainMessageType::Consensus(message.clone()))); + fn broadcast_consensus_message(&self, future_block_id: u64, message: Bytes) { + self.statistics + .broadcasted_consensus_messages + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + self.statistics + .broadcasted_consensus_messages_bytes + .fetch_add(message.len() as u64, std::sync::atomic::Ordering::Relaxed); + + self.notify(|notify| { + notify.broadcast(ChainMessageType::Consensus( + future_block_id, + message.clone(), + )) + }); } - fn send_consensus_message(&self, message: Bytes, node_id: Option) { - self.notify(|notify| notify.send(ChainMessageType::Consensus(message.clone()), node_id)); + fn send_consensus_message(&self, future_block_id: u64, message: Bytes, node_id: Option) { + self.statistics + 
.sent_consensus_messages + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + self.statistics + .sent_consensus_messages_bytes + .fetch_add(message.len() as u64, std::sync::atomic::Ordering::Relaxed); + + if let Some(n) = node_id { + self.notify(|notify| { + notify.send( + ChainMessageType::Consensus(future_block_id, message.clone()), + &n, + ) + }); + } } - fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> { + fn epoch_transition_for(&self, parent_hash: H256) -> Option { self.chain.read().epoch_transition_for(parent_hash) } @@ -3229,6 +3352,14 @@ impl super::traits::EngineClient for Client { self.importer.miner.queued_transactions(self) } + fn demand_shutdown(&self) { + self.shutdown.demand_shutdown(); + } + + fn config_shutdown_on_missing_block_import(&self) -> Option { + self.config.shutdown_on_missing_block_import + } + fn create_pending_block_at( &self, txns: Vec, @@ -3239,6 +3370,11 @@ impl super::traits::EngineClient for Client { .miner .create_pending_block_at(self, txns, timestamp, block_number) } + + /// Get local transactions from the miner. + fn local_transaction_status(&self, tx_hash: &H256) -> Option { + self.importer.miner.local_transaction_status(tx_hash) + } } impl ProvingBlockChainClient for Client { @@ -3423,7 +3559,7 @@ impl ImportExportBlocks for Client { /// Returns `LocalizedReceipt` given `LocalizedTransaction` /// and a vector of receipts from given block up to transaction index. 
fn transaction_receipt( - machine: &::machine::EthereumMachine, + machine: &crate::machine::EthereumMachine, mut tx: LocalizedTransaction, receipt: TypedReceipt, prior_gas_used: U256, @@ -3561,16 +3697,16 @@ impl PrometheusMetrics for Client { report.transactions_applied as i64, ); - self.state_db - .try_read_for(Duration::from_millis(200)) - .map(|state_db| { - let state_db_size = state_db.cache_size(); - r.register_gauge( - "statedb_cache_size", - "State DB cache size", - state_db_size as i64, - ); - }); + let lockd = Duration::from_millis(50); + + self.state_db.try_read_for(lockd).map(|state_db| { + let state_db_size = state_db.cache_size(); + r.register_gauge( + "statedb_cache_size", + "State DB cache size", + state_db_size as i64, + ); + }); // blockchain cache let blockchain_cache_info = self.blockchain_cache_info(); @@ -3630,9 +3766,14 @@ impl PrometheusMetrics for Client { chain.best_block_number as i64, ); - let is_syncing_val: i64 = self.is_major_syncing() as i64; - // 0 or 1 if we are syncing. - r.register_gauge("is_major_syncing", "syncing, boolean", is_syncing_val); + // 0 or 1 if we are major syncing. + r.register_gauge( + "is_major_syncing", + "syncing, boolean", + self.is_major_syncing() as i64, + ); + + r.register_gauge("is_syncing", "syncing, boolean", self.is_syncing() as i64); // prunning info let prunning = self.pruning_info(); @@ -3675,37 +3816,43 @@ impl PrometheusMetrics for Client { queue.verifying_queue_size as i64, ); - // database info - self.db.read().key_value().prometheus_metrics(r); + if let Some(db) = self.db.try_read_for(lockd) { + db.prometheus_metrics(r); + }; // engine specific metrics. 
- self.engine.prometheus_metrics(r); + + self.statistics.prometheus_metrics(r); } } #[cfg(test)] mod tests { - use blockchain::{BlockProvider, ExtrasInsert}; + use crate::{ + blockchain::{BlockProvider, ExtrasInsert}, + spec::Spec, + test_helpers::generate_dummy_client_with_spec_and_data, + }; use ethereum_types::{H160, H256}; - use spec::Spec; - use test_helpers::generate_dummy_client_with_spec_and_data; #[test] fn should_not_cache_details_before_commit() { - use client::{BlockChainClient, ChainInfo}; - use test_helpers::{generate_dummy_client, get_good_dummy_block_hash}; + use crate::{ + client::{BlockChainClient, ChainInfo}, + test_helpers::{generate_dummy_client, get_good_dummy_block_hash}, + }; + use crate::types::encoded; use kvdb::DBTransaction; use std::{ sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, thread, time::Duration, }; - use types::encoded; let client = generate_dummy_client(0); let genesis = client.chain_info().best_block_hash; @@ -3723,7 +3870,7 @@ mod tests { encoded::Block::new(new_block), Vec::new(), ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, + fork_choice: crate::engines::ForkChoice::New, is_finalized: false, }, ); @@ -3741,8 +3888,10 @@ mod tests { #[test] fn should_return_block_receipts() { - use client::{BlockChainClient, BlockId, TransactionId}; - use test_helpers::generate_dummy_client_with_data; + use crate::{ + client::{BlockChainClient, BlockId, TransactionId}, + test_helpers::generate_dummy_client_with_data, + }; let client = generate_dummy_client_with_data(2, 2, &[1.into(), 1.into()]); let receipts = client.localized_block_receipts(BlockId::Latest).unwrap(); @@ -3768,18 +3917,18 @@ mod tests { #[test] fn should_return_correct_log_index() { use super::transaction_receipt; - use crypto::publickey::KeyPair; - use hash::keccak; - use types::{ + use crate::types::{ log_entry::{LocalizedLogEntry, LogEntry}, receipt::{LegacyReceipt, LocalizedReceipt, TransactionOutcome, TypedReceipt}, 
transaction::{Action, LocalizedTransaction, Transaction, TypedTransaction}, }; + use crypto::publickey::KeyPair; + use hash::keccak; // given let key = KeyPair::from_secret_slice(keccak("test").as_bytes()).unwrap(); let secret = key.secret(); - let machine = ::ethereum::new_frontier_test_machine(); + let machine = crate::ethereum::new_frontier_test_machine(); let block_number = 1; let block_hash = H256::from_low_u64_be(5); diff --git a/crates/ethcore/src/client/config.rs b/crates/ethcore/src/client/config.rs index d23f5028b2..26c3057709 100644 --- a/crates/ethcore/src/client/config.rs +++ b/crates/ethcore/src/client/config.rs @@ -19,14 +19,15 @@ use std::{ str::FromStr, }; +use crate::{ + snapshot::SnapshotConfiguration, + verification::{QueueConfig, VerifierType}, +}; use journaldb; -use snapshot::SnapshotConfiguration; -use verification::{QueueConfig, VerifierType}; -pub use blockchain::Config as BlockChainConfig; +pub use crate::{blockchain::Config as BlockChainConfig, trace::Config as TraceConfig}; pub use evm::VMType; pub use std::time::Duration; -pub use trace::Config as TraceConfig; /// Client state db compaction profile #[derive(Debug, PartialEq, Clone)] @@ -125,6 +126,10 @@ pub struct ClientConfig { pub transaction_verification_queue_size: usize, /// Maximal number of blocks to import at each round. pub max_round_blocks_to_import: usize, + + /// Shutdown client if block has not happed for n seconds. 
+ pub shutdown_on_missing_block_import: Option, + /// Snapshot configuration pub snapshot: SnapshotConfiguration, } @@ -152,6 +157,7 @@ impl Default for ClientConfig { check_seal: true, transaction_verification_queue_size: 8192, max_round_blocks_to_import: 1, + shutdown_on_missing_block_import: Some(1800), snapshot: Default::default(), } } diff --git a/crates/ethcore/src/client/evm_test_client.rs b/crates/ethcore/src/client/evm_test_client.rs index 97c8d01b3d..2edea99658 100644 --- a/crates/ethcore/src/client/evm_test_client.rs +++ b/crates/ethcore/src/client/evm_test_client.rs @@ -16,23 +16,20 @@ //! Simple Client used for EVM tests. -use client; +use crate::{ + client, executive, + factory::{self, Factories}, + pod_state, spec, state, state_db, trace, + types::{log_entry, receipt, transaction}, +}; use db; use ethereum_types::{H160, H256, U256}; use ethtrie; use evm::{FinalizationResult, VMType}; -use executive; -use factory::{self, Factories}; use journaldb; use kvdb::{self, KeyValueDB}; -use pod_state; -use spec; -use state; -use state_db; use std::{fmt, sync::Arc}; -use trace; use trie; -use types::{log_entry, receipt, transaction}; use vm::{self, ActionParams}; /// EVM test Error. @@ -43,12 +40,12 @@ pub enum EvmTestError { /// EVM error. Evm(vm::Error), /// Initialization error. - ClientError(::error::Error), + ClientError(crate::error::Error), /// Post-condition failure, PostCondition(String), } -impl> From for EvmTestError { +impl> From for EvmTestError { fn from(err: E) -> Self { EvmTestError::ClientError(err.into()) } @@ -67,7 +64,7 @@ impl fmt::Display for EvmTestError { } } -use ethereum::{self}; +use crate::ethereum::{self}; use ethjson::spec::ForkSpec; /// Simplified, single-block EVM test client. 
@@ -403,7 +400,7 @@ pub struct TransactErr { /// State root pub state_root: H256, /// Execution error - pub error: ::error::Error, + pub error: crate::error::Error, /// end state if needed pub end_state: Option, } diff --git a/crates/ethcore/src/client/io_message.rs b/crates/ethcore/src/client/io_message.rs index cd9c36cf0c..c5e7961ed1 100644 --- a/crates/ethcore/src/client/io_message.rs +++ b/crates/ethcore/src/client/io_message.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{client::Client, snapshot::ManifestData}; use bytes::Bytes; -use client::Client; use ethereum_types::H256; -use snapshot::ManifestData; use std::fmt; /// Message type for external and internal events diff --git a/crates/ethcore/src/client/mod.rs b/crates/ethcore/src/client/mod.rs index cbe6a5df16..25e255fc39 100644 --- a/crates/ethcore/src/client/mod.rs +++ b/crates/ethcore/src/client/mod.rs @@ -44,18 +44,17 @@ pub use self::{ ScheduleInfo, SealedBlockImporter, StateClient, StateOrBlock, TransactionInfo, }, }; -pub use state::StateInfo; +pub use crate::state::StateInfo; -pub use types::{ +pub use crate::types::{ call_analytics::CallAnalytics, ids::*, pruning_info::PruningInfo, trace_filter::Filter as TraceFilter, }; -pub use executive::{Executed, Executive, TransactOptions}; +pub use crate::executive::{Executed, Executive, TransactOptions}; pub use vm::{EnvInfo, LastHashes}; -pub use error::TransactionImportError; -pub use verification::VerifierType; +pub use crate::{error::TransactionImportError, verification::VerifierType}; pub mod traits; diff --git a/crates/ethcore/src/client/test_client.rs b/crates/ethcore/src/client/test_client.rs index 0ca9995ce0..b8be5b6f9d 100644 --- a/crates/ethcore/src/client/test_client.rs +++ b/crates/ethcore/src/client/test_client.rs @@ -20,16 +20,35 @@ use std::{ collections::{BTreeMap, HashMap}, str::FromStr, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering as 
AtomicOrder}, Arc, + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrder}, }, + time::Duration, }; -use blockchain::{BlockReceipts, TreeRoute}; +use crate::{ + blockchain::{BlockReceipts, TreeRoute}, + types::{ + BlockNumber, + basic_account::BasicAccount, + encoded, + filter::Filter, + header::Header, + log_entry::LocalizedLogEntry, + pruning_info::PruningInfo, + receipt::{LegacyReceipt, LocalizedReceipt, TransactionOutcome, TypedReceipt}, + transaction::{ + self, Action, LocalizedTransaction, SignedTransaction, Transaction, TypedTransaction, + TypedTxId, + }, + view, + views::BlockView, + }, +}; use bytes::Bytes; use crypto::publickey::{Generator, Random}; use db::{COL_STATE, NUM_COLUMNS}; -use ethcore_miner::pool::VerifiedTransaction; +use ethcore_miner::pool::{VerifiedTransaction, local_transactions::Status}; use ethereum_types::{Address, H256, H512, U256}; use ethtrie; use hash::keccak; @@ -38,46 +57,32 @@ use kvdb::DBValue; use parking_lot::{Mutex, RwLock}; use rlp::RlpStream; use rustc_hex::FromHex; -use types::{ - basic_account::BasicAccount, - encoded, - filter::Filter, - header::Header, - log_entry::LocalizedLogEntry, - pruning_info::PruningInfo, - receipt::{LegacyReceipt, LocalizedReceipt, TransactionOutcome, TypedReceipt}, - transaction::{ - self, Action, LocalizedTransaction, SignedTransaction, Transaction, TypedTransaction, - TypedTxId, - }, - view, - views::BlockView, - BlockNumber, -}; use vm::Schedule; -use block::{ClosedBlock, OpenBlock, SealedBlock}; -use call_contract::{CallContract, RegistryInfo}; -use client::{ - traits::{ForceUpdateSealing, TransactionRequest}, - AccountData, BadBlocks, Balance, BlockChain, BlockChainClient, BlockChainInfo, BlockId, - BlockInfo, BlockProducer, BlockStatus, BroadcastProposalBlock, Call, CallAnalytics, ChainInfo, - EngineInfo, ImportBlock, ImportSealedBlock, IoClient, LastHashes, Mode, Nonce, - PrepareOpenBlock, ProvingBlockChainClient, ReopenBlock, ScheduleInfo, SealedBlockImporter, - StateClient, 
StateOrBlock, TraceFilter, TraceId, TransactionId, TransactionInfo, UncleId, +use crate::{ + block::{ClosedBlock, OpenBlock, SealedBlock}, + client::{ + AccountData, BadBlocks, Balance, BlockChain, BlockChainClient, BlockChainInfo, BlockId, + BlockInfo, BlockProducer, BlockStatus, BroadcastProposalBlock, Call, CallAnalytics, + ChainInfo, EngineInfo, ImportBlock, ImportSealedBlock, IoClient, LastHashes, Mode, Nonce, + PrepareOpenBlock, ProvingBlockChainClient, ReopenBlock, ScheduleInfo, SealedBlockImporter, + StateClient, StateOrBlock, TraceFilter, TraceId, TransactionId, TransactionInfo, UncleId, + traits::{ForceUpdateSealing, TransactionRequest}, + }, + engines::EthEngine, + error::{Error, EthcoreResult}, + executed::CallError, + executive::Executed, + miner::{self, Miner, MinerService}, + spec::Spec, + state::StateInfo, + state_db::StateDB, + trace::LocalizedTrace, + verification::queue::{QueueInfo, kind::blocks::Unverified}, }; -use engines::EthEngine; -use error::{Error, EthcoreResult}; -use executed::CallError; -use executive::Executed; +use call_contract::{CallContract, RegistryInfo}; use journaldb; -use miner::{self, Miner, MinerService}; -use spec::Spec; -use state::StateInfo; -use state_db::StateDB; use stats::{PrometheusMetrics, PrometheusRegistry}; -use trace::LocalizedTrace; -use verification::queue::{kind::blocks::Unverified, QueueInfo}; use super::ReservedPeersManagement; @@ -422,6 +427,7 @@ impl TestBlockChainClient { self.disabled.load(AtomicOrder::SeqCst) } + /// Sets the producer for new transaction hashes. 
pub fn set_new_transaction_hashes_producer( &self, new_transaction_hashes: crossbeam_channel::Sender, @@ -497,8 +503,8 @@ impl BroadcastProposalBlock for TestBlockChainClient { impl SealedBlockImporter for TestBlockChainClient {} -impl ::miner::TransactionVerifierClient for TestBlockChainClient {} -impl ::miner::BlockChainClient for TestBlockChainClient {} +impl crate::miner::TransactionVerifierClient for TestBlockChainClient {} +impl crate::miner::BlockChainClient for TestBlockChainClient {} impl Nonce for TestBlockChainClient { fn nonce(&self, address: &Address, id: BlockId) -> Option { @@ -954,11 +960,7 @@ impl BlockChainClient for TestBlockChainClient { blocks.push(hash.clone()); } } - if adding { - Vec::new() - } else { - blocks - } + if adding { Vec::new() } else { blocks } }, is_from_route_finalized: false, }) @@ -1105,15 +1107,25 @@ impl BlockChainClient for TestBlockChainClient { .import_own_transaction(self, signed.into(), false) } - fn transact_silently(&self, tx_request: TransactionRequest) -> Result<(), transaction::Error> { + fn transact_silently( + &self, + tx_request: TransactionRequest, + ) -> Result { let signed = self.create_transaction(tx_request)?; - self.miner.import_own_transaction(self, signed.into(), true) + let hash = signed.hash(); + self.miner + .import_own_transaction(self, signed.into(), true) + .map(|_| hash) } fn is_major_syncing(&self) -> bool { false } + fn is_syncing(&self) -> bool { + false + } + fn next_nonce(&self, address: &Address) -> U256 { self.miner.next_nonce(self, address) } @@ -1149,6 +1161,14 @@ impl BlockChainClient for TestBlockChainClient { self.miner.transaction(tx_hash) } + fn transaction_if_readable( + &self, + hash: &H256, + max_lock_duration: &Duration, + ) -> Option> { + self.miner.transaction_if_readable(hash, max_lock_duration) + } + /// Returns the devp2p network endpoint IP and Port information that is used to communicate with other peers. 
fn reserved_peers_management(&self) -> &Mutex>> { @@ -1212,13 +1232,18 @@ impl super::traits::EngineClient for TestBlockChainClient { } } - fn broadcast_consensus_message(&self, _message: Bytes) {} + fn broadcast_consensus_message(&self, _future_block_id: u64, _message: Bytes) {} - fn send_consensus_message(&self, _message: Bytes, _node_id: Option) { + fn send_consensus_message( + &self, + _future_block_id: u64, + _message: Bytes, + _node_id: Option, + ) { // TODO: allow test to intercept the message to relay it to other test clients } - fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> { + fn epoch_transition_for(&self, _block_hash: H256) -> Option { None } @@ -1247,6 +1272,12 @@ impl super::traits::EngineClient for TestBlockChainClient { self.miner .create_pending_block_at(self, txns, timestamp, block_number) } + + fn demand_shutdown(&self) {} + + fn local_transaction_status(&self, tx_hash: &H256) -> Option { + self.miner.local_transaction_status(tx_hash) + } } impl PrometheusMetrics for TestBlockChainClient { diff --git a/crates/ethcore/src/client/trace.rs b/crates/ethcore/src/client/trace.rs index 2fae25f8df..04ce18e658 100644 --- a/crates/ethcore/src/client/trace.rs +++ b/crates/ethcore/src/client/trace.rs @@ -16,10 +16,12 @@ //! Bridge between Tracedb and Blockchain. 
-use blockchain::{BlockChain, BlockProvider, TransactionAddress}; +use crate::{ + blockchain::{BlockChain, BlockProvider, TransactionAddress}, + trace::DatabaseExtras as TraceDatabaseExtras, + types::BlockNumber, +}; use ethereum_types::H256; -use trace::DatabaseExtras as TraceDatabaseExtras; -use types::BlockNumber; impl TraceDatabaseExtras for BlockChain { fn block_hash(&self, block_number: BlockNumber) -> Option { diff --git a/crates/ethcore/src/client/traits.rs b/crates/ethcore/src/client/traits.rs index 8fab9c1850..7c9c5c5351 100644 --- a/crates/ethcore/src/client/traits.rs +++ b/crates/ethcore/src/client/traits.rs @@ -20,45 +20,50 @@ use std::{ collections::{BTreeMap, BTreeSet}, net::SocketAddr, sync::Arc, + time::Duration, }; -use blockchain::{BlockReceipts, TreeRoute}; +use crate::{ + blockchain::{BlockReceipts, TreeRoute}, + types::{ + BlockNumber, + basic_account::BasicAccount, + block_status::BlockStatus, + blockchain_info::BlockChainInfo, + call_analytics::CallAnalytics, + data_format::DataFormat, + encoded, + filter::Filter, + header::Header, + ids::*, + log_entry::LocalizedLogEntry, + pruning_info::PruningInfo, + receipt::LocalizedReceipt, + trace_filter::Filter as TraceFilter, + transaction::{self, Action, LocalizedTransaction, SignedTransaction, TypedTxId}, + }, +}; use bytes::Bytes; use call_contract::{CallContract, RegistryInfo}; -use ethcore_miner::pool::VerifiedTransaction; +use ethcore_miner::pool::{VerifiedTransaction, local_transactions::Status}; use ethereum_types::{Address, H256, H512, U256}; use evm::Schedule; use itertools::Itertools; use kvdb::DBValue; use parking_lot::Mutex; -use types::{ - basic_account::BasicAccount, - block_status::BlockStatus, - blockchain_info::BlockChainInfo, - call_analytics::CallAnalytics, - data_format::DataFormat, - encoded, - filter::Filter, - header::Header, - ids::*, - log_entry::LocalizedLogEntry, - pruning_info::PruningInfo, - receipt::LocalizedReceipt, - trace_filter::Filter as TraceFilter, - 
transaction::{self, Action, LocalizedTransaction, SignedTransaction, TypedTxId}, - BlockNumber, -}; use vm::LastHashes; -use block::{ClosedBlock, OpenBlock, SealedBlock}; -use client::Mode; -use engines::EthEngine; -use error::{Error, EthcoreResult}; -use executed::CallError; -use executive::Executed; -use state::StateInfo; -use trace::LocalizedTrace; -use verification::queue::{kind::blocks::Unverified, QueueInfo as BlockQueueInfo}; +use crate::{ + block::{ClosedBlock, OpenBlock, SealedBlock}, + client::Mode, + engines::EthEngine, + error::{Error, EthcoreResult}, + executed::CallError, + executive::Executed, + state::StateInfo, + trace::LocalizedTrace, + verification::queue::{QueueInfo as BlockQueueInfo, kind::blocks::Unverified}, +}; /// State information to be used during client query pub enum StateOrBlock { @@ -215,6 +220,9 @@ pub trait EngineInfo { /// Provides information about the chain sync state. pub trait ChainSyncing: Send + Sync { + /// are we syncing? + fn is_syncing(&self) -> bool; + /// are we in the middle of a major sync? fn is_major_syncing(&self) -> bool; } @@ -416,6 +424,14 @@ pub trait BlockChainClient: /// Get verified transaction with specified transaction hash. fn transaction(&self, tx_hash: &H256) -> Option>; + /// see queued_transactions(&self). + /// Get pool transaction with a given hash, but returns NONE fast, if if cannot acquire a readlock fast. + fn transaction_if_readable( + &self, + hash: &H256, + max_lock_duration: &Duration, + ) -> Option>; + /// Sorted list of transaction gas prices from at least last sample_size blocks. fn gas_price_corpus(&self, sample_size: usize) -> ::stats::Corpus { let mut h = self.chain_info().best_block_hash; @@ -522,11 +538,15 @@ pub trait BlockChainClient: /// Same as transact(), but just adding the transaction to the queue, without calling back into the engine. /// Used by engines to queue transactions without causing deadlocks due to re-entrant calls. 
- fn transact_silently(&self, tx_request: TransactionRequest) -> Result<(), transaction::Error>; + fn transact_silently(&self, tx_request: TransactionRequest) + -> Result; - /// Returns true if the chain is currently syncing. + /// Returns true if the chain is currently syncing in major states. fn is_major_syncing(&self) -> bool; + /// Returns true if the chain is currently syncing. + fn is_syncing(&self) -> bool; + /// Returns the next nonce for the given address, taking the transaction queue into account. fn next_nonce(&self, address: &Address) -> U256; @@ -648,17 +668,17 @@ pub trait EngineClient: Sync + Send + ChainInfo { fn submit_seal(&self, block_hash: H256, seal: Vec); /// Broadcast a consensus message to the network. - fn broadcast_consensus_message(&self, message: Bytes); + fn broadcast_consensus_message(&self, future_block_id: u64, message: Bytes); /// Send a consensus message to the specified peer - fn send_consensus_message(&self, message: Bytes, node_id: Option); + fn send_consensus_message(&self, future_block_id: u64, message: Bytes, node_id: Option); /// Get the transition to the epoch the given parent hash is part of /// or transitions to. /// This will give the epoch that any children of this parent belong to. /// /// The block corresponding the the parent hash must be stored already. - fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>; + fn epoch_transition_for(&self, parent_hash: H256) -> Option; /// Attempt to cast the engine client to a full client. fn as_full_client(&self) -> Option<&dyn BlockChainClient>; @@ -669,6 +689,9 @@ pub trait EngineClient: Sync + Send + ChainInfo { /// Get raw block header data by block id. fn block_header(&self, id: BlockId) -> Option; + /// demand a shutdown out of the nodesoftware. 
+ fn demand_shutdown(&self); + /// Get currently pending transactions fn queued_transactions(&self) -> Vec>; @@ -679,6 +702,17 @@ pub trait EngineClient: Sync + Send + ChainInfo { timestamp: u64, block_number: u64, ) -> Option
; + + /// Time in seconds until the Engine shuts down if no Block Import is performed. + fn config_shutdown_on_missing_block_import(&self) -> Option { + None + } + + /// Get local transaction status. + /// Note that already included transactions might be not available here anymore. + /// As well as transactions that were culled, replaced, dropped or whatever, + /// do not exist forever in the memory. + fn local_transaction_status(&self, tx_hash: &H256) -> Option; } /// Extended client interface for providing proofs of the state. diff --git a/crates/ethcore/src/engines/authority_round/block_gas_limit.rs b/crates/ethcore/src/engines/authority_round/block_gas_limit.rs index b4f98dc75d..7bf3a6b493 100644 --- a/crates/ethcore/src/engines/authority_round/block_gas_limit.rs +++ b/crates/ethcore/src/engines/authority_round/block_gas_limit.rs @@ -16,8 +16,8 @@ //! A client interface for interacting with the block gas limit contract. -use client::{BlockChainClient, BlockId}; -use types::header::Header; +use crate::client::{BlockChainClient, BlockId}; +use crate::types::header::Header; use ethabi::FunctionOutputDecoder; use ethabi_contract::use_contract; use ethereum_types::{Address, U256}; diff --git a/crates/ethcore/src/engines/authority_round/finality.rs b/crates/ethcore/src/engines/authority_round/finality.rs index cd7e9128db..eace66ac14 100644 --- a/crates/ethcore/src/engines/authority_round/finality.rs +++ b/crates/ethcore/src/engines/authority_round/finality.rs @@ -17,14 +17,14 @@ //! Finality proof generation and checking. use std::collections::{ - hash_map::{Entry, HashMap}, VecDeque, + hash_map::{Entry, HashMap}, }; +use crate::types::BlockNumber; use ethereum_types::{Address, H256}; -use types::BlockNumber; -use engines::validator_set::SimpleList; +use crate::engines::validator_set::SimpleList; /// Error indicating unknown validator. 
#[derive(Debug, PartialEq, Eq, Clone, Copy)] @@ -182,7 +182,9 @@ impl RollingFinality { } } Entry::Vacant(_) => { - panic!("all hashes in `header` should have entries in `sign_count` for their signers; qed"); + panic!( + "all hashes in `header` should have entries in `sign_count` for their signers; qed" + ); } } } @@ -192,16 +194,18 @@ impl RollingFinality { #[cfg(test)] mod tests { use super::RollingFinality; + use crate::types::BlockNumber; use ethereum_types::{Address, H256}; - use types::BlockNumber; #[test] fn rejects_unknown_signers() { let signers = (0..3).map(|_| Address::random()).collect::>(); let mut finality = RollingFinality::blank(signers.clone(), BlockNumber::max_value()); - assert!(finality - .push_hash(H256::random(), 0, vec![signers[0], Address::random()]) - .is_err()); + assert!( + finality + .push_hash(H256::random(), 0, vec![signers[0], Address::random()]) + .is_err() + ); } #[test] @@ -287,9 +291,11 @@ mod tests { fn rejects_unknown_signers_2_3() { let signers = (0..3).map(|_| Address::random()).collect::>(); let mut finality = RollingFinality::blank(signers.clone(), 0); - assert!(finality - .push_hash(H256::random(), 0, vec![signers[0], Address::random()]) - .is_err()); + assert!( + finality + .push_hash(H256::random(), 0, vec![signers[0], Address::random()]) + .is_err() + ); } #[test] diff --git a/crates/ethcore/src/engines/authority_round/mod.rs b/crates/ethcore/src/engines/authority_round/mod.rs index 0b8b351e7c..2e7f23a06a 100644 --- a/crates/ethcore/src/engines/authority_round/mod.rs +++ b/crates/ethcore/src/engines/authority_round/mod.rs @@ -38,8 +38,8 @@ use std::{ iter::{self, FromIterator}, ops::Deref, sync::{ - atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering}, Arc, Weak, + atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering}, }, time::{Duration, UNIX_EPOCH}, u64, @@ -47,42 +47,45 @@ use std::{ use self::finality::RollingFinality; use super::{ - signer::EngineSigner, - validator_set::{new_validator_set_posdao, 
SimpleList, ValidatorSet}, EthEngine, + signer::EngineSigner, + validator_set::{SimpleList, ValidatorSet, new_validator_set_posdao}, }; -use block::*; -use bytes::Bytes; -use client::{ - traits::{ForceUpdateSealing, TransactionRequest}, - EngineClient, +use crate::{ + block::*, + client::{ + EngineClient, + traits::{ForceUpdateSealing, TransactionRequest}, + }, + engines::{ + ConstructedVerifier, Engine, EngineError, Seal, SealingState, block_reward, + block_reward::{BlockRewardContract, RewardKind}, + }, + error::{BlockError, Error, ErrorKind}, }; +use bytes::Bytes; use crypto::publickey::{self, Signature}; -use engines::{ - block_reward, - block_reward::{BlockRewardContract, RewardKind}, - ConstructedVerifier, Engine, EngineError, Seal, SealingState, -}; -use error::{BlockError, Error, ErrorKind}; use ethereum_types::{Address, H256, H512, H520, U128, U256}; +use crate::{ + io::{IoContext, IoHandler, IoService, TimerToken}, + machine::{AuxiliaryData, Call, EthereumMachine}, + types::{ + BlockNumber, + ancestry_action::AncestryAction, + header::{ExtendedHeader, Header}, + ids::BlockId, + transaction::SignedTransaction, + }, +}; use ethjson::{self, uint::Uint}; use hash::keccak; -use io::{IoContext, IoHandler, IoService, TimerToken}; use itertools::{self, Itertools}; use lru_cache::LruCache; -use machine::{AuxiliaryData, Call, EthereumMachine}; use parking_lot::{Mutex, RwLock}; use rand::rngs::OsRng; -use rlp::{encode, Decodable, DecoderError, Encodable, Rlp, RlpStream}; +use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream, encode}; use time_utils::CheckedSystemTime; -use types::{ - ancestry_action::AncestryAction, - header::{ExtendedHeader, Header}, - ids::BlockId, - transaction::SignedTransaction, - BlockNumber, -}; use unexpected::{Mismatch, OutOfBounds}; //mod block_gas_limit as crate_block_gas_limit; @@ -1043,7 +1046,7 @@ impl AuthorityRound { step.calibrate(); let engine = Arc::new(AuthorityRound { - transition_service: 
IoService::<()>::start("AuRa")?, + transition_service: IoService::<()>::start("AuRa", 4)?, step: Arc::new(PermissionedStep { inner: step, can_propose: AtomicBool::new(true), @@ -1090,8 +1093,8 @@ impl AuthorityRound { // fetch correct validator set for epoch at header, taking into account // finality of previous transitions. - fn epoch_set<'a>( - &'a self, + fn epoch_set( + &self, header: &Header, ) -> Result<(CowLike, BlockNumber), Error> { Ok(if self.immediate_transitions { @@ -1155,7 +1158,7 @@ impl AuthorityRound { self.empty_steps.lock().insert(empty_step); } - fn generate_empty_step(&self, parent_hash: &H256) { + fn generate_empty_step(&self, future_block_id: u64, parent_hash: &H256) { let step = self.step.inner.load(); let empty_step_rlp = empty_step_rlp(step, parent_hash); @@ -1170,16 +1173,16 @@ impl AuthorityRound { }; trace!(target: "engine", "broadcasting empty step message: {:?}", empty_step); - self.broadcast_message(message_rlp); + self.broadcast_message(future_block_id, message_rlp); self.handle_empty_step_message(empty_step); } else { warn!(target: "engine", "generate_empty_step: FAIL: accounts secret key unavailable"); } } - fn broadcast_message(&self, message: Vec) { + fn broadcast_message(&self, future_block_id: u64, message: Vec) { if let Ok(c) = self.upgrade_client_or(None) { - c.broadcast_consensus_message(message); + c.broadcast_consensus_message(future_block_id, message); } } @@ -1737,7 +1740,9 @@ impl Engine for AuthorityRound { // this is guarded against by `can_propose` unless the block was signed // on the same step (implies same key) and on a different node. if parent_step == step { - warn!("Attempted to seal block on the same step as parent. Is this authority sealing with more than one node?"); + warn!( + "Attempted to seal block on the same step as parent. Is this authority sealing with more than one node?" 
+ ); return Seal::None; } @@ -1754,7 +1759,7 @@ impl Engine for AuthorityRound { .compare_exchange(true, false, AtomicOrdering::SeqCst, AtomicOrdering::SeqCst) .is_ok() { - self.generate_empty_step(header.parent_hash()); + self.generate_empty_step(header.number(), header.parent_hash()); } return Seal::None; @@ -1879,7 +1884,7 @@ impl Engine for AuthorityRound { }; let parent = client - .block_header(::client::BlockId::Hash(*block.header.parent_hash())) + .block_header(crate::client::BlockId::Hash(*block.header.parent_hash())) .expect("hash is from parent; parent header must exist; qed") .decode(self.params().eip1559_transition)?; @@ -2312,7 +2317,7 @@ impl Engine for AuthorityRound { if self.immediate_transitions { None } else { - Some(Box::new(::snapshot::PoaSnapshot)) + Some(Box::new(crate::snapshot::PoaSnapshot)) } } @@ -2382,43 +2387,45 @@ fn next_step_time_duration(info: StepDurationInfo, time: u64) -> Option<(u64, u6 #[cfg(test)] mod tests { use super::{ - calculate_score, next_step_time_duration, util::BoundContract, AuthorityRound, - AuthorityRoundParams, EmptyStep, SealedEmptyStep, StepDurationInfo, + AuthorityRound, AuthorityRoundParams, EmptyStep, SealedEmptyStep, StepDurationInfo, + calculate_score, next_step_time_duration, util::BoundContract, + }; + use crate::{ + block::*, + engines::{ + Engine, EngineError, EngineSigner, EthEngine, Seal, + block_reward::BlockRewardContract, + validator_set::{SimpleList, TestSet}, + }, + error::{Error, ErrorKind}, + miner::{Author, MinerService}, + spec::Spec, + test_helpers::{ + TestNotify, generate_dummy_client_with_spec, generate_dummy_client_with_spec_and_data, + get_temp_state_db, push_block_with_transactions_and_author, + }, + types::{ + header::Header, + ids::BlockId, + transaction::{Action, Transaction, TypedTransaction}, + }, }; use accounts::AccountProvider; - use block::*; use crypto::publickey::Signature; - use engines::{ - block_reward::BlockRewardContract, - validator_set::{SimpleList, TestSet}, - 
Engine, EngineError, EngineSigner, EthEngine, Seal, - }; - use error::{Error, ErrorKind}; use ethabi_contract::use_contract; use ethereum_types::{Address, H256, H520, U256}; use ethjson; use hash::keccak; - use miner::{Author, MinerService}; use rlp::encode; - use spec::Spec; use std::{ collections::BTreeMap, str::FromStr, sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicU64, AtomicUsize, Ordering as AtomicOrdering}, }, time::Duration, }; - use test_helpers::{ - generate_dummy_client_with_spec, generate_dummy_client_with_spec_and_data, - get_temp_state_db, push_block_with_transactions_and_author, TestNotify, - }; - use types::{ - header::Header, - ids::BlockId, - transaction::{Action, Transaction, TypedTransaction}, - }; fn aura(f: F) -> Arc where @@ -2449,9 +2456,9 @@ mod tests { f(&mut params); // create engine - let mut c_params = ::spec::CommonParams::default(); + let mut c_params = crate::spec::CommonParams::default(); c_params.gas_limit_bound_divisor = 5.into(); - let machine = ::machine::EthereumMachine::regular(c_params, Default::default()); + let machine = crate::machine::EthereumMachine::regular(c_params, Default::default()); AuthorityRound::new(params, machine).unwrap() } @@ -3489,16 +3496,18 @@ mod tests { engine.step(); assert!(bc.call_const(rand_contract::functions::is_reveal_phase::call())?); assert!(!bc.call_const(rand_contract::functions::sent_reveal::call(0, addr1))?); - assert!(bc - .call_const(rand_contract::functions::get_value::call())? - .is_zero()); + assert!( + bc.call_const(rand_contract::functions::get_value::call())? + .is_zero() + ); // ...so in the next step, we reveal our random value, and the contract's random value is not zero anymore. engine.step(); assert!(bc.call_const(rand_contract::functions::sent_reveal::call(0, addr1))?); - assert!(!bc - .call_const(rand_contract::functions::get_value::call())? - .is_zero()); + assert!( + !bc.call_const(rand_contract::functions::get_value::call())? 
+ .is_zero() + ); Ok(()) } @@ -3725,12 +3734,12 @@ mod tests { } }"#; let deserialized: ethjson::spec::AuthorityRound = serde_json::from_str(config).unwrap(); - AuthorityRoundParams::from(deserialized.params); + let _ = AuthorityRoundParams::from(deserialized.params); } #[test] fn should_rewrite_bytecode_according_to_transitions() { - use state::StateInfo; + use crate::state::StateInfo; let tap = Arc::new(AccountProvider::transient_provider()); let addr1 = tap.insert_account(keccak("1").into(), &"1".into()).unwrap(); diff --git a/crates/ethcore/src/engines/authority_round/randomness.rs b/crates/ethcore/src/engines/authority_round/randomness.rs index 0113d80416..be0f0a7d26 100644 --- a/crates/ethcore/src/engines/authority_round/randomness.rs +++ b/crates/ethcore/src/engines/authority_round/randomness.rs @@ -69,10 +69,10 @@ //! A production implementation of a randomness contract can be found here: //! https://github.com/poanetwork/posdao-contracts/blob/4fddb108993d4962951717b49222327f3d94275b/contracts/RandomAuRa.sol +use crate::engines::signer::EngineSigner; use bytes::Bytes; -use crypto::publickey::{ecies, Error as CryptoError}; +use crypto::publickey::{Error as CryptoError, ecies}; use derive_more::Display; -use engines::signer::EngineSigner; use ethabi::Hash; use ethabi_contract::use_contract; use ethereum_types::{Address, H256, U256}; @@ -213,7 +213,7 @@ impl RandomnessPhase { // Generate a new random number, but don't reveal it yet. Instead, we publish its hash to the // randomness contract, together with the number encrypted to ourselves. That way we will later be // able to decrypt and reveal it, and other parties are able to verify it against the hash. 
- let number: RandNumber = rng.gen(); + let number: RandNumber = rng.r#gen(); let number_hash: Hash = keccak(number.0); let public = signer.public().ok_or(PhaseError::MissingPublicKey)?; let cipher = ecies::encrypt(&public, number_hash.as_bytes(), number.as_bytes())?; diff --git a/crates/ethcore/src/engines/authority_round/util.rs b/crates/ethcore/src/engines/authority_round/util.rs index 3b50a087f2..d9fb039601 100644 --- a/crates/ethcore/src/engines/authority_round/util.rs +++ b/crates/ethcore/src/engines/authority_round/util.rs @@ -20,12 +20,14 @@ use std::fmt; -use client::{traits::EngineClient, BlockChainClient}; +use crate::{ + client::{BlockChainClient, traits::EngineClient}, + types::{header::Header, ids::BlockId}, +}; use ethabi::{self, FunctionOutputDecoder}; use ethabi_contract::use_contract; use ethereum_types::{Address, U256}; use log::{debug, error}; -use types::{header::Header, ids::BlockId}; /// A contract bound to a client and block number. /// @@ -42,8 +44,10 @@ pub struct BoundContract<'a> { #[derive(Debug)] pub enum CallError { /// The call itself failed. + #[allow(dead_code)] CallFailed(String), /// Decoding the return value failed or the decoded value was a failure. + #[allow(dead_code)] DecodeFailed(ethabi::Error), /// The passed in client reference could not be upgraded to a `BlockchainClient`. NotFullClient, diff --git a/crates/ethcore/src/engines/basic_authority.rs b/crates/ethcore/src/engines/basic_authority.rs index 91add1a8b7..5994035c13 100644 --- a/crates/ethcore/src/engines/basic_authority.rs +++ b/crates/ethcore/src/engines/basic_authority.rs @@ -16,18 +16,20 @@ //! A blockchain engine that supports a basic, non-BFT proof-of-authority. 
-use super::validator_set::{new_validator_set, SimpleList, ValidatorSet}; -use block::*; -use client::EngineClient; +use super::validator_set::{SimpleList, ValidatorSet, new_validator_set}; +use crate::{ + block::*, + client::EngineClient, + engines::{ConstructedVerifier, Engine, EngineError, Seal, SealingState, signer::EngineSigner}, + error::{BlockError, Error}, + machine::{AuxiliaryData, Call, EthereumMachine}, + types::header::{ExtendedHeader, Header}, +}; use crypto::publickey::{self, Signature}; -use engines::{signer::EngineSigner, ConstructedVerifier, Engine, EngineError, Seal, SealingState}; -use error::{BlockError, Error}; use ethereum_types::{H256, H520}; use ethjson; -use machine::{AuxiliaryData, Call, EthereumMachine}; use parking_lot::RwLock; use std::sync::Weak; -use types::header::{ExtendedHeader, Header}; /// `BasicAuthority` params. #[derive(Debug, PartialEq)] @@ -225,16 +227,18 @@ impl Engine for BasicAuthority { #[cfg(test)] mod tests { + use crate::{ + block::*, + engines::{Seal, SealingState}, + spec::Spec, + test_helpers::get_temp_state_db, + types::header::Header, + }; use accounts::AccountProvider; - use block::*; - use engines::{Seal, SealingState}; use ethereum_types::H520; use hash::keccak; - use spec::Spec; use std::sync::Arc; use tempdir::TempDir; - use test_helpers::get_temp_state_db; - use types::header::Header; /// Create a new test chain spec with `BasicAuthority` consensus engine. 
fn new_test_authority() -> Spec { diff --git a/crates/ethcore/src/engines/block_reward.rs b/crates/ethcore/src/engines/block_reward.rs index 90c05a8a7e..2417802792 100644 --- a/crates/ethcore/src/engines/block_reward.rs +++ b/crates/ethcore/src/engines/block_reward.rs @@ -21,13 +21,15 @@ use ethabi::{self, ParamType}; use ethereum_types::{Address, H160, U256}; use super::{SystemOrCodeCall, SystemOrCodeCallKind}; -use block::ExecutedBlock; -use error::Error; +use crate::{ + block::ExecutedBlock, + error::Error, + machine::Machine, + trace::{self, ExecutiveTracer, Tracer, Tracing}, + types::BlockNumber, +}; use hash::keccak; -use machine::Machine; use std::sync::Arc; -use trace::{self, ExecutiveTracer, Tracer, Tracing}; -use types::BlockNumber; use_contract!(block_reward_contract, "res/contracts/block_reward.json"); @@ -126,7 +128,7 @@ impl BlockRewardContract { let output = caller(self.kind.clone(), input) .map_err(Into::into) - .map_err(::engines::EngineError::FailedSystemCall)?; + .map_err(crate::engines::EngineError::FailedSystemCall)?; // since this is a non-constant call we can't use ethabi's function output // deserialization, sadness ensues. 
@@ -137,7 +139,7 @@ impl BlockRewardContract { let tokens = ethabi::decode(types, &output) .map_err(|err| err.to_string()) - .map_err(::engines::EngineError::FailedSystemCall)?; + .map_err(crate::engines::EngineError::FailedSystemCall)?; assert!(tokens.len() == 2); @@ -151,7 +153,7 @@ impl BlockRewardContract { .expect("type checked by ethabi::decode; qed"); if addresses.len() != rewards.len() { - return Err(::engines::EngineError::FailedSystemCall( + return Err(crate::engines::EngineError::FailedSystemCall( "invalid data returned by reward contract: both arrays must have the same size" .into(), ) @@ -195,13 +197,13 @@ pub fn apply_block_rewards( #[cfg(test)] mod test { - use client::PrepareOpenBlock; + use crate::{ + client::PrepareOpenBlock, spec::Spec, test_helpers::generate_dummy_client_with_spec, + }; use ethereum_types::{H160, U256}; - use spec::Spec; - use test_helpers::generate_dummy_client_with_spec; use super::{BlockRewardContract, RewardKind}; - use engines::SystemOrCodeCallKind; + use crate::engines::SystemOrCodeCallKind; use std::str::FromStr; #[test] @@ -237,10 +239,12 @@ mod test { }; // if no beneficiaries are given no rewards are attributed - assert!(block_reward_contract - .reward(&vec![], &mut call) - .unwrap() - .is_empty()); + assert!( + block_reward_contract + .reward(&vec![], &mut call) + .unwrap() + .is_empty() + ); // the contract rewards (1000 + kind) for each benefactor let beneficiaries = vec![ diff --git a/crates/ethcore/src/engines/clique/block_state.rs b/crates/ethcore/src/engines/clique/block_state.rs index 53565693dc..37f88706c3 100644 --- a/crates/ethcore/src/engines/clique/block_state.rs +++ b/crates/ethcore/src/engines/clique/block_state.rs @@ -20,18 +20,20 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; -use engines::{ - clique::{ - util::{extract_signers, recover_creator}, - VoteType, DIFF_INTURN, DIFF_NOTURN, NULL_AUTHOR, SIGNING_DELAY_NOTURN_MS, +use crate::{ + engines::{ + EngineError, + clique::{ + DIFF_INTURN, 
DIFF_NOTURN, NULL_AUTHOR, SIGNING_DELAY_NOTURN_MS, VoteType, + util::{extract_signers, recover_creator}, + }, }, - EngineError, + error::{BlockError, Error}, + types::{BlockNumber, header::Header}, }; -use error::{BlockError, Error}; use ethereum_types::{Address, H64}; use rand::Rng; use time_utils::CheckedSystemTime; -use types::{header::Header, BlockNumber}; use unexpected::Mismatch; /// Type that keeps track of the state for a given vote diff --git a/crates/ethcore/src/engines/clique/mod.rs b/crates/ethcore/src/engines/clique/mod.rs index c5107c117d..04e024e054 100644 --- a/crates/ethcore/src/engines/clique/mod.rs +++ b/crates/ethcore/src/engines/clique/mod.rs @@ -66,26 +66,28 @@ use std::{ }; use super::signer::EngineSigner; -use block::ExecutedBlock; -use client::{traits::ForceUpdateSealing, BlockId, EngineClient}; -use crypto::publickey::Signature; -use engines::{ - clique::util::{extract_signers, recover_creator}, - Engine, EngineError, Seal, SealingState, +use crate::{ + block::ExecutedBlock, + client::{BlockId, EngineClient, traits::ForceUpdateSealing}, + engines::{ + Engine, EngineError, Seal, SealingState, + clique::util::{extract_signers, recover_creator}, + }, + error::{BlockError, Error}, + machine::{Call, EthereumMachine}, + types::{ + BlockNumber, + header::{ExtendedHeader, Header}, + }, }; -use error::{BlockError, Error}; -use ethereum_types::{Address, H160, H256, H64, U256}; +use crypto::publickey::Signature; +use ethereum_types::{Address, H64, H160, H256, U256}; use hash::KECCAK_EMPTY_LIST_RLP; use itertools::Itertools; use lru_cache::LruCache; -use machine::{Call, EthereumMachine}; use parking_lot::RwLock; use rand::Rng; use time_utils::CheckedSystemTime; -use types::{ - header::{ExtendedHeader, Header}, - BlockNumber, -}; use unexpected::{Mismatch, OutOfBounds}; use self::{block_state::CliqueBlockState, params::CliqueParams}; @@ -171,12 +173,20 @@ pub struct Clique { #[cfg(test)] /// Test version of `CliqueEngine` to make all fields public pub 
struct Clique { + /// Number of blocks that make up an epoch in the Clique consensus algorithm. + /// At each epoch transition, signers/validators can be added or removed. pub epoch_length: u64, + /// The period between blocks in seconds pub period: u64, + /// The Ethereum machine implementation pub machine: EthereumMachine, + /// Reference to the engine client pub client: RwLock>>, + /// Cache of block states indexed by block hash pub block_state_by_hash: RwLock>, + /// Current set of proposals for adding/removing validators pub proposals: RwLock>, + /// Optional engine signer pub signer: RwLock>>, } @@ -200,19 +210,21 @@ impl Clique { thread::Builder::new() .name("StepService".into()) - .spawn(move || loop { - let next_step_at = Instant::now() + SEALING_FREQ; - trace!(target: "miner", "StepService: triggering sealing"); - if let Some(eng) = weak_eng.upgrade() { - eng.step() - } else { - warn!(target: "shutdown", "StepService: engine is dropped; exiting."); - break; - } + .spawn(move || { + loop { + let next_step_at = Instant::now() + SEALING_FREQ; + trace!(target: "miner", "StepService: triggering sealing"); + if let Some(eng) = weak_eng.upgrade() { + eng.step() + } else { + warn!(target: "shutdown", "StepService: engine is dropped; exiting."); + break; + } - let now = Instant::now(); - if now < next_step_at { - thread::sleep(next_step_at - now); + let now = Instant::now(); + if now < next_step_at { + thread::sleep(next_step_at - now); + } } })?; Ok(engine) @@ -223,7 +235,7 @@ impl Clique { /// Note we need to `mock` the miner and it is introduced to test block verification to trigger new blocks /// to mainly test consensus edge cases pub fn with_test(epoch_length: u64, period: u64) -> Self { - use spec::Spec; + use crate::spec::Spec; Self { epoch_length, @@ -328,13 +340,14 @@ impl Clique { .expect("chain has at least one element; qed") .parent_hash(); - let last_checkpoint_header = - match c.block_header(BlockId::Hash(last_checkpoint_hash)) { - None => { - 
return Err(EngineError::CliqueMissingCheckpoint(last_checkpoint_hash))? - } - Some(header) => header.decode(self.machine.params().eip1559_transition)?, - }; + let last_checkpoint_header = match c + .block_header(BlockId::Hash(last_checkpoint_hash)) + { + None => { + return Err(EngineError::CliqueMissingCheckpoint(last_checkpoint_hash))?; + } + Some(header) => header.decode(self.machine.params().eip1559_transition)?, + }; let last_checkpoint_state = match block_state_by_hash.get_mut(&last_checkpoint_hash) { diff --git a/crates/ethcore/src/engines/clique/tests.rs b/crates/ethcore/src/engines/clique/tests.rs index 22135731f4..1ecd8f1767 100644 --- a/crates/ethcore/src/engines/clique/tests.rs +++ b/crates/ethcore/src/engines/clique/tests.rs @@ -17,13 +17,15 @@ //! Consensus tests for `PoA Clique Engine`, see http://eips.ethereum.org/EIPS/eip-225 for more information use super::*; -use block::*; +use crate::{ + block::*, + engines::Engine, + error::{Error, ErrorKind}, + state_db::StateDB, + test_helpers::get_temp_state_db, +}; use crypto::publickey::{KeyPair, Secret}; -use engines::Engine; -use error::{Error, ErrorKind}; use ethereum_types::{Address, H256}; -use state_db::StateDB; -use test_helpers::get_temp_state_db; use std::{collections::HashMap, sync::Arc}; @@ -332,7 +334,8 @@ fn one_signer_dropping_itself() { 'A', ) .unwrap(); - let signers = tester.clique_signers(&vote.hash()); + let hash = vote.hash(); + let signers = tester.clique_signers(&hash); assert!(signers.count() == 0); } diff --git a/crates/ethcore/src/engines/clique/util.rs b/crates/ethcore/src/engines/clique/util.rs index a5fd9f14a8..e59093e0e5 100644 --- a/crates/ethcore/src/engines/clique/util.rs +++ b/crates/ethcore/src/engines/clique/util.rs @@ -16,17 +16,19 @@ use std::collections::BTreeSet; -use crypto::publickey::{public_to_address, recover as ec_recover, Signature}; -use engines::{ - clique::{ADDRESS_LENGTH, NULL_MIXHASH, NULL_NONCE, SIGNATURE_LENGTH, VANITY_LENGTH}, - EngineError, +use 
crate::{ + engines::{ + EngineError, + clique::{ADDRESS_LENGTH, NULL_MIXHASH, NULL_NONCE, SIGNATURE_LENGTH, VANITY_LENGTH}, + }, + error::Error, + types::header::Header, }; -use error::Error; +use crypto::publickey::{Signature, public_to_address, recover as ec_recover}; use ethereum_types::{Address, H160, H256}; use lru_cache::LruCache; use parking_lot::RwLock; use rlp::encode; -use types::header::Header; /// How many recovered signature to cache in the memory. pub const CREATOR_CACHE_NUM: usize = 4096; diff --git a/crates/ethcore/src/engines/hbbft/block_reward_hbbft.rs b/crates/ethcore/src/engines/hbbft/block_reward_hbbft.rs index 3455e8832c..7c20d1e2e9 100644 --- a/crates/ethcore/src/engines/hbbft/block_reward_hbbft.rs +++ b/crates/ethcore/src/engines/hbbft/block_reward_hbbft.rs @@ -17,11 +17,13 @@ //! Types for declaring block rewards and a client interface for interacting with a //! block reward contract. -use engines::{SystemOrCodeCall, SystemOrCodeCallKind}; -use error::Error; +use crate::{ + engines::{SystemOrCodeCall, SystemOrCodeCallKind}, + error::Error, +}; use ethabi::FunctionOutputDecoder; use ethabi_contract::use_contract; -use ethereum_types::{Address, U256}; +use ethereum_types::Address; use_contract!( block_reward_contract, @@ -49,18 +51,24 @@ impl BlockRewardContract { /// and returns the reward allocation (address - value). The block reward contract *must* be /// called by the system address so the `caller` must ensure that (e.g. using /// `machine.execute_as_system`). 
- pub fn reward(&self, caller: &mut SystemOrCodeCall, is_epoch_end: bool) -> Result { + pub fn reward(&self, caller: &mut SystemOrCodeCall, is_epoch_end: bool) -> Result<(), Error> { let (input, decoder) = block_reward_contract::functions::reward::call(is_epoch_end); - let output = caller(self.kind.clone(), input) .map_err(Into::into) - .map_err(::engines::EngineError::FailedSystemCall)?; + .map_err(crate::engines::EngineError::FailedSystemCall)?; + + match decoder.decode(&output) { + Ok(_rewards_native) => {} + Err(err) => { + debug!(target: "engine", "Failed to decode block reward contract. output length {:?} output: {:?}: Error {:?}", output.len(), output, err); + } + } - let rewards_native = decoder - .decode(&output) - .map_err(|err| err.to_string()) - .map_err(::engines::EngineError::FailedSystemCall)?; + return Ok(()); - Ok(rewards_native) + // let rewards_native = decoder + // .decode(&output) + // .map_err(|err| err.to_string()) + // .map_err(crate::engines::EngineError::FailedSystemCall)?; } } diff --git a/crates/ethcore/src/engines/hbbft/contracts/connectivity_tracker_hbbft.rs b/crates/ethcore/src/engines/hbbft/contracts/connectivity_tracker_hbbft.rs new file mode 100644 index 0000000000..5212dcc967 --- /dev/null +++ b/crates/ethcore/src/engines/hbbft/contracts/connectivity_tracker_hbbft.rs @@ -0,0 +1,142 @@ +use crate::{client::EngineClient, types::ids::BlockId}; +use ethereum_types::{Address, H256, U256}; +use std::str::FromStr; + +use crate::{ + client::{BlockChainClient, traits::TransactionRequest}, + engines::hbbft::utils::bound_contract::{BoundContract, CallError}, +}; + +use_contract!( + connectivity_tracker_hbbft_contract, + "res/contracts//hbbft_connectivity_tracker.json" +); + +lazy_static! { + static ref CONNECTIVITY_TRACKER_HBBFT_CONTRACT_ADDRESS: Address = + Address::from_str("1200000000000000000000000000000000000001").unwrap(); +} + +macro_rules! 
call_const_connectivity_tracker_hbbft { + ($c:ident, $x:ident $(, $a:expr )*) => { + $c.call_const(connectivity_tracker_hbbft_contract::functions::$x::call($($a),*)) + }; +} + +pub fn is_connectivity_loss_reported( + client: &dyn EngineClient, + block_id: BlockId, + reporter: &Address, + epoch: u64, + validator: &Address, +) -> Result { + let c = BoundContract::bind( + client, + block_id, + *CONNECTIVITY_TRACKER_HBBFT_CONTRACT_ADDRESS, + ); + return Ok(call_const_connectivity_tracker_hbbft!( + c, + is_reported, + epoch, + *validator, + *reporter + )?); +} + +// currently not required for operation. +// we just check if "we" have reported the validator. +// pub fn get_current_flagged_validators_from_contract( +// client: &dyn EngineClient, +// block_id: BlockId, +// ) -> Result, CallError> { +// let c = BoundContract::bind( +// client, +// block_id, +// *CONNECTIVITY_TRACKER_HBBFT_CONTRACT_ADDRESS, +// ); +// return Ok(call_const_connectivity_tracker_hbbft!( +// c, +// get_flagged_validators +// )?); +// } + +fn get_block_data(client: &dyn EngineClient) -> (u64, H256) { + if let Some(block_number) = client.block_number(BlockId::Latest) { + if let Some(header) = client.block_header(BlockId::Number(block_number - 1)) { + return (header.number(), header.hash()); + } else { + warn!(target:"engine", "early-epoch-end: could not get block number for block: {block_number}"); + return (0, H256::zero()); + } + } else { + warn!(target:"engine", "early-epoch-end: could not get latest block."); + return (0, H256::zero()); + }; +} + +pub fn report_missing_connectivity( + client: &dyn EngineClient, + full_client: &dyn BlockChainClient, + missing_validator: &Address, + signing_address: &Address, +) -> bool { + let (block_number, block_hash) = get_block_data(client); + if block_number == 0 { + return false; + } + + let send_data = + connectivity_tracker_hbbft_contract::functions::report_missing_connectivity::call( + *missing_validator, + block_number, + block_hash, + ); + + let 
nonce = full_client.next_nonce(signing_address); + + let transaction = + TransactionRequest::call(*CONNECTIVITY_TRACKER_HBBFT_CONTRACT_ADDRESS, send_data.0) + .gas(U256::from(500_000)) + .gas_price(U256::from(10000000000u64)) + .nonce(nonce); + + info!(target:"engine", "early-epoch-end: sending report_missing_connectivity for with nonce: {nonce}, missing: {:?} ", missing_validator); + if let Err(e) = full_client.transact_silently(transaction) { + warn!(target:"engine", "early-epoch-end: could not report_missing_connectivity {e:?}"); + return false; + } + return true; +} + +pub fn report_reconnect( + client: &dyn EngineClient, + full_client: &dyn BlockChainClient, + reconnected_validator: &Address, + signing_address: &Address, +) -> bool { + let (block_number, block_hash) = get_block_data(client); + if block_number == 0 { + return false; + } + + let send_data = connectivity_tracker_hbbft_contract::functions::report_reconnect::call( + *reconnected_validator, + block_number, + block_hash, + ); + + let nonce = full_client.next_nonce(signing_address); + + let transaction = + TransactionRequest::call(*CONNECTIVITY_TRACKER_HBBFT_CONTRACT_ADDRESS, send_data.0) + .gas(U256::from(200_000)) + .nonce(nonce); + + info!(target:"engine", "early-epoch-end: sending report_reconnect for with nonce: {nonce}, missing: {:?} ", reconnected_validator); + if let Err(e) = full_client.transact_silently(transaction) { + warn!(target:"engine", "early-epoch-end: could not report_missing_connectivity {e:?}"); + return false; + } + return true; +} diff --git a/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs b/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs index 7f6f3e1e31..ee41a17eff 100644 --- a/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs +++ b/crates/ethcore/src/engines/hbbft/contracts/keygen_history.rs @@ -1,26 +1,28 @@ -use client::traits::EngineClient; -use crypto::{self, publickey::Public}; -use engines::{ - hbbft::{ - 
contracts::validator_set::{get_validator_pubkeys, ValidatorType}, - utils::bound_contract::{BoundContract, CallError}, - NodeId, +use crate::{ + client::traits::EngineClient, + engines::{ + hbbft::{ + NodeId, + contracts::validator_set::{ValidatorType, get_validator_pubkeys}, + utils::bound_contract::{BoundContract, CallError}, + }, + signer::EngineSigner, }, - signer::EngineSigner, + types::ids::BlockId, }; +use crypto::{self, publickey::Public}; use ethereum_types::{Address, H512, U256}; use hbbft::{ + NetworkInfo, crypto::{PublicKeySet, SecretKeyShare}, sync_key_gen::{ Ack, AckOutcome, Error, Part, PartOutcome, PubKeyMap, PublicKey, SecretKey, SyncKeyGen, }, util::max_faulty, - NetworkInfo, }; use itertools::Itertools; use parking_lot::RwLock; use std::{collections::BTreeMap, str::FromStr, sync::Arc}; -use types::ids::BlockId; use_contract!( key_history_contract, @@ -46,12 +48,17 @@ pub fn engine_signer_to_synckeygen<'a>( inner: signer.clone(), }; let public = match signer.read().as_ref() { - Some(signer) => signer - .public() - .expect("Signer's public key must be available!"), + Some(signer) => { + if let Some(this_public) = signer.public() { + this_public + } else { + error!(target: "engine", "Signer's public key must be available for address {:?}", signer.address()); + return Err(hbbft::sync_key_gen::Error::UnknownSender); + } + } None => Public::from(H512::from_low_u64_be(0)), }; - let mut rng = rand_065::thread_rng(); + let mut rng = rand::thread_rng(); let num_nodes = pub_keys.len(); SyncKeyGen::new(public, wrapper, pub_keys, max_faulty(num_nodes), &mut rng) } @@ -100,13 +107,16 @@ pub fn part_of_address( return Ok(None); } let deserialized_part: Part = bincode::deserialize(&serialized_part).unwrap(); - let mut rng = rand_065::thread_rng(); + let mut rng = rand::thread_rng(); let outcome = skg .handle_part(vmap.get(&address).unwrap(), deserialized_part, &mut rng) .unwrap(); match outcome { - PartOutcome::Invalid(_) => Err(CallError::ReturnValueInvalid), + 
PartOutcome::Invalid(e) => { + error!(target: "engine", "Part for address {} is invalid: {:?}", address, e); + Err(CallError::ReturnValueInvalid) + } PartOutcome::Valid(ack) => Ok(ack), } } @@ -168,10 +178,17 @@ pub struct KeyPairWrapper { pub inner: Arc>>>, } +impl PublicWrapper { + /// Check if the public key is valid. + pub fn is_valid(&self) -> bool { + self.encrypt(b"a", &mut rand::thread_rng()).is_ok() + } +} + impl<'a> PublicKey for PublicWrapper { type Error = crypto::publickey::Error; type SecretKey = KeyPairWrapper; - fn encrypt, R: rand_065::Rng>( + fn encrypt, R: rand::Rng>( &self, msg: M, _rng: &mut R, @@ -194,25 +211,8 @@ impl<'a> SecretKey for KeyPairWrapper { pub fn all_parts_acks_available( client: &dyn EngineClient, - block_timestamp: u64, num_validators: usize, ) -> Result { - // backward compatibility: - // this is a performance improvement introduced on the DMD Alpha Testnet. - // more about https://github.com/DMDcoin/openethereum-3.x/issues/71 - // this piece of code exists only for the DMD public alpha testnet, - // in order to support the v1 protocol version. - // since the v2 protocol version is better, - // v1 should be never used. 
- // remove the code: - // see: https://github.com/DMDcoin/openethereum-3.x/issues/72 - - let trigger_timestamp: u64 = 1646395200; // Friday, March 4, 2022 12:00:00 PM - - if block_timestamp > 0 && trigger_timestamp > 0 && block_timestamp < trigger_timestamp { - return Ok(true); - } - let c = BoundContract::bind(client, BlockId::Latest, *KEYGEN_HISTORY_ADDRESS); let (num_parts, num_acks) = call_const_key_history!(c, get_number_of_key_fragments_written)?; Ok(num_parts.low_u64() == (num_validators as u64) @@ -226,6 +226,7 @@ pub fn initialize_synckeygen( block_id: BlockId, validator_type: ValidatorType, ) -> Result, CallError> { + debug!(target: "engine", "Initializing SyncKeyGen with block_id: {:?}", block_id); let vmap = get_validator_pubkeys(&*client, block_id, validator_type)?; let pub_keys: BTreeMap<_, _> = vmap .values() @@ -250,8 +251,8 @@ pub fn initialize_synckeygen( #[cfg(test)] mod tests { use super::*; + use crate::engines::signer::{EngineSigner, from_keypair}; use crypto::publickey::{KeyPair, Secret}; - use engines::signer::{from_keypair, EngineSigner}; use std::{collections::BTreeMap, sync::Arc}; #[test] diff --git a/crates/ethcore/src/engines/hbbft/contracts/mod.rs b/crates/ethcore/src/engines/hbbft/contracts/mod.rs index 691d0cc50d..95253501fe 100644 --- a/crates/ethcore/src/engines/hbbft/contracts/mod.rs +++ b/crates/ethcore/src/engines/hbbft/contracts/mod.rs @@ -1,3 +1,4 @@ +pub mod connectivity_tracker_hbbft; pub mod keygen_history; pub mod permission; pub mod random_hbbft; diff --git a/crates/ethcore/src/engines/hbbft/contracts/permission.rs b/crates/ethcore/src/engines/hbbft/contracts/permission.rs index 9e4ac713b8..9323e64c4c 100644 --- a/crates/ethcore/src/engines/hbbft/contracts/permission.rs +++ b/crates/ethcore/src/engines/hbbft/contracts/permission.rs @@ -4,8 +4,8 @@ use std::str::FromStr; use crate::{ client::EngineClient, engines::hbbft::utils::bound_contract::{BoundContract, CallError}, + types::ids::BlockId, }; -use types::ids::BlockId; 
use_contract!(permission_contract, "res/contracts/permission_hbbft.json"); diff --git a/crates/ethcore/src/engines/hbbft/contracts/staking.rs b/crates/ethcore/src/engines/hbbft/contracts/staking.rs index 2ab835aef1..84094ccc58 100644 --- a/crates/ethcore/src/engines/hbbft/contracts/staking.rs +++ b/crates/ethcore/src/engines/hbbft/contracts/staking.rs @@ -1,11 +1,13 @@ -use client::EngineClient; -use engines::hbbft::utils::bound_contract::{BoundContract, CallError}; +use crate::{ + client::EngineClient, + engines::hbbft::utils::bound_contract::{BoundContract, CallError}, + types::ids::BlockId, +}; use ethereum_types::{Address, Public, U256}; use std::{ net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, str::FromStr, }; -use types::ids::BlockId; use_contract!(staking_contract, "res/contracts/staking_contract.json"); @@ -115,8 +117,8 @@ pub fn get_pool_public_key( #[cfg(test)] pub mod tests { use super::*; + use crate::engines::hbbft::test::hbbft_test_client::HbbftTestClient; use crypto::publickey::{Generator, KeyPair, Public, Random}; - use engines::hbbft::test::hbbft_test_client::HbbftTestClient; pub fn min_staking(client: &dyn EngineClient) -> Result { let c = BoundContract::bind(client, BlockId::Latest, *STAKING_CONTRACT_ADDRESS); diff --git a/crates/ethcore/src/engines/hbbft/contracts/validator_set.rs b/crates/ethcore/src/engines/hbbft/contracts/validator_set.rs index f76faf3676..9ad5ec5cdc 100644 --- a/crates/ethcore/src/engines/hbbft/contracts/validator_set.rs +++ b/crates/ethcore/src/engines/hbbft/contracts/validator_set.rs @@ -1,12 +1,14 @@ -use client::{ - traits::{EngineClient, TransactionRequest}, - BlockChainClient, +use crate::{ + client::{ + BlockChainClient, + traits::{EngineClient, TransactionRequest}, + }, + engines::hbbft::utils::bound_contract::{BoundContract, CallError}, + types::{ids::BlockId, transaction::Error}, }; use crypto::publickey::Public; -use engines::hbbft::utils::bound_contract::{BoundContract, CallError}; use 
ethereum_types::{Address, U256}; use std::{collections::BTreeMap, net::SocketAddr, str::FromStr}; -use types::{ids::BlockId, transaction::Error}; use_contract!( validator_set_hbbft, @@ -79,7 +81,7 @@ pub fn is_pending_validator( call_const_validator!(c, is_pending_validator, staking_address.clone()) } -#[derive(PartialEq)] +#[derive(PartialEq, Debug)] pub enum KeyGenMode { WritePart, WriteAck, @@ -153,7 +155,7 @@ pub fn set_validator_internet_address( .gas(U256::from(100_000)) .nonce(nonce); - info!(target:"consensus", "set_validator_internet_address: ip: {} nonce: {}", socket_addr, nonce); + info!(target:"consensus", "set_validator_internet_address: ip: {} with nonce: {}", socket_addr, nonce); full_client.transact_silently(transaction)?; Ok(()) } @@ -166,17 +168,18 @@ pub fn send_tx_announce_availability( // we need to get the real latest nonce. //let nonce_from_full_client = full_client.nonce(address,BlockId::Latest); - let mut nonce = full_client.next_nonce(&address); + let nonce = full_client.next_nonce(&address); - match full_client.nonce(address, BlockId::Latest) { - Some(new_nonce) => { - if new_nonce != nonce { - info!(target:"consensus", "got better nonce for announce availability: {} => {}", nonce, new_nonce); - nonce = new_nonce; - } - } - None => {} - } + // match full_client.nonce(address, BlockId::Latest) { + // Some(current_nonce) => { + + // if new_nonce != nonce { + // info!(target:"consensus", "got better nonce for announce availability: {} => {}", nonce, new_nonce); + // nonce = new_nonce; + // } + // } + // None => {} + // } match full_client.block_number(BlockId::Latest) { Some(block_number) => match full_client.block_hash(BlockId::Number(block_number)) { @@ -190,10 +193,12 @@ pub fn send_tx_announce_availability( ); let transaction = TransactionRequest::call(*VALIDATOR_SET_ADDRESS, send_data.0) .gas(U256::from(1_000_000)) + .gas_price(U256::from(0)) .nonce(nonce); info!(target:"consensus", "sending announce availability with nonce: {}", 
nonce); - full_client.transact_silently(transaction)?; + let hash = full_client.transact_silently(transaction)?; + info!(target:"consensus", "sending announce availability with nonce: {} hash: {}", nonce, hash); return Ok(()); } }, diff --git a/crates/ethcore/src/engines/hbbft/contribution.rs b/crates/ethcore/src/engines/hbbft/contribution.rs index 91eaf75619..7863dbe7d8 100644 --- a/crates/ethcore/src/engines/hbbft/contribution.rs +++ b/crates/ethcore/src/engines/hbbft/contribution.rs @@ -1,7 +1,7 @@ -use rand_065::{self, distributions::Standard, Rng}; +use crate::types::transaction::SignedTransaction; +use rand::{self, Rng, distributions::Standard}; use rlp::RlpStream; use std::time::UNIX_EPOCH; -use types::transaction::SignedTransaction; #[derive(Clone, Eq, PartialEq, Debug, Hash, Serialize, Deserialize)] pub(crate) struct Contribution { @@ -42,12 +42,11 @@ impl Contribution { s.drain() }) .collect(); - let mut rng = rand_065::thread_rng(); Contribution { transactions: ser_txns, timestamp: unix_now_secs(), - random_data: rng + random_data: rand::thread_rng() .sample_iter(&Standard) .take(RANDOM_BYTES_PER_EPOCH) .collect(), @@ -57,10 +56,12 @@ impl Contribution { #[cfg(test)] mod tests { + use crate::{ + engines::hbbft::test::create_transactions::create_transaction, + types::transaction::{SignedTransaction, TypedTransaction}, + }; use crypto::publickey::{Generator, Random}; - use engines::hbbft::test::create_transactions::create_transaction; use ethereum_types::U256; - use types::transaction::{SignedTransaction, TypedTransaction}; #[test] fn test_contribution_serialization() { diff --git a/crates/ethcore/src/engines/hbbft/dmd/src/create_miner.rs b/crates/ethcore/src/engines/hbbft/dmd/src/create_miner.rs index 7cfb4e3fbb..1c28058008 100644 --- a/crates/ethcore/src/engines/hbbft/dmd/src/create_miner.rs +++ b/crates/ethcore/src/engines/hbbft/dmd/src/create_miner.rs @@ -1,6 +1,7 @@ use ethstore::{KeyFile, SafeAccount}; use parity_crypto::publickey::{Generator, 
KeyPair, Random, Secret}; -use std::{fs, num::NonZeroU32, path::Path}; +use serde_json::Value; +use std::{fs, num::NonZeroU32, path::Path, str::FromStr}; fn write_json_for_secret(secret: Secret, filename: &str) { let json_key: KeyFile = SafeAccount::create( @@ -21,6 +22,37 @@ fn write_json_for_secret(secret: Secret, filename: &str) { pub fn create_miner() { println!("Creating dmd v4 miner..."); + let mut name: String = "DPoSChain".to_string(); + match fs::read_to_string("spec.json") { + Ok(s) => match serde_json::from_str(s.as_str()) { + Ok(Value::Object(map)) => { + if map.contains_key("name") { + let x = &map["name"]; + + match x.as_str() { + Some(n) => { + name = String::from_str(n) + .expect("could not parse chain name from spec.json"); + println!("chain: {}", name); + } + None => { + println!("could not read chain name from spec.json"); + } + } + } + } + _ => { + println!("unable to parse spec.json"); + } + }, + Err(e) => { + println!("unable to to open spec.json: {:?}", e); + } + } + + //let serialized_json_key = + //serde_json::to_string(&json_key).expect("json key object serialization should succeed"); + let acc = Random.generate(); // Create "data" and "network" subfolders. @@ -31,8 +63,8 @@ pub fn create_miner() { .expect("Unable to write the network key file"); // Create "keys" and "DPoSChain" subfolders. - let accounts_dir = Path::new("./data/keys/DPoSChain"); - fs::create_dir_all(accounts_dir).expect("Could not create accounts directory"); + let accounts_dir = Path::new("./data/keys/").join(name); + fs::create_dir_all(accounts_dir.clone()).expect("Could not create accounts directory"); // Write JSON account. 
write_json_for_secret( diff --git a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/.gitignore b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/.gitignore index e90ae314cb..0c65d01edc 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/.gitignore +++ b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/.gitignore @@ -13,3 +13,4 @@ password.txt reserved-peers rpc_node.toml nodes_info.json +fork_example.json diff --git a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/Cargo.toml b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/Cargo.toml index ff2a79ef06..b4b7aa084d 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/Cargo.toml +++ b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/Cargo.toml @@ -3,24 +3,28 @@ description = "parity config generator for hbbft validators" name = "hbbft_config_generator" version = "0.0.1" license = "GPL-3.0" +edition = "2024" authors = [ - "David Forstenlechner " + "David Forstenlechner ", + "Thomas Haller " ] [dependencies] bincode = "1.1.2" clap = "2" ethcore = { path = "../../../.." 
} +ethjson = { path = "../../../../../ethjson" } ethereum-types = "0.9.2" ethkey = { path = "../../../../../accounts/ethkey" } ethstore = { path = "../../../../../accounts/ethstore"} -hbbft = { git = "https://github.com/poanetwork/hbbft", rev = "4857b7f9c7a0f513caca97c308d352c6a77fe5c2" } -hbbft_testing = { git = "https://github.com/poanetwork/hbbft" } +hbbft = { git = "https://github.com/DMDcoin/hbbft.git", rev = "4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074" } +hbbft_testing = { git = "https://github.com/DMDcoin/hbbft.git", rev = "4edcd5cf5f370e6862d6d84d7ae4f05c0eb88074" } parity-crypto = { version = "0.6.2", features = ["publickey"] } -rand = "0.6.5" +rand = "0.7.3" rustc-hex = "2.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +serde_with = { version = "3.6", features = [ "hex", "std", "macros" ] } toml = "0.5.6" [dev-dependencies] diff --git a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/keygen_history_helpers.rs b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/keygen_history_helpers.rs index 1cbce20053..b6f6a9bf72 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/keygen_history_helpers.rs +++ b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/keygen_history_helpers.rs @@ -1,8 +1,10 @@ use crate::Enode; use ethereum_types::H128; +use ethjson::spec::hbbft::HbbftNetworkFork; use hbbft::sync_key_gen::{AckOutcome, Part, PartOutcome, PublicKey, SecretKey, SyncKeyGen}; -use parity_crypto::publickey::{public_to_address, Address, Public, Secret}; +use parity_crypto::publickey::{Address, Public, Secret, public_to_address}; use serde::{Deserialize, Serialize}; +use serde_with::serde_as; use std::{collections::BTreeMap, sync::Arc}; #[derive(Clone)] @@ -92,15 +94,13 @@ pub fn generate_keygens( (sync_keygen, parts, acks) } -pub fn enodes_to_pub_keys( - enodes: &BTreeMap, -) -> Arc> { +pub fn enodes_to_pub_keys(enodes: &Vec) -> Arc> { Arc::new( enodes .iter() - .map(|(n, e)| { + .map(|e| { 
( - n.clone(), + e.public.clone(), KeyPairWrapper { public: e.public, secret: e.secret.clone(), @@ -111,22 +111,56 @@ pub fn enodes_to_pub_keys( ) } +#[serde_as] #[derive(Serialize, Deserialize)] -struct KeyGenHistoryData { +pub struct KeyGenHistoryData { validators: Vec, staking_addresses: Vec, public_keys: Vec, ip_addresses: Vec, + #[serde_as(as = "Vec")] parts: Vec>, + #[serde_as(as = "Vec>")] acks: Vec>>, } +impl KeyGenHistoryData { + pub fn to_json(&self) -> String { + serde_json::to_string(self).expect("Keygen History must convert to JSON") + } + + pub fn create_example_fork_definition(&self) -> HbbftNetworkFork { + let validators: Vec> = self + .public_keys + .iter() + .map(|v| { + let mut hex = v.clone(); + println!("public key: {}", v); + if v.starts_with("0x") { + hex = v.split_at(2).1.to_string(); + } + + let public = hex.parse::().expect("Could not parse public key"); + public.as_bytes().to_vec() + }) + .collect(); + + HbbftNetworkFork { + block_number_start: 10, + block_number_end: Some(100), + validators: validators, + parts: self.parts.clone(), + acks: self.acks.clone(), + } + } +} + pub fn key_sync_history_data( parts: &BTreeMap, acks: &BTreeMap>, - enodes: &BTreeMap, + enodes: &Vec, include_validators_only: bool, -) -> String { +) -> KeyGenHistoryData { let mut data = KeyGenHistoryData { validators: Vec::new(), staking_addresses: Vec::new(), @@ -141,10 +175,13 @@ pub fn key_sync_history_data( let mut acks_total_bytes = 0; let mut num_acks = 0; - let ids = enodes.keys(); + //let ids: Vec = enodes.iter().map(|e| e.public.clone()).collect(); + let mut staking_counter = 1; // Add Parts and Acks in strict order - for id in ids { + for enode in enodes.iter() { + let id = &enode.public; + // if there is no part available for this node, // then the it is not a initial validator. 
@@ -157,8 +194,7 @@ pub fn key_sync_history_data( data.staking_addresses .push(format!("{:?}", Address::from_low_u64_be(staking_counter))); staking_counter += 1; - data.public_keys - .push(format!("{:?}", enodes.get(id).unwrap().public)); + data.public_keys.push(format!("{:?}", id)); data.ip_addresses .push(format!("{:?}", H128::from_low_u64_be(1))); @@ -215,7 +251,7 @@ pub fn key_sync_history_data( parts_total_bytes + acks_total_bytes ); - serde_json::to_string(&data).expect("Keygen History must convert to JSON") + data } #[cfg(test)] diff --git a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/main.rs b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/main.rs index 03862f7b29..20d43b78a2 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/main.rs +++ b/crates/ethcore/src/engines/hbbft/hbbft_config_generator/src/main.rs @@ -2,7 +2,6 @@ extern crate bincode; #[macro_use] extern crate clap; extern crate ethcore; -extern crate ethereum_types; extern crate ethkey; extern crate ethstore; extern crate hbbft; @@ -11,6 +10,7 @@ extern crate rand; extern crate rustc_hex; extern crate serde; extern crate serde_json; +extern crate serde_with; extern crate toml; mod keygen_history_helpers; @@ -19,11 +19,8 @@ use clap::{App, Arg}; use ethstore::{KeyFile, SafeAccount}; use keygen_history_helpers::{enodes_to_pub_keys, generate_keygens, key_sync_history_data}; use parity_crypto::publickey::{Address, Generator, KeyPair, Public, Random, Secret}; -use std::{ - collections::BTreeMap, convert::TryInto, fmt::Write, fs, num::NonZeroU32, str::FromStr, - sync::Arc, -}; -use toml::{map::Map, Value}; +use std::{convert::TryInto, fmt::Write, fs, num::NonZeroU32, str::FromStr}; +use toml::{Value, map::Map}; pub fn create_account() -> (Secret, Public, Address) { let acc = Random.generate(); @@ -34,20 +31,22 @@ pub fn create_account() -> (Secret, Public, Address) { ) } +#[derive(Clone)] pub struct Enode { secret: Secret, public: Public, address: 
Address, idx: usize, ip: String, + port: u16, } impl ToString for Enode { fn to_string(&self) -> String { // Example: // enode://30ccdeb8c31972f570e4eea0673cd08cbe7cefc5de1d70119b39c63b1cba33b48e494e9916c0d1eab7d296774f3573da46025d1accdef2f3690bc9e6659a34b4@192.168.0.101:30300 - let port = 30300usize + self.idx; - format!("enode://{:x}@{}:{}", self.public, self.ip, port) + + format!("enode://{:x}@{}:{}", self.public, self.ip, self.port) } } @@ -55,8 +54,9 @@ fn generate_enodes( num_nodes: usize, private_keys: Vec, external_ip: Option<&str>, -) -> BTreeMap { - let mut map = BTreeMap::new(); + port_base: u16, +) -> Vec { + let mut map = Vec::new(); for i in 0..num_nodes { // Note: node 0 is a regular full node (not a validator) in the testnet setup, so we start at index 1. let idx = i + 1; @@ -76,25 +76,23 @@ fn generate_enodes( create_account() }; println!("Debug, Secret: {:?}", secret); - map.insert( + map.push(Enode { + secret, public, - Enode { - secret, - public, - address, - idx, - ip: ip.into(), - }, - ); + address, + idx, + ip: ip.into(), + port: port_base + idx as u16, + }); } // the map has the element order by their public key. // we reassign the idx here, so the index of the nodes follows // the same order like everything else. 
- let mut new_index = 1; - for public in map.iter_mut() { - public.1.idx = new_index; - new_index = new_index + 1; - } + // let mut new_index = 1; + // for public in map.iter_mut() { + // public.1.idx = new_index; + // new_index = new_index + 1; + // } map } @@ -104,17 +102,21 @@ fn to_toml_array(vec: Vec<&str>) -> Value { fn to_toml( i: usize, + open_ports: bool, config_type: &ConfigType, external_ip: Option<&str>, signer_address: &Address, total_num_of_nodes: usize, tx_queue_per_sender: Option, + base_metrics_port: Option, + metrics_interface: Option<&str>, + base_port: u16, + base_rpc_port: u16, + base_ws_port: u16, + logging: Option<&str>, ) -> Value { - let base_port = 30300i64; - let base_rpc_port = 8540i64; - let base_ws_port = 9540i64; - let mut parity = Map::new(); + match config_type { ConfigType::PosdaoSetup => { parity.insert("chain".into(), Value::String("./spec/spec.json".into())); @@ -131,7 +133,10 @@ fn to_toml( } let mut network = Map::new(); - network.insert("port".into(), Value::Integer(base_port + i as i64)); + network.insert( + "port".into(), + Value::Integer((base_port as usize + i) as i64), + ); match config_type { ConfigType::PosdaoSetup => { network.insert( @@ -165,27 +170,37 @@ fn to_toml( } let mut rpc = Map::new(); - rpc.insert("interface".into(), Value::String("all".into())); - rpc.insert("cors".into(), to_toml_array(vec!["all"])); - rpc.insert("hosts".into(), to_toml_array(vec!["all"])); - let apis = to_toml_array(vec![ - "web3", - "eth", - "pubsub", - "net", - "parity", - "parity_set", - "parity_pubsub", - "personal", - "traces", - ]); - rpc.insert("apis".into(), apis); - rpc.insert("port".into(), Value::Integer(base_rpc_port + i as i64)); - let mut websockets = Map::new(); - websockets.insert("interface".into(), Value::String("all".into())); - websockets.insert("origins".into(), to_toml_array(vec!["all"])); - websockets.insert("port".into(), Value::Integer(base_ws_port + i as i64)); + + if open_ports { + 
rpc.insert("interface".into(), Value::String("all".into())); + rpc.insert("cors".into(), to_toml_array(vec!["all"])); + rpc.insert("hosts".into(), to_toml_array(vec!["all"])); + let apis = to_toml_array(vec![ + "web3", + "eth", + "pubsub", + "net", + "parity", + "parity_pubsub", + "traces", + ]); + rpc.insert("apis".into(), apis); + rpc.insert( + "port".into(), + Value::Integer((base_rpc_port as usize + i) as i64), + ); + + websockets.insert("interface".into(), Value::String("all".into())); + websockets.insert("origins".into(), to_toml_array(vec!["all"])); + websockets.insert( + "port".into(), + Value::Integer((base_ws_port as usize + i) as i64), + ); + } else { + rpc.insert("disable".into(), Value::Boolean(true)); + websockets.insert("disable".into(), Value::Boolean(true)); + } let mut ipc = Map::new(); ipc.insert("disable".into(), Value::Boolean(true)); @@ -221,12 +236,12 @@ fn to_toml( } mining.insert("force_sealing".into(), Value::Boolean(true)); - mining.insert("min_gas_price".into(), Value::Integer(1000000000)); - mining.insert( - "gas_floor_target".into(), - Value::String("1000000000".into()), - ); + // we put an extremly low min gas price in the config + // the min gas price is gathered from the DAO + // this makes sure that the min_gas_price wont be higher then the gas pricce the DAO decides. + mining.insert("min_gas_price".into(), Value::Integer(1000)); mining.insert("reseal_on_txs".into(), Value::String("none".into())); + mining.insert("gas_floor_target".into(), Value::String("300000000".into())); mining.insert("reseal_min_period".into(), Value::Integer(0)); if let Some(tx_queue_per_sender_) = tx_queue_per_sender { @@ -237,11 +252,19 @@ fn to_toml( } let mut misc = Map::new(); + + // example for a more verbose logging. 
+ // Value::String("txqueue=trace,consensus=debug,engine=trace,own_tx=trace,miner=trace,tx_filter=trace".into()) misc.insert( "logging".into(), - Value::String("txqueue=trace,consensus=trace,engine=trace".into()), + Value::String( + logging.unwrap_or("txqueue=trace,consensus=trace,engine=trace,own_tx=trace,tx_filter=trace,sync=trace,network=trace,io=trace") + .into(), + ), ); - misc.insert("log_file".into(), Value::String("parity.log".into())); + misc.insert("log_file".into(), Value::String("diamond-node.log".into())); + + // metrics.insert(""); let mut map = Map::new(); map.insert("parity".into(), Value::Table(parity)); @@ -253,6 +276,35 @@ fn to_toml( map.insert("account".into(), Value::Table(account)); map.insert("mining".into(), Value::Table(mining)); map.insert("misc".into(), Value::Table(misc)); + + if let Some(port_base) = base_metrics_port { + let mut metrics = Map::new(); + + let port = (port_base as usize) + i; + + metrics.insert("enable".into(), Value::Boolean(true)); + + metrics.insert("port".into(), Value::Integer(port as i64)); + + // metrics.insert("interface".into(), Value::String("local".into())); + // Metrics: + // --metrics + // Enable prometheus metrics (only full client). + + // --metrics-port=[PORT] + // Specify the port portion of the metrics server. (default: 3000) + + // --metrics-interface=[IP] + // Specify the hostname portion of the metrics server, IP should be an interface's IP address, or all (all + // interfaces) or local. (default: local) + + if let Some(metrics_interface_) = metrics_interface { + metrics.insert("interface".into(), Value::String(metrics_interface_.into())); + } + + map.insert("metrics".into(), Value::Table(metrics)); + } + Value::Table(map) } @@ -323,6 +375,54 @@ fn main() { .required(false) .takes_value(true), ) + .arg( + Arg::with_name("metrics_port_base") + .long("metrics_port_base") + .help("activates prometheus metrics. 
The port is the base port, the node index is added to it.") + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name("metrics_interface") + .long("metrics_interface") + .help("internet interface of metrics. 'all', 'local' or ip address.") + .required(false) + .takes_value(true), + ) + .arg(Arg::with_name("fork_block") + .long("fork block number") + .help("defines a fork block number.") + .required(false) + .takes_value(true), + ) + .arg( + Arg::with_name("port_base") + .long("port_base") + .help("devp2p communication port base address") + .required(false) + .default_value("30300") + .takes_value(true), + ).arg( + Arg::with_name("port_base_rpc") + .long("port_base_rpc") + .help("rpc port base") + .required(false) + .default_value("8540") + .takes_value(true), + ).arg( + Arg::with_name("port_base_ws") + .long("port_base_ws") + .help("rpc web socket port base") + .required(false) + .default_value("9540") + .takes_value(true), + ).arg( + Arg::with_name("logging") + .long("log definition string") + .help("example: txqueue=trace,consensus=debug,engine=debug,own_tx=trace,tx_filter=trace,sync=trace,network=trace") + .required(false) + .takes_value(true), + ) .get_matches(); let num_nodes_validators: usize = matches @@ -345,12 +445,57 @@ fn main() { ) }); + // let fork_block_number: Option = matches.value_of("fork_block_number").map_or(None, |v| { + // Some( + // v.parse::() + // .expect("fork_block_number need to be of integer type"), + // ) + // }); + + let metrics_port_base: Option = matches.value_of("metrics_port_base").map_or(None, |v| { + Some( + v.parse::() + .expect("metrics_port need to be an integer port definition 1-65555"), + ) + }); + + let port_base: u16 = matches + .value_of("port_base") + .map(|v| { + v.parse::() + .expect("metrics_port need to be an integer port definition 1-65555") + }) + .unwrap(); + + let port_base_rpc: Option = matches.value_of("port_base_rpc").map_or(None, |v| { + Some( + v.parse::() + .expect("metrics_port need to be an 
integer port definition 1-65555"), + ) + }); + + let port_base_ws: Option = matches.value_of("port_base_ws").map_or(None, |v| { + Some( + v.parse::() + .expect("metrics_port need to be an integer port definition 1-65555"), + ) + }); + + std::println!("metrics_port_base: {:?}", metrics_port_base); + + let metrics_interface = matches.value_of("metrics_interface"); + + let logging_string = matches.value_of("logging"); + assert!( num_nodes_total >= num_nodes_validators, "max_nodes must be greater than nodes" ); - println!("generating config files for {} nodes in total, with the first {} nodes as initial validator", num_nodes_total, num_nodes_validators); + println!( + "generating config files for {} nodes in total, with the first {} nodes as initial validator", + num_nodes_total, num_nodes_validators + ); let config_type = value_t!(matches.value_of("configtype"), ConfigType).unwrap_or(ConfigType::PosdaoSetup); @@ -370,41 +515,47 @@ fn main() { assert!(private_keys.len() == num_nodes_total); }; - let enodes_map = generate_enodes(num_nodes_total, private_keys, external_ip); + let enodes = generate_enodes(num_nodes_total, private_keys, external_ip, port_base); let mut rng = rand::thread_rng(); - let pub_keys = enodes_to_pub_keys(&enodes_map); + //let pub_keys = enodes_to_pub_keys(&enodes_map); - // we only need the first x pub_keys - let pub_keys_for_key_gen_btree = pub_keys + let enodes_for_key: Vec = enodes .iter() .take(num_nodes_validators) - .map(|x| (x.0.clone(), x.1.clone())) + .map(|e| e.clone()) .collect(); + let pub_keys_for_key_gen_btree = enodes_to_pub_keys(&enodes_for_key); + let (_sync_keygen, parts, acks) = generate_keygens( - Arc::new(pub_keys_for_key_gen_btree), + pub_keys_for_key_gen_btree.clone(), &mut rng, (num_nodes_validators - 1) / 3, ); let mut reserved_peers = String::new(); - for pub_key in pub_keys.iter() { - let our_id = pub_key.0; - - let enode = enodes_map.get(our_id).expect("validator id must be mapped"); + for enode in enodes.iter() { 
writeln!(&mut reserved_peers, "{}", enode.to_string()) .expect("enode should be written to the reserved peers string"); let i = enode.idx; let file_name = format!("hbbft_validator_{}.toml", i); + // the unwrap is safe, because there is a default value defined. let toml_string = toml::to_string(&to_toml( i, + false, &config_type, external_ip, &enode.address, num_nodes_total, tx_queue_per_sender.clone(), + metrics_port_base, + metrics_interface, + port_base, + port_base_rpc.unwrap(), + port_base_ws.unwrap(), + logging_string, )) .expect("TOML string generation should succeed"); fs::write(file_name, toml_string).expect("Unable to write config file"); @@ -427,14 +578,26 @@ fn main() { format!("hbbft_validator_key_{}.json", i), ); } + + // let base_port = 30300i64; + // let base_rpc_port = 8540i64; + // let base_ws_port = 9540i64; + // Write rpc node config let rpc_string = toml::to_string(&to_toml( 0, + true, &ConfigType::Rpc, external_ip, &Address::default(), // todo: insert HBBFT Contracts pot here. num_nodes_total, tx_queue_per_sender.clone(), + metrics_port_base, + metrics_interface, + port_base, + port_base_rpc.unwrap(), + port_base_ws.unwrap(), + logging_string, )) .expect("TOML string generation should succeed"); fs::write("rpc_node.toml", rpc_string).expect("Unable to write rpc config file"); @@ -445,19 +608,27 @@ fn main() { // Write the password file fs::write("password.txt", "test").expect("Unable to write password.txt file"); + let key_sync_file_validators_only = key_sync_history_data(&parts, &acks, &enodes, true); // only pass over enodes in the enodes_map that are also available for acks and parts. 
- fs::write( "keygen_history.json", - key_sync_history_data(&parts, &acks, &enodes_map, true), + key_sync_file_validators_only.to_json(), ) .expect("Unable to write keygen history data file"); fs::write( "nodes_info.json", - key_sync_history_data(&parts, &acks, &enodes_map, false), + key_sync_history_data(&parts, &acks, &enodes, false).to_json(), ) .expect("Unable to write nodes_info data file"); + + fs::write( + "fork_example.json", + key_sync_file_validators_only + .create_example_fork_definition() + .to_json(), + ) + .expect("Unable to write fork_example.json data file"); } #[cfg(test)] @@ -504,7 +675,7 @@ mod tests { let num_nodes = 4; let t = 1; - let enodes = generate_enodes(num_nodes, Vec::new(), None); + let enodes = generate_enodes(num_nodes, Vec::new(), None, 30300); let pub_keys = enodes_to_pub_keys(&enodes); let mut rng = rand::thread_rng(); diff --git a/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs b/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs new file mode 100644 index 0000000000..224cc1ac2a --- /dev/null +++ b/crates/ethcore/src/engines/hbbft/hbbft_early_epoch_end_manager.rs @@ -0,0 +1,399 @@ +use crate::types::ids::BlockId; +use ethereum_types::Address; +use stats::PrometheusMetrics; + +use crate::{ + client::{BlockChainClient, EngineClient}, + engines::hbbft::contracts::connectivity_tracker_hbbft::report_missing_connectivity, + ethereum::public_key_to_address::public_key_to_address, +}; +use std::{ + collections::BTreeMap, + time::{Duration, Instant}, +}; + +use super::{ + NodeId, + contracts::connectivity_tracker_hbbft::{is_connectivity_loss_reported, report_reconnect}, + hbbft_message_memorium::HbbftMessageMemorium, +}; + +pub(crate) struct HbbftEarlyEpochEndManager { + /// The current epoch number. + current_tracked_epoch_number: u64, + + /// epoch manager start up time. + start_time: Instant, + + start_block: u64, + + /// allowed devp2p warmup time. 
+ allowed_devp2p_warmup_time: Duration, + + /// public keys of all validators for this epoch. + validators: Vec, + + /// current flagged validators, unordered list - no performance issue, since this can + /// only grow up to 7 elements for a usual set of 25 nodes. + flagged_validators: Vec, + + node_id_to_address: BTreeMap, + + // address_to_node_id: BTreeMap, + signing_address: Address, +} + +impl HbbftEarlyEpochEndManager { + /// creates a new EarlyEpochEndManager, + /// if conditions are matching to create one. + /// It is expected that this function is only called if the node is a validator. + /// This prerequesite will be checked and if not met, panics. + pub fn create_early_epoch_end_manager( + allowed_devp2p_warmup_time: Duration, + client: &dyn BlockChainClient, + engine_client: &dyn EngineClient, + epoch_number: u64, + epoch_start_block: u64, + validator_set: Vec, + signing_address: &Address, + ) -> Option { + if client.is_syncing() { + // if we are syncing, we do not need to create an early epoch end manager yet. + // if we are syncing as a validator, and it is really this epoch, + // this way the creation of the early epoch end manager is created in a subsequent call, + // when we are at the tip of the chain, and get the correct state for + // - flagged validators + // - start_time + // The whole window for the devp2p warmup time is granted in this case, + // therefore this node won't flag anyone in the near future. 
+ return None; + } + + let mut node_id_to_address: BTreeMap = BTreeMap::new(); + let mut address_to_node_id: BTreeMap = BTreeMap::new(); + + let mut validators: Vec = Vec::new(); + + for validator in validator_set.iter() { + let address = public_key_to_address(&validator.0); + node_id_to_address.insert(validator.clone(), address); + address_to_node_id.insert(address, validator.clone()); + + if address == *signing_address { + continue; + } + + validators.push(validator.clone()); + } + + // figure out if we have to retrieve the data from the smart contracts. + // if the epoch start did just happen, + // we do not have to retrieve the data from the smart contracts. + let now = Instant::now(); + + let flagged_validators = Self::get_current_reported_validators_from_contracts( + engine_client, + BlockId::Latest, + &node_id_to_address, + &validators, + signing_address, + epoch_number, + ); + + let result = Self { + current_tracked_epoch_number: epoch_number, + start_time: now, + start_block: epoch_start_block, + allowed_devp2p_warmup_time, + validators: validators, + flagged_validators: flagged_validators, + node_id_to_address, + // address_to_node_id, + signing_address: signing_address.clone(), + }; + + info!(target: "engine", "early-epoch-end: HbbftEarlyEpochEndManager created. start_time {now:?}, start_block: {epoch_start_block}"); + + return Some(result); + } + + /// retrieves the information from smart contracts which validators are currently flagged. 
+ fn get_current_reported_validators_from_contracts( + client: &dyn EngineClient, + block_id: BlockId, + node_id_to_address: &BTreeMap, + validators: &Vec, + signing_address: &Address, + epoch: u64, + ) -> Vec { + let mut result = Vec::::new(); + + for validator in validators.iter() { + let validator_address = if let Some(a) = node_id_to_address.get(validator) { + a + } else { + error!(target: "engine", "early-epoch-end: could not find address for validator in node_id_to_address cache."); + continue; + }; + + match is_connectivity_loss_reported( + client, + block_id, + signing_address, + epoch, + validator_address, + ) { + Ok(reported) => { + if reported { + result.push(validator.clone()); + } + } + Err(e) => { + error!(target: "engine", "early-epoch-end: could not get reported status for validator {validator:?}. call error: {e:?}"); + } + } + } + + return result; + // match is_connectivity_loss_reported(client, block_id, signing_address, ) { + + // } + + // match get_current_flagged_validators_from_contract(client, block_id) { + // Ok(v) => { + // let mut result: Vec = Vec::new(); + + // for a in v.iter() { + // if let Some(node_id) = address_to_node_id.get(a) { + // result.push(node_id.clone()); + // } else { + // error!(target: "engine","early-epoch-end: could not find validator in address cache: {a:?}"); + // } + // } + + // return result; + // // address_to_node_id.get(key) + // } + // Err(e) => { + // error!(target: "engine","early-epoch-end: could not get_current_flagged_validators_from_contracts {e:?}" ); + // Vec::new() + // } + // } + } + + fn notify_about_missing_validator( + &mut self, + validator: &NodeId, + client: &dyn EngineClient, + full_client: &dyn BlockChainClient, + ) { + if let Some(validator_address) = self.node_id_to_address.get(validator) { + if report_missing_connectivity( + client, + full_client, + validator_address, + &self.signing_address, + ) { + if !self.flagged_validators.contains(&validator) { + // in this case, we already had this 
validator in the list, + // what means that the transaction previous send was not successful. + // we could here do some improvements to not spam to many disconnect reports into the system. + self.flagged_validators.push(validator.clone()); + } + } + } else { + warn!("Could not find validator_address for node id in cache: {validator:?}"); + return; + } + } + + fn notify_about_validator_reconnect( + &mut self, + validator: &NodeId, + full_client: &dyn BlockChainClient, + engine_client: &dyn EngineClient, + ) { + let index = if let Some(index) = self.flagged_validators.iter().position(|x| x == validator) + { + index + } else { + error!(target: "engine", "early-epoch-end: notify_about_validator_reconnect Could not find reconnected validator in flagged validators."); + return; + }; + + if let Some(validator_address) = self.node_id_to_address.get(validator) { + if report_reconnect( + engine_client, + full_client, + validator_address, + &self.signing_address, + ) { + // Todo: we do not know if the transaction will get processed successful. + // shall we track transactions ? + self.flagged_validators.remove(index); + } + } else { + warn!("Could not find validator_address for node id in cache: {validator:?}"); + return; + } + } + + pub fn is_reported( + &self, + client: &dyn EngineClient, + other_validator_address: &Address, + ) -> bool { + // todo: for performance improvements, we could apply caching here, + // once we are up to date with the contract information and track the inclusion of our own reports. + let result = is_connectivity_loss_reported( + client, + BlockId::Latest, + &self.signing_address, + self.current_tracked_epoch_number, + other_validator_address, + ); + + if let Ok(r) = result { + return r; + } else { + error!(target: "engine", "early-epoch-end: could not get reported status for validator {other_validator_address:?}"); + return false; + } + } + + /// decides on the memorium data if we should update to contract data. + /// end executes them. 
+ pub fn decide( + &mut self, + memorium: &HbbftMessageMemorium, + full_client: &dyn BlockChainClient, + client: &dyn EngineClient, + ) { + // if devp2p warmup time is not over yet, we do not have to do anything. + if self.start_time.elapsed() < self.allowed_devp2p_warmup_time { + debug!(target: "engine", "early-epoch-end: no decision: Devp2p warmup time"); + return; + } + + if full_client.is_major_syncing() { + // if we are syncing, we wont do any blaming. + debug!(target: "engine", "early-epoch-end: no decision: syncing"); + return; + } + + if full_client.is_syncing() { + // if we are syncing, we wont do any blaming. + debug!(target: "engine", "early-epoch-end: detected attempt to break because of is_major_syncing() instead of is_synincg()no decision: syncing"); + } + + let block_num = if let Some(block) = full_client.block(BlockId::Latest) { + block.number() + } else { + error!(target:"engine", "early-epoch-end: could not retrieve latest block."); + return; + }; + + let treshold: u64 = 2; + // todo: read this out from contracts: ConnectivityTrackerHbbft -> reportDisallowPeriod + // requires us to update the Contracts ABIs: + // https://github.com/DMDcoin/diamond-node/issues/115 + let treshold_time = Duration::from_secs(12 * 60); // 12 Minutes = 1 times the heartbeat + 2 minutes as grace period. + + if self.start_time.elapsed() < treshold_time { + debug!(target: "engine", "early-epoch-end: no decision: Treshold time not reached."); + return; + } + + if block_num < self.start_block + treshold { + // not enought blocks have passed this epoch, + // to judge other nodes. + debug!(target: "engine", "early-epoch-end: no decision: not enough blocks."); + return; + } + + trace!(target: "engine", "checking epoch history for {} validators", &self.validators.len()); + + //full_client.best_block_header() + // get current state of missing validators from hbbftMemorium. 
+ if let Some(epoch_history) = memorium.get_staking_epoch_history(block_num) { + for validator in &self.validators.clone() { + let validator_address = match self.node_id_to_address.get(validator) { + Some(a) => a, + None => { + error!(target: "engine", "early-epoch-end: could not find validator_address for node id in cache: {validator:?}"); + continue; + } + }; + + if let Some(node_history) = epoch_history.get_history_for_node(validator) { + let last_message_time = node_history.get_last_good_message_time(); + let last_message_time_lateness = last_message_time.elapsed(); + if last_message_time_lateness > treshold_time { + // we do not have to send notification, if we already did so. + if !self.is_reported(client, validator_address) { + // this function will also add the validator to the list of flagged validators. + self.notify_about_missing_validator(&validator, client, full_client); + } + } else { + // this validator is OK. + // maybe it was flagged and we need to unflag it ? + if self.is_reported(client, validator_address) { + self.notify_about_validator_reconnect(&validator, full_client, client); + } + } + } else { + debug!(target: "engine", "early-epoch-end: no history info for validator {validator}"); + + // we do not have any history for this node. + if !self.is_reported(client, validator_address) { + // this function will also add the validator to the list of flagged validators. + self.notify_about_missing_validator(&validator, client, full_client); + } + } + // todo: if the systems switched from block based measurement to time based measurement. + } + } + // else: nothing to do: no history yet. + + // note: We do not take care if hbbft message memorium might not have processed some of the messages yet, + // since it is not important to do the decision based on the latest data, since the decide method will be called + // again. 
+ } +} + +impl PrometheusMetrics for HbbftEarlyEpochEndManager { + fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) { + registry.register_gauge( + "early_epoch_end_staking_epoch", + "staking epoch information for early epoch end manager", + self.current_tracked_epoch_number as i64, + ); + + registry.register_gauge( + "early_epoch_end_num_flagged_validators", + "number of validators flagged for missing communication", + self.flagged_validators.len() as i64, + ); + + for v in self.validators.iter() { + let is_flagged = self.flagged_validators.contains(v); + let label_value = v.as_8_byte_string(); + registry.register_gauge_with_other_node_label( + "early_epoch_end_flag", + "node has flagged other_node [0-1]", + label_value.as_str(), + is_flagged as i64, + ); + } + } +} +/// testing early epoch stop manager. +#[cfg(test)] +mod tests { + + #[test] + fn test_early_epoch_end() { + + // should + } +} diff --git a/crates/ethcore/src/engines/hbbft/hbbft_engine.rs b/crates/ethcore/src/engines/hbbft/hbbft_engine.rs index c26af006fd..2ff4f8c1ae 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_engine.rs +++ b/crates/ethcore/src/engines/hbbft/hbbft_engine.rs @@ -1,25 +1,38 @@ -use super::block_reward_hbbft::BlockRewardContract; +use super::{ + block_reward_hbbft::BlockRewardContract, + hbbft_early_epoch_end_manager::HbbftEarlyEpochEndManager, + hbbft_engine_cache::HbbftEngineCache, + hbbft_peers_handler::{HbbftConnectToPeersMessage, HbbftPeersHandler}, +}; use crate::{ - client::BlockChainClient, - engines::hbbft::{ - contracts::random_hbbft::set_current_seed_tx_raw, hbbft_message_memorium::BadSealReason, - hbbft_peers_management::HbbftPeersManagement, + block::ExecutedBlock, + client::{ + BlockChainClient, + traits::{EngineClient, ForceUpdateSealing}, + }, + engines::{ + BlockAuthorOption, Engine, EngineError, ForkChoice, Seal, SealingState, + default_system_or_code_call, + hbbft::{ + contracts::random_hbbft::set_current_seed_tx_raw, 
hbbft_message_memorium::BadSealReason, + }, + signer::EngineSigner, + }, + error::{BlockError, Error}, + io::{IoContext, IoHandler, IoService, TimerToken}, + machine::EthereumMachine, + types::{ + BlockNumber, + header::{ExtendedHeader, Header}, + ids::BlockId, + transaction::{SignedTransaction, TypedTransaction}, }, }; -use block::ExecutedBlock; -use client::traits::{EngineClient, ForceUpdateSealing}; use crypto::publickey::Signature; -use engines::{ - default_system_or_code_call, signer::EngineSigner, Engine, EngineError, ForkChoice, Seal, - SealingState, -}; -use error::{BlockError, Error}; -use ethereum_types::{Address, H256, H512, U256}; +use ethereum_types::{Address, H256, H512, Public, U256}; use ethjson::spec::HbbftParams; use hbbft::{NetworkInfo, Target}; -use io::{IoContext, IoHandler, IoService, TimerToken}; use itertools::Itertools; -use machine::EthereumMachine; use parking_lot::{Mutex, RwLock}; use rlp; use rmp_serde; @@ -30,37 +43,31 @@ use std::{ collections::BTreeMap, convert::TryFrom, ops::BitXor, - sync::{atomic::AtomicBool, Arc, Weak}, - time::Duration, -}; -use types::{ - header::{ExtendedHeader, Header}, - ids::BlockId, - transaction::{SignedTransaction, TypedTransaction}, - BlockNumber, + sync::{Arc, Weak, atomic::AtomicBool}, + time::{Duration, Instant}, }; use super::{ + NodeId, contracts::{ keygen_history::{all_parts_acks_available, initialize_synckeygen}, staking::start_time_of_next_phase_transition, - validator_set::{get_pending_validators, is_pending_validator, ValidatorType}, + validator_set::{ValidatorType, get_pending_validators, is_pending_validator}, }, contribution::{unix_now_millis, unix_now_secs}, hbbft_state::{Batch, HbMessage, HbbftState, HoneyBadgerStep}, keygen_transactions::KeygenTransactionSender, sealing::{self, RlpSig, Sealing}, - NodeId, -}; -use engines::hbbft::{ - contracts::validator_set::{ - get_validator_available_since, send_tx_announce_availability, staking_by_mining_address, - }, - 
hbbft_message_memorium::HbbftMessageDispatcher, }; +use crate::engines::hbbft::hbbft_message_memorium::HbbftMessageDispatcher; use std::{ops::Deref, sync::atomic::Ordering}; -use std::process::Command; +// Internal representation for storing deferred outgoing consensus messages. +struct StoredOutgoingMessage { + block_number: BlockNumber, + data: Vec, + recipients: Vec, +} type TargetedMessage = hbbft::TargetedMessage; @@ -73,9 +80,20 @@ enum Message { Sealing(BlockNumber, sealing::Message), } +impl Message { + /// Returns the epoch (block number) of the message. + pub fn block_number(&self) -> BlockNumber { + match self { + Message::HoneyBadger(_, msg) => msg.epoch(), + Message::Sealing(block_num, _) => *block_num, + } + } +} + /// The Honey Badger BFT Engine. pub struct HoneyBadgerBFT { transition_service: IoService<()>, + hbbft_peers_service: IoService, client: Arc>>>, signer: Arc>>>, machine: EthereumMachine, @@ -86,15 +104,29 @@ pub struct HoneyBadgerBFT { message_counter: Mutex, random_numbers: RwLock>, keygen_transaction_sender: RwLock, - has_sent_availability_tx: AtomicBool, + has_connected_to_validator_set: AtomicBool, - peers_management: Mutex, + //peers_management: Mutex, current_minimum_gas_price: Mutex>, + early_epoch_manager: Mutex>, + hbbft_engine_cache: Mutex, + delayed_hbbft_join: AtomicBool, + + // When true, outgoing consensus messages are deferred and stored for later delivery. + defer_outgoing_messages: AtomicBool, + // Storage for deferred outgoing messages ready to be delivered later. + stored_outgoing_messages: Mutex>, + // Phoenix recovery protocol: ensure we reset HoneyBadger only once before resuming sending. + phoenix_reset_performed: AtomicBool, } struct TransitionHandler { client: Arc>>>, engine: Arc, + /// the last known block for the auto shutdown on stuck node feature. 
+ /// https://github.com/DMDcoin/diamond-node/issues/78 + auto_shutdown_last_known_block_number: Mutex, + auto_shutdown_last_known_block_import: Mutex, } const DEFAULT_DURATION: Duration = Duration::from_secs(1); @@ -153,12 +185,179 @@ impl TransitionHandler { // Arbitrary identifier for the timer we register with the event handler. const ENGINE_TIMEOUT_TOKEN: TimerToken = 1; -const ENGINE_SHUTDOWN_IF_UNAVAILABLE: TimerToken = 2; +const ENGINE_SHUTDOWN: TimerToken = 2; // Some Operations should be executed if the chain is synced to the current tail. const ENGINE_DELAYED_UNITL_SYNCED_TOKEN: TimerToken = 3; // Some Operations have no urge on the timing, but are rather expensive. // those are handeled by this slow ticking timer. const ENGINE_VALIDATOR_CANDIDATE_ACTIONS: TimerToken = 4; +// Check for current Phoenix Protocol phase +const ENGINE_PHOENIX_CHECK: TimerToken = 5; + +impl TransitionHandler { + fn handle_shutdown_on_missing_block_import( + &self, + shutdown_on_missing_block_import_config_option: Option, + ) { + let shutdown_on_missing_block_import_config: u64; + + if let Some(c) = shutdown_on_missing_block_import_config_option { + if c == 0 { + // if shutdown_on_missing_block_import is configured to 0, we do not have to do anything. + return; + } + shutdown_on_missing_block_import_config = c; + } else { + // if shutdown_on_missing_block_import is not configured at all, we do not have to do anything. + return; + } + + // ... we need to check if enough time has passed since the last block was imported. 
+ let current_block_number_option = if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + c.block_number(BlockId::Latest) + } else { + warn!(target: "consensus", "shutdown-on-missing-block-import: Could not upgrade weak reference to client."); + return; + } + } else { + warn!(target: "consensus", "shutdown-on-missing-block-import: Could not read client."); + return; + }; + + let now = std::time::Instant::now(); + + if let Some(current_block_number) = current_block_number_option { + if current_block_number <= 1 { + // we do not do an auto shutdown for the first block. + // it is normal for a network to have no blocks at the beginning, until everything is settled. + return; + } + + let last_known_block_number: u64 = + self.auto_shutdown_last_known_block_number.lock().clone(); + + if current_block_number == last_known_block_number { + // if the last known block number is the same as the current block number, + // we have not imported a new block since the last check. + // we need to check if enough time has passed since the last check. + + let last_known_block_import = + self.auto_shutdown_last_known_block_import.lock().clone(); + let duration_since_last_block_import = + now.duration_since(last_known_block_import).as_secs(); + + if duration_since_last_block_import < shutdown_on_missing_block_import_config { + // if the time since the last block import is less than the configured interval, + // we do not have to do anything. + return; + } + + // lock the client and signal shutdown. + warn!( + "shutdown-on-missing-block-import: Detected stalled block import. no import for {duration_since_last_block_import}. last known import: {:?} now: {:?} Demanding shut down of hbbft engine.", + last_known_block_import, now + ); + + // if auto shutdown at missing block production (or import) is configured. + // ... we need to check if enough time has passed since the last block was imported. 
+ if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + c.demand_shutdown(); + } else { + error!( + "shutdown-on-missing-block-import: Error during Shutdown: could not upgrade weak reference." + ); + } + } else { + error!( + "shutdown-on-missing-block-import: Error during Shutdown: No client found." + ); + } + } else { + *self.auto_shutdown_last_known_block_import.lock() = now; + *self.auto_shutdown_last_known_block_number.lock() = current_block_number; + } + } else { + warn!(target: "consensus", "shutdown-on-missing-block-import: Could not read current block number."); + } + } + + fn handle_engine(&self, io: &IoContext<()>) -> Result<(), Error> { + let client = self + .client + .read() + .as_ref() + .ok_or(EngineError::RequiresClient)? + .upgrade() + .ok_or(EngineError::RequiresClient)?; + + // trace!(target: "consensus", "Honey Badger IoHandler timeout called"); + // The block may be complete, but not have been ready to seal - trigger a new seal attempt. + // TODO: In theory, that should not happen. The seal is ready exactly when the sealing entry is `Complete`. + // if let Some(ref weak) = *self.client.read() { + // if let Some(c) = weak.upgrade() { + // c.update_sealing(ForceUpdateSealing::No); + // shutdown_on_missing_block_import_config = + // c.config_shutdown_on_missing_block_import(); + // } + // } + + client.update_sealing(ForceUpdateSealing::No); + let shutdown_on_missing_block_import_config = + client.config_shutdown_on_missing_block_import(); + + // Periodically allow messages received for future epochs to be processed. + self.engine.replay_cached_messages(); + + // rejoin Hbbft Epoch after sync was completed. 
+ if self + .engine + .delayed_hbbft_join + .load(std::sync::atomic::Ordering::SeqCst) + { + if let Err(e) = self.engine.join_hbbft_epoch() { + error!(target: "engine", "Error trying to join epoch after synced: {}", e); + } + } + + self.handle_shutdown_on_missing_block_import(shutdown_on_missing_block_import_config); + + let mut timer_duration = self.min_block_time_remaining(client.clone()); + + // If the minimum block time has passed we are ready to trigger new blocks. + if timer_duration == Duration::from_secs(0) { + // Always create blocks if we are in the keygen phase. + self.engine.start_hbbft_epoch_if_next_phase(); + + // If the maximum block time has been reached we trigger a new block in any case. + if self.max_block_time_remaining(client.clone()) == Duration::from_secs(0) { + self.engine.start_hbbft_epoch(client); + } + + // Transactions may have been submitted during creation of the last block, trigger the + // creation of a new block if the transaction threshold has been reached. 
+ self.engine.start_hbbft_epoch_if_ready(); + + // Set timer duration to the default period (1s) + timer_duration = DEFAULT_DURATION; + } + + // The duration should be at least 1ms and at most self.engine.params.minimum_block_time + timer_duration = max(timer_duration, Duration::from_millis(1)); + timer_duration = min( + timer_duration, + Duration::from_secs(self.engine.params.minimum_block_time), + ); + + io.register_timer_once(ENGINE_TIMEOUT_TOKEN, timer_duration) + .unwrap_or_else( + |e| warn!(target: "consensus", "Failed to restart consensus step timer: {}.", e), + ); + + Ok(()) + } +} impl IoHandler<()> for TransitionHandler { fn initialize(&self, io: &IoContext<()>) { @@ -168,68 +367,36 @@ impl IoHandler<()> for TransitionHandler { |e| warn!(target: "consensus", "Failed to start consensus timer: {}.", e), ); - io.register_timer(ENGINE_SHUTDOWN_IF_UNAVAILABLE, Duration::from_secs(1200)) + io.register_timer(ENGINE_SHUTDOWN, Duration::from_secs(1200)) .unwrap_or_else(|e| warn!(target: "consensus", "HBBFT Shutdown Timer failed: {}.", e)); // io.register_timer_once(ENGINE_DELAYED_UNITL_SYNCED_TOKEN, Duration::from_secs(10)) // .unwrap_or_else(|e| warn!(target: "consensus", "ENGINE_DELAYED_UNITL_SYNCED_TOKEN Timer failed: {}.", e)); - io.register_timer(ENGINE_VALIDATOR_CANDIDATE_ACTIONS, Duration::from_secs(120)) + io.register_timer(ENGINE_VALIDATOR_CANDIDATE_ACTIONS, Duration::from_secs(30)) .unwrap_or_else(|e| warn!(target: "consensus", "ENGINE_VALIDATOR_CANDIDATE_ACTIONS Timer failed: {}.", e)); + + // io.channel() + // io.register_stream() + + io.register_timer(ENGINE_PHOENIX_CHECK, Duration::from_secs(10)) + .unwrap_or_else( + |e| warn!(target: "consensus", "ENGINE_PHOENIX_CHECK Timer failed: {}.", e), + ); } fn timeout(&self, io: &IoContext<()>, timer: TimerToken) { if timer == ENGINE_TIMEOUT_TOKEN { - // trace!(target: "consensus", "Honey Badger IoHandler timeout called"); - // The block may be complete, but not have been ready to seal - trigger a new seal 
attempt. - // TODO: In theory, that should not happen. The seal is ready exactly when the sealing entry is `Complete`. - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - c.update_sealing(ForceUpdateSealing::No); - } - } - - // Periodically allow messages received for future epochs to be processed. - self.engine.replay_cached_messages(); + if let Err(err) = self.handle_engine(io) { + trace!(target: "consensus", "Error in Honey Badger Engine timeout handler: {:?}", err); - // The client may not be registered yet on startup, we set the default duration. - let mut timer_duration = DEFAULT_DURATION; - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - timer_duration = self.min_block_time_remaining(c.clone()); - - // If the minimum block time has passed we are ready to trigger new blocks. - if timer_duration == Duration::from_secs(0) { - // Always create blocks if we are in the keygen phase. - self.engine.start_hbbft_epoch_if_next_phase(); - - // If the maximum block time has been reached we trigger a new block in any case. - if self.max_block_time_remaining(c.clone()) == Duration::from_secs(0) { - self.engine.start_hbbft_epoch(c); - } - - // Transactions may have been submitted during creation of the last block, trigger the - // creation of a new block if the transaction threshold has been reached. - self.engine.start_hbbft_epoch_if_ready(); - - // Set timer duration to the default period (1s) - timer_duration = DEFAULT_DURATION; - } - - // The duration should be at least 1ms and at most self.engine.params.minimum_block_time - timer_duration = max(timer_duration, Duration::from_millis(1)); - timer_duration = min( - timer_duration, - Duration::from_secs(self.engine.params.minimum_block_time), - ); - } - } - - io.register_timer_once(ENGINE_TIMEOUT_TOKEN, timer_duration) + // in error cases we try again soon. 
+ io.register_timer_once(ENGINE_TIMEOUT_TOKEN, DEFAULT_DURATION) .unwrap_or_else( |e| warn!(target: "consensus", "Failed to restart consensus step timer: {}.", e), ); - } else if timer == ENGINE_SHUTDOWN_IF_UNAVAILABLE { + } + } else if timer == ENGINE_SHUTDOWN { // we do not run this on the first occurence, // the first occurence could mean that the client is not fully set up // (e.g. it should sync, but it does not know it yet.) @@ -263,82 +430,31 @@ impl IoHandler<()> for TransitionHandler { debug!(target: "consensus", "Honey Badger check for unavailability shutdown."); - match self.engine.is_staked() { - Ok(is_stacked) => { - if is_stacked { - debug!(target: "consensus", "is_staked: {}", is_stacked); - match self.engine.is_available() { - Ok(is_available) => { - if !is_available { - warn!(target: "consensus", "Initiating Shutdown: Honey Badger Consensus detected that this Node has been flagged as unavailable, while it should be available."); - - if let Some(ref weak) = *self.client.read() { - if let Some(c) = weak.upgrade() { - if let Some(id) = c.block_number(BlockId::Latest) { - warn!(target: "consensus", "BlockID: {id}"); - } - } - } - //TODO: implement shutdown. - // panic!("Shutdown hard. Todo: implement Soft Shutdown."); - //if let c = self.engine.client.read() { - //} - - let id: usize = std::process::id() as usize; - - let thread_id = std::thread::current().id(); - - //let child_id = std::process::en; - - info!(target: "engine", "Waiting for Signaling shutdown to process ID: {id} thread: {:?}", thread_id); - - // Using libc resulted in errors. - // can't a process not send a signal to it's own ?! 
- - // unsafe { - // let signal_result = libc::signal(libc::SIGTERM, id); - // info!(target: "engine", "Signal result: {signal_result}"); - // } - - let child = Command::new("/bin/kill") - .arg(id.to_string()) - .spawn() - .expect("failed to execute child"); - - let kill_id = child.id(); - info!(target: "engine", "Signaling shutdown SENT to process ID: {id} with process: {kill_id} "); - - // if let Some(ref weak) = *self.client.read() { - // if let Some(client) = weak.upgrade() { - - // match client.as_full_client() { - // Some(full_client) => { - // //full_client.shutdown(); - // } - // None => { + let is_staked = self.engine.is_staked(); + if is_staked { + trace!(target: "consensus", "We are staked!"); + let is_available = self.engine.is_available(); + if !is_available { + warn!(target: "consensus", "Initiating Shutdown: Honey Badger Consensus detected that this Node has been flagged as unavailable, while it should be available."); + + if let Some(ref weak) = *self.client.read() { + if let Some(c) = weak.upgrade() { + if let Some(id) = c.block_number(BlockId::Latest) { + warn!(target: "consensus", "BlockID: {id}"); + } + } + } - // } - // } + let id: usize = std::process::id() as usize; + let thread_id = std::thread::current().id(); + info!(target: "engine", "Waiting for Signaling shutdown to process ID: {id} thread: {:?}", thread_id); - // match client.as_full_client() { - // Some(full_client) => full_client.is_major_syncing(), - // // We only support full clients at this point. - // None => true, - // } - // } - // } - } - // if the node is available, everythign is fine! - } - Err(error) => { - warn!(target: "consensus", "Could not query Honey Badger check for unavailability shutdown. {:?}", error); - } + if let Some(ref weak) = *self.client.read() { + if let Some(client) = weak.upgrade() { + info!(target: "engine", "demanding shutdown from hbbft engine."); + client.demand_shutdown(); } } - // else: just a regular node. 
- } - Err(error) => { - warn!(target: "consensus", "Could not query Honey Badger check if validator is staked. {:?}", error); } } } else if timer == ENGINE_DELAYED_UNITL_SYNCED_TOKEN { @@ -351,20 +467,148 @@ impl IoHandler<()> for TransitionHandler { trace!(target: "consensus", "All Operation that had to be done after syncing have been done now."); } } else if timer == ENGINE_VALIDATOR_CANDIDATE_ACTIONS { - warn!(target: "consensus", "do_validator_engine_actions"); if let Err(err) = self.engine.do_validator_engine_actions() { error!(target: "consensus", "do_validator_engine_actions failed: {:?}", err); } + } else if timer == ENGINE_PHOENIX_CHECK { + self.engine.handle_phoenix_recovery_protocol(); } } } impl HoneyBadgerBFT { + // Phoenix recovery protocol parameters + // Start deferring and reset HoneyBadger after this many seconds without a new block. + const PHOENIX_DEFER_AFTER_SECS: i64 = 600; + // Add this number to PHOENIX_DEFER_AFTER_SECS for each try after the first try to + // incrementally increase the time for the next block creation attempt. + // If 'n' is the current try, starting at 0 then: + // Try(0): PHOENIX_DEFER_AFTER_SECS + // Try(n): Try(n-1) + PHOENIX_DEFER_AFTER_SECS + n * PHOENIX_DEFER_INCREMENT_SECS + const PHOENIX_DEFER_INCREMENT_SECS: i64 = 120; + // Resume sending and deliver deferred messages after this many seconds. + const PHOENIX_RESUME_AFTER_SECS: i64 = 120; + // Timeout for trying to acquire the hbbft_state lock to reset HoneyBadger, in milliseconds. + const PHOENIX_LOCK_TIMEOUT_MS: u64 = 100; + + /// Phoenix recovery protocol + /// Called periodically to detect stalls and perform a controlled recovery. + /// Retry recovery every n*PHOENIX_DEFER_AFTER_SECS by deferring and resetting, + /// and resume sending messages every n*PHOENIX_DEFER_AFTER_SECS + PHOENIX_RESUME_AFTER_SECS. 
+ fn handle_phoenix_recovery_protocol(&self) { + if !self.hbbft_state.read().is_validator() { + return; + } + + if let Some(client) = self.client_arc() { + // Skip if still syncing. + if self.is_major_syncing(&client) { + return; + } + + match client.block_header(BlockId::Latest) { + Some(h) => { + let ts = h.timestamp() as i64; + let now_ts = unix_now_secs() as i64; + let diff_secs = now_ts - ts; + + let defer_after = Self::PHOENIX_DEFER_AFTER_SECS; + let resume_after = Self::PHOENIX_RESUME_AFTER_SECS; + + if diff_secs >= defer_after { + // Determine the current recovery cycle index n and its boundaries using + // increasing delays per try. The cumulative start time S_n is defined as: + // S_0 = PHOENIX_DEFER_AFTER_SECS + // S_n = S_{n-1} + PHOENIX_DEFER_AFTER_SECS + n * PHOENIX_DEFER_INCREMENT_SECS + let mut n: i64 = 0; + let mut cycle_start: i64 = defer_after; // S_0 + let next_cycle_start: i64; + loop { + let incr = defer_after + (n + 1) * Self::PHOENIX_DEFER_INCREMENT_SECS; + let candidate_next = cycle_start + incr; // S_{n+1} + if diff_secs >= candidate_next { + n += 1; + cycle_start = candidate_next; // advance to next cycle + continue; + } else { + next_cycle_start = candidate_next; + break; + } + } + let cycle_resume = cycle_start + resume_after; + + if diff_secs < cycle_resume { + // We are within the deferring window of the current cycle. + self.set_defer_outgoing_messages(true); + + // Ensure we reset the HoneyBadger instance only once during a deferring window. + if !self.phoenix_reset_performed.load(Ordering::SeqCst) { + match self.hbbft_state.try_write_for(Duration::from_millis( + Self::PHOENIX_LOCK_TIMEOUT_MS, + )) { + Some(mut state) => { + if state.reset_honeybadger().is_some() { + // Also reset the sealing protocol state to avoid mixing signatures + // from the previous block creation attempt. 
+ self.sealing.write().clear(); + + self.phoenix_reset_performed + .store(true, Ordering::SeqCst); + warn!(target: "consensus", "Phoenix Protocol: Deferred outgoing messages, reset HoneyBadger and cleared sealing state ({}s since last block; cycle n={}, window [{}..{}))", diff_secs, n, cycle_start, cycle_resume); + } else { + warn!(target: "consensus", "Phoenix Protocol: Deferred outgoing messages but failed to reset HoneyBadger ({}s since last block; cycle n={}, window [{}..{}))", diff_secs, n, cycle_start, cycle_resume); + } + } + None => { + warn!(target: "consensus", "Phoenix Protocol: Could not acquire hbbft_state lock to reset HoneyBadger while deferring messages ({}s since last block; cycle n={}, window [{}..{}))", diff_secs, n, cycle_start, cycle_resume); + } + } + } + } else if diff_secs < next_cycle_start { + // We are in the resume window of the current cycle. + if self.phoenix_reset_performed.load(Ordering::SeqCst) { + self.set_defer_outgoing_messages(false); + self.deliver_stored_outgoing_messages(); + // Allow the next cycle to perform a reset again if needed. + self.phoenix_reset_performed.store(false, Ordering::SeqCst); + warn!(target: "consensus", "Phoenix Protocol: Resumed sending and delivered deferred messages ({}s since last block; cycle n={}, resume @ {}, next_cycle @ {})", diff_secs, n, cycle_resume, next_cycle_start); + } else { + warn!(target: "consensus", "Phoenix Protocol: Expecting block to be generated ({}s since last block; cycle n={}, resume @ {}, next_cycle @ {})", diff_secs, n, cycle_resume, next_cycle_start); + } + } + } else { + // A new block has been imported while recovery protocol was active. + // Clean up recovery state: stop deferring and deliver any stored messages. 
+ if self.defer_outgoing_messages.load(Ordering::SeqCst) + || self.phoenix_reset_performed.load(Ordering::SeqCst) + { + self.set_defer_outgoing_messages(false); + self.deliver_stored_outgoing_messages(); + self.phoenix_reset_performed.store(false, Ordering::SeqCst); + warn!(target: "consensus", "Phoenix Protocol: Cleaned up recovery state after new block ({}s since last block < defer threshold {})", diff_secs, defer_after); + } + } + + // Always log the latest timestamp and diff for visibility. + trace!(target: "consensus", "Phoenix Protocol: latest block timestamp: {} (diff_secs: {})", h.timestamp(), diff_secs); + } + None => { + error!(target: "consensus", "Phoenix Protocol: No latest block header available."); + } + } + } + } + /// Creates an instance of the Honey Badger BFT Engine. pub fn new(params: HbbftParams, machine: EthereumMachine) -> Result, Error> { let is_unit_test = params.is_unit_test.unwrap_or(false); + let engine = Arc::new(HoneyBadgerBFT { - transition_service: IoService::<()>::start("Hbbft")?, + transition_service: IoService::<()>::start("Hbbft", 4)?, + hbbft_peers_service: IoService::::start( + "hbbftp", /* hbbft peers (we use 6 letter acronyms for nice log file layout.) */ + 1, + )?, client: Arc::new(RwLock::new(None)), signer: Arc::new(RwLock::new(None)), machine, @@ -383,24 +627,42 @@ impl HoneyBadgerBFT { ), sealing: RwLock::new(BTreeMap::new()), params, - message_counter: Mutex::new(0), + message_counter: Mutex::new(0), // restore message counter from memory here for RBC ? 
*/ random_numbers: RwLock::new(BTreeMap::new()), - keygen_transaction_sender: RwLock::new(KeygenTransactionSender::new()), - has_sent_availability_tx: AtomicBool::new(false), + /* Todo: make this configureable + */ + keygen_transaction_sender: RwLock::new(KeygenTransactionSender::new(1, 60000)), + has_connected_to_validator_set: AtomicBool::new(false), - peers_management: Mutex::new(HbbftPeersManagement::new()), current_minimum_gas_price: Mutex::new(None), + early_epoch_manager: Mutex::new(None), + hbbft_engine_cache: Mutex::new(HbbftEngineCache::new()), + delayed_hbbft_join: AtomicBool::new(false), + + defer_outgoing_messages: AtomicBool::new(false), + stored_outgoing_messages: Mutex::new(Vec::new()), + phoenix_reset_performed: AtomicBool::new(false), }); if !engine.params.is_unit_test.unwrap_or(false) { let handler = TransitionHandler { client: engine.client.clone(), engine: engine.clone(), + auto_shutdown_last_known_block_number: Mutex::new(0), + auto_shutdown_last_known_block_import: Mutex::new(Instant::now()), }; engine .transition_service .register_handler(Arc::new(handler))?; } + + let peers_handler = HbbftPeersHandler::new(engine.client.clone()); + engine + .hbbft_peers_service + .register_handler(Arc::new(peers_handler))?; + + // todo: + // setup rev Ok(engine) } @@ -424,7 +686,7 @@ impl HoneyBadgerBFT { trace!(target: "consensus", "Batch received for epoch {}, creating new Block.", batch.epoch); // Decode and de-duplicate transactions - let batch_txns: Vec<_> = batch + let mut batch_txns: Vec<_> = batch .contributions .iter() .flat_map(|(_, c)| &c.transactions) @@ -439,9 +701,11 @@ impl HoneyBadgerBFT { }) .collect(); - info!(target: "consensus", "Block creation: Batch received for epoch {}, total {} contributions, with {} unique transactions.", batch.epoch, batch + debug!(target: "consensus", "Block creation: Batch received for epoch {}, total {} contributions, with {} unique transactions.", batch.epoch, batch .contributions.iter().fold(0, |i, c| i + 
c.1.transactions.len()), batch_txns.len()); + trace!(target: "consensus", "Block creation: transactions {}", batch_txns.iter().map(|x| x.hash.to_string()).join(", ")); + // Make sure the resulting transactions do not contain nonces out of order. // Not necessary any more - we select contribution transactions by sender, contributing all transactions by that sender or none. // The transaction queue's "pending" transactions already guarantee there are no nonce gaps for a selected sender. @@ -458,9 +722,6 @@ impl HoneyBadgerBFT { .map(|(_, c)| c.timestamp) .sorted(); - // todo: use timstamps for calculating negative score. - // https://github.com/DMDcoin/diamond-node/issues/37 - let timestamp = match timestamps.iter().nth(timestamps.len() / 2) { Some(t) => t.clone(), None => { @@ -486,19 +747,13 @@ impl HoneyBadgerBFT { .write() .insert(batch.epoch, random_number); - if let Some(mut header) = client.create_pending_block_at(batch_txns, timestamp, batch.epoch) - { + // Shuffelin transactions deterministically based on the random number. 
+ batch_txns = crate::engines::hbbft::utils::transactions_shuffling::deterministic_transactions_shuffling(batch_txns, random_number); + + if let Some(header) = client.create_pending_block_at(batch_txns, timestamp, batch.epoch) { let block_num = header.number(); let hash = header.bare_hash(); - if let Some(reward_contract_address) = self.params.block_reward_contract_address { - header.set_author(reward_contract_address); - } else { - warn!( - "Creating block with no blockRewardContractAddress {}", - block_num - ); - } - + // TODO: trace is missleading here: we already got the signature shares, we can already trace!(target: "consensus", "Sending signature share of {} for block {}", hash, block_num); let step = match self .sealing @@ -601,7 +856,7 @@ impl HoneyBadgerBFT { ) { Some(n) => n, None => { - error!(target: "consensus", "Sealing message for block #{} could not be processed due to missing/mismatching network info.", block_num); + error!(target: "consensus", "Sealing message for block #{} could not be processed due to missing network info for signer {}", block_num, sender_id); self.hbbft_message_dispatcher.report_seal_bad( &sender_id, block_num, @@ -647,25 +902,44 @@ impl HoneyBadgerBFT { for m in messages { let ser = rmp_serde::to_vec(&m.message).expect("Serialization of consensus message failed"); + + // Determine recipients based on the target, excluding ourselves. 
+ let mut recipients: Vec = Vec::new(); match m.target { Target::Nodes(set) => { - trace!(target: "consensus", "Dispatching message {:?} to {:?}", m.message, set); for node_id in set.into_iter().filter(|p| p != net_info.our_id()) { - trace!(target: "consensus", "Sending message to {}", node_id.0); - client.send_consensus_message(ser.clone(), Some(node_id.0)); + recipients.push(node_id.0); } } Target::AllExcept(set) => { - trace!(target: "consensus", "Dispatching exclusive message {:?} to all except {:?}", m.message, set); for node_id in net_info .all_ids() .filter(|p| (p != &net_info.our_id() && !set.contains(p))) { - trace!(target: "consensus", "Sending exclusive message to {}", node_id.0); - client.send_consensus_message(ser.clone(), Some(node_id.0)); + recipients.push(node_id.0); } } } + + let block_number = m.message.block_number(); + + if self.defer_outgoing_messages.load(Ordering::SeqCst) { + // Store for deferred delivery + warn!(target: "consensus", "Phoenix Protocol: Storing message for deferred sending for block #{} ", block_number); + self.stored_outgoing_messages + .lock() + .push(StoredOutgoingMessage { + block_number, + data: ser, + recipients, + }); + } else { + // Send immediately + for node in recipients { + trace!(target: "consensus", "Sending message to {} for block #{} ", node, block_number); + client.send_consensus_message(block_number, ser.clone(), Some(node)); + } + } } } @@ -682,7 +956,7 @@ impl HoneyBadgerBFT { .map(|msg| msg.map(|m| Message::Sealing(block_num, m))); self.dispatch_messages(&client, messages, network_info); if let Some(sig) = step.output.into_iter().next() { - trace!(target: "consensus", "Signature for block {} is ready", block_num); + trace!(target: "consensus", "Signature for block {} is ready.", block_num); let state = Sealing::Complete(sig); self.sealing.write().insert(block_num, state); @@ -705,20 +979,62 @@ impl HoneyBadgerBFT { message: Message::HoneyBadger(*message_counter, msg.message), } }); + 
self.dispatch_messages(&client, messages, network_info); std::mem::drop(message_counter); self.process_output(client, step.output, network_info); } + /// Enables or disables deferring of outgoing consensus messages. + pub fn set_defer_outgoing_messages(&self, defer: bool) { + self.defer_outgoing_messages.store(defer, Ordering::SeqCst); + } + + /// Deliver all stored outgoing consensus messages immediately. + /// If no client is registered yet, the messages remain stored. + pub fn deliver_stored_outgoing_messages(&self) { + let client = match self.client_arc() { + Some(c) => c, + None => { + warn!(target: "consensus", "deliver_stored_outgoing_messages: No client available; keeping messages deferred."); + return; + } + }; + + let mut stored = self.stored_outgoing_messages.lock(); + if stored.is_empty() { + return; + } + let mut messages: Vec = Vec::with_capacity(stored.len()); + std::mem::swap(&mut *stored, &mut messages); + drop(stored); + + for msg in messages.into_iter() { + for node in msg.recipients.iter() { + trace!(target: "consensus", "Delivering deferred message to {}", node); + client.send_consensus_message(msg.block_number, msg.data.clone(), Some(*node)); + } + } + } + /// Conditionally joins the current hbbft epoch if the number of received /// contributions exceeds the maximum number of tolerated faulty nodes. 
fn join_hbbft_epoch(&self) -> Result<(), EngineError> { let client = self.client_arc().ok_or(EngineError::RequiresClient)?; if self.is_syncing(&client) { trace!(target: "consensus", "tried to join HBBFT Epoch, but still syncing."); + self.delayed_hbbft_join + .store(true, std::sync::atomic::Ordering::SeqCst); return Ok(()); } + if self + .delayed_hbbft_join + .swap(false, std::sync::atomic::Ordering::SeqCst) + { + trace!(target: "consensus", "continued join_hbbft_epoch after sync was completed."); + } + let step = self .hbbft_state .write() @@ -738,7 +1054,7 @@ impl HoneyBadgerBFT { let step = match self .hbbft_state - .try_write_for(std::time::Duration::from_millis(10)) + .try_write_for(std::time::Duration::from_millis(100)) { Some(mut state_lock) => state_lock.try_send_contribution(client.clone(), &self.signer), None => { @@ -758,6 +1074,8 @@ impl HoneyBadgerBFT { if let Some(block_header) = client.block_header(BlockId::Latest) { let target_min_timestamp = block_header.timestamp() + self.params.minimum_block_time; let now = unix_now_secs(); + // we could implement a cheaper way to get the number of queued transaction, that does not require this intensive locking. + // see: https://github.com/DMDcoin/diamond-node/issues/237 let queue_length = client.queued_transactions().len(); (self.params.minimum_block_time == 0 || target_min_timestamp <= now) && queue_length >= self.params.transaction_queue_size_trigger @@ -775,6 +1093,9 @@ impl HoneyBadgerBFT { } fn start_hbbft_epoch_if_next_phase(&self) { + // experimental deactivation of empty blocks. 
+ // see: https://github.com/DMDcoin/diamond-node/issues/160 + match self.client_arc() { None => return, Some(client) => { @@ -803,7 +1124,7 @@ impl HoneyBadgerBFT { let steps = match self.hbbft_state.try_write_for(Duration::from_millis(10)) { Some(mut hbbft_state_lock) => hbbft_state_lock.replay_cached_messages(client.clone()), None => { - trace!(target: "engine", "could not acquire write lock for replaying cached messages, stepping back..",); + debug!(target: "engine", "could not acquire write lock for replaying cached messages, stepping back..",); return None; } }; @@ -831,69 +1152,75 @@ impl HoneyBadgerBFT { Some(()) } - fn should_handle_availability_announcements(&self) -> bool { - !self.has_sent_availability_tx.load(Ordering::SeqCst) - } - fn should_connect_to_validator_set(&self) -> bool { !self.has_connected_to_validator_set.load(Ordering::SeqCst) } - fn handle_availability_announcements( + /// early epoch ends + /// https://github.com/DMDcoin/diamond-node/issues/87 + fn handle_early_epoch_end( &self, - engine_client: &dyn EngineClient, block_chain_client: &dyn BlockChainClient, + engine_client: &dyn EngineClient, mining_address: &Address, + epoch_start_block: u64, + epoch_num: u64, + validator_set: &Vec, ) { - // handles the announcements of the availability of other peers as blockchain transactions - - // let engine_client = client.deref(); - - match get_validator_available_since(engine_client, &mining_address) { - Ok(s) => { - if s.is_zero() { - //debug!(target: "engine", "sending announce availability transaction"); - info!(target: "engine", "sending announce availability transaction"); - match send_tx_announce_availability(block_chain_client, &mining_address) { - Ok(()) => {} - Err(call_error) => { - error!(target: "engine", "CallError during announce availability. {:?}", call_error); - } - } + // todo: acquire allowed devp2p warmup time from contracts ?! 
+ let allowed_devp2p_warmup_time = Duration::from_secs(1200); + + debug!(target: "engine", "early-epoch-end: handle_early_epoch_end."); + + // we got everything we need from hbbft_state - drop lock ASAP. + + if let Some(memorium) = self + .hbbft_message_dispatcher + .get_memorium() + .try_read_for(Duration::from_millis(300)) + { + // this is currently the only location where we lock early epoch manager - + // so this should never cause a deadlock, and we do not have to try_lock_for + let mut lock_guard = self.early_epoch_manager.lock(); + + match lock_guard.as_mut() { + Some(early_epoch_end_manager) => { + // should we check here if the epoch number has changed ? + early_epoch_end_manager.decide(&memorium, block_chain_client, engine_client); } + None => { + *lock_guard = HbbftEarlyEpochEndManager::create_early_epoch_end_manager( + allowed_devp2p_warmup_time, + block_chain_client, + engine_client, + epoch_num, + epoch_start_block, + validator_set.clone(), + mining_address, + ); - // we store "HAS_SENT" if we SEND, - // or if we are already marked as available. - self.has_sent_availability_tx.store(true, Ordering::SeqCst); - //return Ok(()); - } - Err(e) => { - error!(target: "engine", "Error trying to send availability check: {:?}", e); + if let Some(manager) = lock_guard.as_mut() { + manager.decide(&memorium, block_chain_client, engine_client); + } + } } + } else { + warn!(target: "engine", "early-epoch-end: could not acquire read lock for memorium to decide on ealry_epoch_end_manager in do_validator_engine_actions."); } } - fn should_handle_internet_address_announcements(&self, client: &dyn BlockChainClient) -> bool { - // this will just called in the next hbbft validator node events again. - // if we don't get a lock, we will just a little be late with announcing our internet address. 
- if let Some(peers) = self.peers_management.try_lock() { - return peers.should_announce_own_internet_address(client); - } - - false - } - - // some actions are required for hbbft validator nodes. + // some actions are required for hbbft nodes. // this functions figures out what kind of actions are required and executes them. // this will lock the client and some deeper layers. - fn do_validator_engine_actions(&self) -> Result<(), String> { + fn do_validator_engine_actions(&self) -> Result<(), Error> { // here we need to differentiate the different engine functions, // that requre different levels of access to the client. - + trace!(target: "engine", "do_validator_engine_actions."); match self.client_arc() { Some(client_arc) => { if self.is_syncing(&client_arc) { // we are syncing - do not do anything. + trace!(target: "engine", "do_validator_engine_actions: skipping because we are syncing."); return Ok(()); } @@ -908,6 +1235,15 @@ impl HoneyBadgerBFT { } }; + let engine_client = client_arc.as_ref(); + if let Err(err) = self + .hbbft_engine_cache + .lock() + .refresh_cache(mining_address, engine_client) + { + trace!(target: "engine", "do_validator_engine_actions: data could not get updated, follow up tasks might fail: {:?}", err); + } + let engine_client = client_arc.deref(); let block_chain_client = match engine_client.as_full_client() { @@ -917,95 +1253,70 @@ impl HoneyBadgerBFT { } }; - let should_handle_availability_announcements = - self.should_handle_availability_announcements(); - let should_handle_internet_address_announcements = - self.should_handle_internet_address_announcements(block_chain_client); - let should_connect_to_validator_set = self.should_connect_to_validator_set(); + let mut should_handle_early_epoch_end = false; + + // we just keep those variables here, because we need them in the early_epoch_end_manager. + // this is just an optimization, so we do not acquire the lock for that much time. 
+ let mut validator_set: Vec = Vec::new(); + let mut epoch_start_block: u64 = 0; + let mut epoch_num: u64 = 0; - // if we do not have to do anything, we can return early. - if !(should_handle_availability_announcements - || should_handle_internet_address_announcements - || should_connect_to_validator_set) { + let hbbft_state_option = + self.hbbft_state.try_read_for(Duration::from_millis(250)); + match hbbft_state_option { + Some(hbbft_state) => { + should_handle_early_epoch_end = hbbft_state.is_validator(); + + // if we are a pending validator, we will also do the reserved peers management. + if should_handle_early_epoch_end { + // we already remember here stuff the early epoch manager needs, + // so we do not have to acquire the lock for that long. + epoch_num = hbbft_state.get_current_posdao_epoch(); + epoch_start_block = + hbbft_state.get_current_posdao_epoch_start_block(); + validator_set = hbbft_state.get_validator_set(); + } + } + None => { + // maybe improve here, to return with a result, that triggers a retry soon. + debug!(target: "engine", "Unable to do_validator_engine_actions: Could not acquire read lock for hbbft state. Unable to decide about early epoch end. retrying soon."); + } + }; + } // drop lock for hbbft_state + + // if we do not have to do anything, we can return early. + if !(should_connect_to_validator_set || should_handle_early_epoch_end) { return Ok(()); } - // TODO: - // staking by mining address could be cached. - // but it COULD also get changed in the contracts, during the time the node is running. - // most likely since a Node can get staked, and than it becomes a mining address. - // a good solution for this is not to do this that fequently. - let staking_address = match staking_by_mining_address( - engine_client, - &mining_address, - ) { - Ok(staking_address) => { - if staking_address.is_zero() { - //TODO: here some fine handling can improve performance. 
- //with this implementation every node (validator or not) - //needs to query this state every block. - //trace!(target: "engine", "availability handling not a validator"); - return Ok(()); - } - staking_address - } - Err(call_error) => { - error!(target: "engine", "unable to ask for corresponding staking address for given mining address: {:?}", call_error); - let message = format!("unable to ask for corresponding staking address for given mining address: {:?}", call_error); - return Err(message.into()); - } - }; + self.hbbft_peers_service + .channel() + .send(HbbftConnectToPeersMessage::AnnounceAvailability)?; - // if we are not a potential validator, we already have already returned here. - if should_handle_availability_announcements { - self.handle_availability_announcements( - engine_client, + self.hbbft_peers_service + .send_message(HbbftConnectToPeersMessage::AnnounceOwnInternetAddress)?; + + if should_connect_to_validator_set { + self.hbbft_peers_service.send_message( + HbbftConnectToPeersMessage::ConnectToCurrentPeers(validator_set.clone()), + )?; + } + + if should_handle_early_epoch_end { + self.handle_early_epoch_end( block_chain_client, + engine_client, &mining_address, + epoch_start_block, + epoch_num, + &validator_set, ); } - // since get latest nonce respects the pending transactions, - // we don't have to take care of sending 2 transactions at once. 
- if should_handle_internet_address_announcements { - if let Some(mut peers_management) = self - .peers_management - .try_lock_for(Duration::from_millis(100)) - { - if let Err(error) = peers_management.announce_own_internet_address( - block_chain_client, - engine_client, - &mining_address, - &staking_address, - ) { - error!(target: "engine", "Error trying to announce own internet address: {:?}", error); - } else { - } - } - } - - if should_connect_to_validator_set { - let network_info_o = if let Some(hbbft_state) = self.hbbft_state.try_read() { - hbbft_state.get_current_network_info() - } else { - None - }; + self.do_keygen(); - if let Some(network_info) = network_info_o { - if let Some(mut peers_management) = self - .peers_management - .try_lock_for(Duration::from_millis(100)) - { - // connecting to current validators. - peers_management - .connect_to_current_validators(&network_info, &client_arc); - self.has_connected_to_validator_set - .store(true, Ordering::SeqCst); - } - } - } return Ok(()); } @@ -1018,7 +1329,7 @@ impl HoneyBadgerBFT { } /// Returns true if we are in the keygen phase and a new key has been generated. - fn do_keygen(&self, block_timestamp: u64) -> bool { + fn do_keygen(&self) -> bool { match self.client_arc() { None => false, Some(client) => { @@ -1037,19 +1348,22 @@ impl HoneyBadgerBFT { // Check if a new key is ready to be generated, return true to switch to the new epoch in that case. // The execution needs to be *identical* on all nodes, which means it should *not* use the local signer // when attempting to initialize the synckeygen. 
- if let Ok(all_available) = - all_parts_acks_available(&*client, block_timestamp, validators.len()) - { + if let Ok(all_available) = all_parts_acks_available(&*client, validators.len()) { if all_available { let null_signer = Arc::new(RwLock::new(None)); - if let Ok(synckeygen) = initialize_synckeygen( + match initialize_synckeygen( &*client, &null_signer, BlockId::Latest, ValidatorType::Pending, ) { - if synckeygen.is_ready() { - return true; + Ok(synckeygen) => { + if synckeygen.is_ready() { + return true; + } + } + Err(e) => { + error!(target: "consensus", "Error initializing synckeygen: {:?}", e); } } } @@ -1063,28 +1377,10 @@ impl HoneyBadgerBFT { if let Ok(is_pending) = is_pending_validator(&*client, &signer.address()) { trace!(target: "engine", "is_pending_validator: {}", is_pending); if is_pending { - // we are a pending validator, so we need to connect to other pending validators. - // so we start already the required communication channels. - // but this is NOT Mission critical, - // we will connect to the validators when we are in the validator set anyway. - // so we won't lock and wait forever to be able to do this. - if let Some(mut peers_management) = self - .peers_management - .try_lock_for(Duration::from_millis(50)) - { - // problem: this get's called every block, not only when validators become pending. 
- match peers_management - .connect_to_pending_validators(&client, &validators) - { - Ok(value) => { - debug!(target: "engine", "added to additional {:?} reserved peers, because they are pending validators.", value); - } - Err(err) => { - warn!(target: "engine", "Error connecting to other pending validators: {:?}", err); - } - } - } else { - warn!(target: "engine", "Could not connect to other pending validators, peers management lock not acquird within time."); + if let Err(err) = self.hbbft_peers_service.send_message( + HbbftConnectToPeersMessage::ConnectToPendingPeers(validators), + ) { + error!(target: "engine", "Error connecting to pending peers: {:?}", err); } let _err = self @@ -1107,121 +1403,28 @@ impl HoneyBadgerBFT { fn is_syncing(&self, client: &Arc) -> bool { match client.as_full_client() { - Some(full_client) => full_client.is_major_syncing(), + Some(full_client) => full_client.is_syncing(), // We only support full clients at this point. None => true, } } - /** returns if the signer of hbbft is tracked as available in the hbbft contracts. */ - pub fn is_available(&self) -> Result { - match self.signer.read().as_ref() { - Some(signer) => { - match self.client_arc() { - Some(client) => { - let engine_client = client.deref(); - let mining_address = signer.address(); - - if mining_address.is_zero() { - debug!(target: "consensus", "is_available: not available because mining address is zero: "); - return Ok(false); - } - match super::contracts::validator_set::get_validator_available_since( - engine_client, - &mining_address, - ) { - Ok(available_since) => { - debug!(target: "consensus", "available_since: {}", available_since); - return Ok(!available_since.is_zero()); - } - Err(err) => { - warn!(target: "consensus", "Error get get_validator_available_since: ! 
{:?}", err); - } - } - } - None => { - // warn!("Could not retrieve address for writing availability transaction."); - warn!(target: "consensus", "is_available: could not get engine client"); - } - } - } - None => {} + fn is_major_syncing(&self, client: &Arc) -> bool { + match client.as_full_client() { + Some(full_client) => full_client.is_major_syncing(), + // We only support full clients at this point. + None => true, } - return Ok(false); } - /** returns if the signer of hbbft is stacked. */ - pub fn is_staked(&self) -> Result { - // is the configured validator stacked ?? - - // TODO: improvement: - // since a signer address can not change after boot, - // we can just cash the value - // so we don't need a read lock here, - // getting the numbers of required read locks down (deadlock risk) - // and improving the performance. - - match self.signer.read().as_ref() { - Some(signer) => { - match self.client_arc() { - Some(client) => { - let engine_client = client.deref(); - let mining_address = signer.address(); - - if mining_address.is_zero() { - return Ok(false); - } + /** returns if the signer of hbbft is tracked as available in the hbbft contracts..*/ + pub fn is_available(&self) -> bool { + self.hbbft_engine_cache.lock().is_available() + } - match super::contracts::validator_set::staking_by_mining_address( - engine_client, - &mining_address, - ) { - Ok(staking_address) => { - // if there is no pool for this validator defined, we know that - if staking_address.is_zero() { - return Ok(false); - } - match super::contracts::staking::stake_amount( - engine_client, - &staking_address, - &staking_address, - ) { - Ok(stake_amount) => { - debug!(target: "consensus", "stake_amount: {}", stake_amount); - - // we need to check if the pool stake amount is >= minimum stake - match super::contracts::staking::candidate_min_stake( - engine_client, - ) { - Ok(min_stake) => { - debug!(target: "consensus", "min_stake: {}", min_stake); - return Ok(stake_amount.ge(&min_stake)); - } 
- Err(err) => { - warn!(target: "consensus", "Error get candidate_min_stake: ! {:?}", err); - warn!(target: "consensus", "stake amount: {}", stake_amount); - } - } - } - Err(err) => { - warn!(target: "consensus", "Error get stake_amount: ! {:?}", err); - } - } - } - Err(err) => { - warn!(target: "consensus", "Error get staking_by_mining_address: ! {:?}", err); - } - } - } - None => { - // warn!("Could not retrieve address for writing availability transaction."); - warn!(target: "consensus", "could not get engine client"); - } - } - } - None => {} - } - return Ok(false); + /** returns if the signer of hbbft is stacked. */ + pub fn is_staked(&self) -> bool { + self.hbbft_engine_cache.lock().is_staked() } fn start_hbbft_epoch_if_ready(&self) { @@ -1242,6 +1445,10 @@ impl Engine for HoneyBadgerBFT { &self.machine } + fn minimum_gas_price(&self) -> Option { + self.current_minimum_gas_price.lock().clone() + } + fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> ForkChoice { crate::engines::total_difficulty_fork_choice(new, current) } @@ -1285,7 +1492,9 @@ impl Engine for HoneyBadgerBFT { { Ok(()) } else { - error!(target: "engine", "Invalid seal for block #{}!", header.number()); + error!(target: "engine", "Invalid seal (Stage 3) for block #{}!", header.number()); + let trace = std::backtrace::Backtrace::capture(); + error!(target: "engine", "Invalid Seal Trace: #{trace:?}!"); Err(BlockError::InvalidSeal.into()) } } @@ -1297,12 +1506,36 @@ impl Engine for HoneyBadgerBFT { fn register_client(&self, client: Weak) { *self.client.write() = Some(client.clone()); + if let Some(client) = self.client_arc() { let mut state = self.hbbft_state.write(); + + // todo: better get the own ID from devP2P communication ?! 
+ let own_public_key = match self.signer.read().as_ref() { + Some(signer) => signer + .public() + .expect("Signer's public key must be available!"), + None => Public::from(H512::from_low_u64_be(0)), + }; + + if let Some(latest_block) = client.block_number(BlockId::Latest) { + state.init_fork_manager( + NodeId(own_public_key), + latest_block, + self.params.forks.clone(), + ); + } else { + error!(target: "engine", "hbbft-hardfork : could not initialialize hardfork manager, no latest block found."); + } + + // RBC: we need to replay disk cached messages here. + // state.replay_cached_messages(client) + match state.update_honeybadger( client, &self.signer, - &self.peers_management, + &self.hbbft_peers_service, + &self.early_epoch_manager, &self.current_minimum_gas_price, BlockId::Latest, true, @@ -1312,7 +1545,7 @@ impl Engine for HoneyBadgerBFT { let epoch_start_block = state.get_current_posdao_epoch_start_block(); // we got all infos from the state, we can drop the lock. std::mem::drop(state); - warn!(target: "engine", "report new epoch: {} at block: {}", posdao_epoch, epoch_start_block); + info!(target: "engine", "report new epoch: {} at block: {}", posdao_epoch, epoch_start_block); self.hbbft_message_dispatcher .report_new_epoch(posdao_epoch, epoch_start_block); } @@ -1323,10 +1556,16 @@ impl Engine for HoneyBadgerBFT { fn set_signer(&self, signer: Option>) { if let Some(engine_signer) = signer.as_ref() { - // this is importamt, we really have to get that lock here. 
- self.peers_management - .lock() - .set_validator_address(engine_signer.address()); + let signer_address = engine_signer.address(); + info!(target: "engine", "set_signer: {:?}", signer_address); + if let Err(err) = self + .hbbft_peers_service + .send_message(HbbftConnectToPeersMessage::SetSignerAddress(signer_address)) + { + error!(target: "engine", "Error setting signer address in hbbft peers service: {:?}", err); + } + } else { + info!(target: "engine", "set_signer: signer is None, not setting signer address in hbbft peers service."); } *self.signer.write() = signer; @@ -1343,7 +1582,8 @@ impl Engine for HoneyBadgerBFT { if let None = self.hbbft_state.write().update_honeybadger( client, &self.signer, - &self.peers_management, + &self.hbbft_peers_service, + &self.early_epoch_manager, &self.current_minimum_gas_price, BlockId::Latest, true, @@ -1392,6 +1632,8 @@ impl Engine for HoneyBadgerBFT { fn handle_message(&self, message: &[u8], node_id: Option) -> Result<(), EngineError> { let node_id = NodeId(node_id.ok_or(EngineError::UnexpectedMessage)?); + // todo: handling here old message as well. + match rmp_serde::from_slice(message) { Ok(Message::HoneyBadger(msg_idx, hb_msg)) => { self.process_hb_message(msg_idx, hb_msg, node_id) @@ -1437,8 +1679,11 @@ impl Engine for HoneyBadgerBFT { false } - fn use_block_author(&self) -> bool { - false + fn use_block_author(&self) -> BlockAuthorOption { + if let Some(address) = self.params.block_reward_contract_address { + return BlockAuthorOption::EngineBlockAuthor(address); + } + return BlockAuthorOption::ConfiguredBlockAuthor; } fn on_before_transactions(&self, block: &mut ExecutedBlock) -> Result<(), Error> { @@ -1528,24 +1773,20 @@ impl Engine for HoneyBadgerBFT { // only if no block reward skips are defined for this block. 
let header_number = block.header.number(); - block.header.set_author(address); - if self .params .should_do_block_reward_contract_call(header_number) { let mut call = default_system_or_code_call(&self.machine, block); let mut latest_block_number: BlockNumber = 0; - let mut latest_block_timestamp: u64 = 0; if let Some(client) = self.client_arc() { if let Some(header) = client.block_header(BlockId::Latest) { latest_block_number = header.number(); - latest_block_timestamp = header.timestamp() } } // only do the key gen - let is_epoch_end = self.do_keygen(latest_block_timestamp); + let is_epoch_end = self.do_keygen(); trace!(target: "consensus", "calling reward function for block {} isEpochEnd? {} on address: {} (latest block: {}", header_number, is_epoch_end, address, latest_block_number); let contract = BlockRewardContract::new_from_address(address); @@ -1566,7 +1807,8 @@ impl Engine for HoneyBadgerBFT { match state.update_honeybadger( client.clone(), &self.signer, - &self.peers_management, + &self.hbbft_peers_service, + &self.early_epoch_manager, &self.current_minimum_gas_price, BlockId::Hash(block_hash.clone()), false, @@ -1594,27 +1836,64 @@ impl Engine for HoneyBadgerBFT { } } + /// hbbft protects the start of the current posdao epoch start from being pruned. + fn pruning_protection_block_number(&self) -> Option { + // we try to get a read lock for 500 ms. + // that is a very long duration, but the information is important. + if let Some(hbbft_state_lock) = self.hbbft_state.try_read_for(Duration::from_millis(500)) { + if let Some(last_epoch_start_block) = + hbbft_state_lock.get_last_posdao_epoch_start_block() + { + return Some(last_epoch_start_block); + } + return Some(hbbft_state_lock.get_current_posdao_epoch_start_block()); + } else { + // better a potential stage 3 verification error instead of a deadlock ?! 
+ // https://github.com/DMDcoin/diamond-node/issues/68 + warn!(target: "engine", "could not aquire read lock for retrieving the pruning_protection_block_number. Stage 3 verification error might follow up."); + return None; + } + } + + // note: this is by design not part of the PrometheusMetrics trait, + // it is part of the Engine trait and does nothing by default. fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) { + let is_staked = self.is_staked(); + + registry.register_gauge( + "hbbft_is_staked", + "Is the signer of the hbbft engine staked.", + is_staked as i64, + ); + self.hbbft_message_dispatcher.prometheus_metrics(registry); + if let Some(early_epoch_manager_option) = self + .early_epoch_manager + .try_lock_for(Duration::from_millis(250)) + { + if let Some(early_epoch_manager) = early_epoch_manager_option.as_ref() { + early_epoch_manager.prometheus_metrics(registry); + } + } } } #[cfg(test)] mod tests { use super::super::{contribution::Contribution, test::create_transactions::create_transaction}; + use crate::types::transaction::SignedTransaction; use crypto::publickey::{Generator, Random}; use ethereum_types::U256; use hbbft::{ - honey_badger::{HoneyBadger, HoneyBadgerBuilder}, NetworkInfo, + honey_badger::{HoneyBadger, HoneyBadgerBuilder}, }; - use rand_065; + use rand; use std::sync::Arc; - use types::transaction::SignedTransaction; #[test] fn test_single_contribution() { - let mut rng = rand_065::thread_rng(); + let mut rng = rand::thread_rng(); let net_infos = NetworkInfo::generate_map(0..1usize, &mut rng) .expect("NetworkInfo generation is expected to always succeed"); @@ -1624,6 +1903,7 @@ mod tests { let mut builder: HoneyBadgerBuilder = HoneyBadger::builder(Arc::new(net_info.clone())); + builder.max_future_epochs(0); let mut honey_badger = builder.build(); diff --git a/crates/ethcore/src/engines/hbbft/hbbft_engine_cache.rs b/crates/ethcore/src/engines/hbbft/hbbft_engine_cache.rs new file mode 100644 index 0000000000..c8e2bf1f04 
--- /dev/null +++ b/crates/ethcore/src/engines/hbbft/hbbft_engine_cache.rs @@ -0,0 +1,143 @@ +use crate::{client::EngineClient, error::Error}; +use ethereum_types::Address; +use parking_lot::Mutex; + +#[derive(Debug, Clone)] +pub struct HbbftEngineCacheData { + pub signer_address: Address, + + pub is_staked: bool, + + pub is_available: bool, +} + +impl HbbftEngineCacheData { + pub fn new() -> Self { + HbbftEngineCacheData { + signer_address: Address::zero(), + is_staked: false, + is_available: false, + } + } +} + +pub struct HbbftEngineCache { + data: Mutex, +} + +impl HbbftEngineCache { + pub fn new() -> Self { + HbbftEngineCache { + data: Mutex::new(HbbftEngineCacheData::new()), + } + } + + pub fn is_staked(&self) -> bool { + self.data.lock().is_staked + } + + // pub fn signer_address(&self) -> Address { + // // this is dead code for now, but for further optimization we will use it in the future, + // self.data.lock().signer_address + // } + + pub fn is_available(&self) -> bool { + self.data.lock().is_available + } + + /// Refresh the cache values. 
+ pub fn refresh_cache( + &mut self, + signer_address: Address, + engine_client: &dyn EngineClient, + ) -> Result<(), Error> { + //self.is_staked = false; + + let mut new_data = HbbftEngineCacheData::new(); + new_data.signer_address = signer_address; + let is_available = self.calc_is_available(signer_address, engine_client)?; + new_data.is_available = is_available; + new_data.is_staked = self.calc_is_staked(signer_address, engine_client)?; + + self.data.lock().clone_from(&new_data); + + return Ok(()); + } + + fn calc_is_available( + &mut self, + signer_address: Address, + engine_client: &dyn EngineClient, + ) -> Result { + let engine_client = engine_client; + + if signer_address.is_zero() { + // debug!(target: "consensus", "is_available: not available because mining address is zero: "); + return Ok(false); + } + + match super::contracts::validator_set::get_validator_available_since( + engine_client, + &signer_address, + ) { + Ok(available_since) => { + trace!(target: "consensus", "available_since: {}", available_since); + return Ok(!available_since.is_zero()); + } + Err(err) => { + warn!(target: "consensus", "Error get get_validator_available_since: ! {:?}", err); + } + } + return Ok(false); + } + + /// refreshes cache, if node is staked. + fn calc_is_staked( + &self, + mining_address: Address, + engine_client: &dyn EngineClient, + ) -> Result { + // is the configured validator stacked ?? 
+ match super::contracts::validator_set::staking_by_mining_address( + engine_client, + &mining_address, + ) { + Ok(staking_address) => { + // if there is no pool for this validator defined, we know that + if staking_address.is_zero() { + return Ok(false); + } + match super::contracts::staking::stake_amount( + engine_client, + &staking_address, + &staking_address, + ) { + Ok(stake_amount) => { + trace!(target: "consensus", "stake_amount: {}", stake_amount); + + // we need to check if the pool stake amount is >= minimum stake + match super::contracts::staking::candidate_min_stake(engine_client) { + Ok(min_stake) => { + trace!(target: "consensus", "min_stake: {}", min_stake); + return Ok(stake_amount.ge(&min_stake)); + } + Err(err) => { + error!(target: "consensus", "Error get candidate_min_stake: ! {:?}", err); + return Ok(false); + //return Err(err.into()); + } + } + } + Err(err) => { + warn!(target: "consensus", "Error get stake_amount: ! {:?}", err); + return Ok(false); + } + } + } + Err(err) => { + warn!(target: "consensus", "Error get staking_by_mining_address: ! {:?}", err); + return Ok(false); + } + } + } +} diff --git a/crates/ethcore/src/engines/hbbft/hbbft_message_memorium.rs b/crates/ethcore/src/engines/hbbft/hbbft_message_memorium.rs index e6930c8743..235b13a480 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_message_memorium.rs +++ b/crates/ethcore/src/engines/hbbft/hbbft_message_memorium.rs @@ -1,18 +1,24 @@ +// Temporarily deactivated warnings, remove after implementation is complete. 
+#![allow(warnings)] + use bytes::ToPretty; //use hbbft::honey_badger::{self, MessageContent}; use hbbft::honey_badger::{self}; use parking_lot::RwLock; use stats::PrometheusMetrics; -use std::{collections::VecDeque, time::Duration}; +use std::{ + collections::VecDeque, + time::{Duration, Instant}, +}; // use threshold_crypto::{SignatureShare}; -use engines::hbbft::{sealing, NodeId}; +use crate::engines::hbbft::{NodeId, sealing}; // use hbbft::honey_badger::Message; // use serde::{Deserialize, Serialize}; // use serde_json::{json, Result, Value}; use std::{ - fs::{self, create_dir_all, File}, + fs::{self, File, create_dir_all}, io::Write, path::PathBuf, }; @@ -43,15 +49,21 @@ pub(crate) enum SealMessageState { pub(crate) struct NodeStakingEpochHistory { node_id: NodeId, last_good_sealing_message: u64, + last_good_sealing_message_time: Instant, last_late_sealing_message: u64, + last_late_sealing_message_time: Instant, last_error_sealing_message: u64, + last_error_sealing_message_time: Instant, + // summed up lateness of all seals, including bad seals. cumulative_lateness: u64, + sealing_blocks_good: Vec, sealing_blocks_late: Vec, sealing_blocks_bad: Vec, // messages. 
last_message_faulty: u64, last_message_good: u64, + last_message_good_time: Instant, num_faulty_messages: u64, num_good_messages: u64, // total_contributions_good: u64, @@ -60,64 +72,127 @@ pub(crate) struct NodeStakingEpochHistory { impl NodeStakingEpochHistory { pub fn new(node_id: NodeId) -> Self { + let now = Instant::now(); NodeStakingEpochHistory { node_id, last_good_sealing_message: 0, + last_good_sealing_message_time: now, last_late_sealing_message: 0, + last_late_sealing_message_time: now, last_error_sealing_message: 0, + last_error_sealing_message_time: now, cumulative_lateness: 0, sealing_blocks_good: Vec::new(), sealing_blocks_late: Vec::new(), sealing_blocks_bad: Vec::new(), last_message_faulty: 0, last_message_good: 0, + last_message_good_time: now, num_faulty_messages: 0, num_good_messages: 0, } } - /// mut ADD_... + /// calculates the cumulative lateness for that communication partner, + /// based on existing data, detecting blocks with missing late or good seals + fn calc_cumulative_lateness_gap( + &self, + block_num: u64, + staking_epoch_start_block_num: u64, + ) -> u64 { + if block_num <= 1 { + return 0; + } + + // add cumulative lateness, for all blocks between the last tracked block + // and the current block. + if self.last_late_sealing_message + 1 < block_num + || self.last_good_sealing_message + 1 < block_num + || self.last_error_sealing_message + 1 < block_num + { + let latest_message = u64::max( + u64::max( + u64::max( + self.last_late_sealing_message, + staking_epoch_start_block_num, + ), + self.last_good_sealing_message, + ), + self.last_error_sealing_message, + ); + + if (latest_message + 1) > block_num { + return 0; + } + + let difference = block_num - 1 - latest_message; + + return (difference * (difference + 1)) / 2; + } + return 0; + } /// protocols a good seal event. 
- pub fn add_good_seal_event(&mut self, event: &SealEventGood) { + pub fn add_good_seal_event( + &mut self, + event: &SealEventGood, + staking_epoch_start_block_num: u64, + ) { // by definition a "good sealing" is always on the latest block. let block_num = event.block_num; let last_good_sealing_message = self.last_good_sealing_message; - if block_num > last_good_sealing_message { - self.last_good_sealing_message = event.block_num; - } else { + if block_num < last_good_sealing_message { warn!(target: "hbbft_message_memorium", "add_good_seal_event: event.block_num {block_num} <= self.last_good_sealing_message {last_good_sealing_message}"); + return; } + + self.cumulative_lateness += + self.calc_cumulative_lateness_gap(event.block_num, staking_epoch_start_block_num); + self.last_good_sealing_message = event.block_num; + self.last_good_sealing_message_time = Instant::now(); self.sealing_blocks_good.push(event.block_num); } /// protocols a good seal event. - pub fn add_seal_event_late(&mut self, event: &SealEventLate) { + pub fn add_seal_event_late(&mut self, event: &SealEventLate, staking_epoch_start_block: u64) { // by definition a "good sealing" is always on the latest block. 
let block_num = event.block_num; - let last_late_sealing_message = self.last_late_sealing_message; - if block_num > last_late_sealing_message { - self.last_late_sealing_message = event.block_num; - } else { - warn!(target: "hbbft_message_memorium", "add_late_seal_event: event.block_num {block_num} <= self.last_late_sealing_message {last_late_sealing_message}"); + if block_num < self.last_late_sealing_message { + warn!(target: "hbbft_message_memorium", "out of order seal events: add_late_seal_event: event.block_num {block_num} <= self.last_late_sealing_message {}", self.last_late_sealing_message); + return; } + + self.cumulative_lateness += + self.calc_cumulative_lateness_gap(event.block_num, staking_epoch_start_block); + + self.last_late_sealing_message = event.block_num; + self.last_late_sealing_message_time = Instant::now(); self.cumulative_lateness += event.get_lateness(); self.sealing_blocks_late.push(event.block_num); } - pub(crate) fn add_bad_seal_event(&mut self, event: &SealEventBad) { + pub(crate) fn add_bad_seal_event( + &mut self, + event: &SealEventBad, + staking_epoch_start_block_num: u64, + ) { // by definition a "good sealing" is always on the latest block. 
let block_num = event.block_num; let last_bad_sealing_message = self.last_error_sealing_message; - if block_num > last_bad_sealing_message { - self.last_good_sealing_message = event.block_num; - } else { + if block_num < last_bad_sealing_message { warn!(target: "hbbft_message_memorium", "add_bad_seal_event: event.block_num {block_num} <= self.last_bad_sealing_message {last_bad_sealing_message}"); + return; } + + self.cumulative_lateness += + self.calc_cumulative_lateness_gap(block_num, staking_epoch_start_block_num); + self.cumulative_lateness += 1; + self.last_error_sealing_message = event.block_num; + self.last_error_sealing_message_time = Instant::now(); self.sealing_blocks_good.push(event.block_num); } @@ -128,9 +203,10 @@ impl NodeStakingEpochHistory { if block_num > last_message_faulty { self.last_message_faulty = block_num; - } else { - warn!(target: "hbbft_message_memorium", "add_message_event_faulty: event.block_num {block_num} <= last_message_faulty {last_message_faulty}"); - } + } // else { + // this log entry is trigering often, probably there are more than 1 good messages able per block.// this log entry is trigering often, probably there are more than 1 good messages able per block. + // warn!(target: "hbbft_message_memorium", "add_message_event_faulty: event.block_num {block_num} <= last_message_faulty {last_message_faulty}"); + // } self.num_faulty_messages += 1; } @@ -140,11 +216,13 @@ impl NodeStakingEpochHistory { let last_message_good = self.last_message_good; if block_num > last_message_good { - self.last_message_faulty = block_num; - } else { - warn!(target: "hbbft_message_memorium", "add_message_event_good: event.block_num {block_num} <= last_message_faulty {last_message_good}"); - } + self.last_message_good = block_num; + } // else { + // this log entry is trigering often, probably there are more than 1 good messages able per block. + // warn!(target: "hbbft_message_memorium", "add_message_event_good: ! 
event.block_num {block_num} > last_message_good {last_message_good}"); + // } self.num_good_messages += 1; + self.last_message_good_time = Instant::now(); } /// GETTERS @@ -167,6 +245,28 @@ impl NodeStakingEpochHistory { + self.get_total_error_sealing_messages() } + pub fn get_last_sealing_message(&self) -> u64 { + u64::max( + self.last_late_sealing_message, + self.last_good_sealing_message, + ) + } + + pub fn get_last_sealing_message_time(&self) -> Instant { + Instant::max( + self.last_late_sealing_message_time, + self.last_good_sealing_message_time, + ) + } + + pub fn get_last_good_message_time(&self) -> Instant { + self.last_message_good_time + } + + pub fn get_last_late_sealing_message(&self) -> u64 { + self.last_late_sealing_message + } + pub fn get_last_good_sealing_message(&self) -> u64 { self.last_good_sealing_message } @@ -198,7 +298,130 @@ impl NodeStakingEpochHistory { // faulty messages let last_message_faulty = self.last_message_faulty; - return format!("{staking_epoch},{node_id},{total_sealing_messages},{total_good_sealing_messages},{total_late_sealing_messages},{total_error_sealing_messages},{last_good_sealing_message},{last_late_sealing_message},{last_error_sealing_message},{cumulative_lateness},{total_good_messages},{total_faulty_messages},{last_message_good},{last_message_faulty}\n"); + return format!( + "{staking_epoch},{node_id},{total_sealing_messages},{total_good_sealing_messages},{total_late_sealing_messages},{total_error_sealing_messages},{last_good_sealing_message},{last_late_sealing_message},{last_error_sealing_message},{cumulative_lateness},{total_good_messages},{total_faulty_messages},{last_message_good},{last_message_faulty}\n" + ); + } + + // prometheus metrics + + fn prometheus_metrics( + &self, + r: &mut stats::PrometheusRegistry, + known_highest_block: u64, + epoch_start_block: u64, + ) { + // one problem that occurs here is that we have a dynamic name of the gauges. 
+ // that could lead to troubles later in the UI, because we would have to adapt the UI to the dynamic names. + // a solution could be to give every node a number from 0 to n (n=25 for DMD), and supply the name as a text value, + // so we still can figure out the node id, but the name of the gauge keeps static. + + //let metric: Metric = Metric::new(); + //r.registry().register(c) + + //let node_id = self.get_node_id().0 .0; + + let other_node = self.get_node_id().as_8_byte_string(); + + //r.register_gauge_with_label(name, help, label, value) + r.register_gauge_with_other_node_label( + "cumulative_lateness_raw", + "cumulative lateness, raw value without lateness from missing seals", + other_node.as_str(), + self.cumulative_lateness as i64, + ); + + // if the node has not send an sealing message, + // it's cumulative lateness is not tracked. + + // we begin counting from the first block of the epoch. + let last_good_sealing_message = u64::max(self.last_good_sealing_message, epoch_start_block); + + let non_tracked_cumulative_lateness = + self.calc_cumulative_lateness_gap(known_highest_block, epoch_start_block); + + r.register_gauge_with_other_node_label( + "cumulative_lateness", + "cumulative lateness, including missing seals from that node.", + other_node.as_str(), + (self.cumulative_lateness + non_tracked_cumulative_lateness) as i64, + ); + + r.register_gauge_with_other_node_label( + "sealing_blocks_good", + "good sealed block messages", + other_node.as_str(), + self.sealing_blocks_good.len() as i64, + ); + + r.register_gauge_with_other_node_label( + "sealing_blocks_late", + "late sealed blocks", + other_node.as_str(), + self.sealing_blocks_late.len() as i64, + ); + + r.register_gauge_with_other_node_label( + "sealing_blocks_bad", + "bad block seals", + other_node.as_str(), + self.sealing_blocks_bad.len() as i64, + ); + + // last_good_sealing_message: u64, + // last_late_sealing_message: u64, + // last_error_sealing_message: u64, + + if 
self.last_good_sealing_message > 0 { + r.register_gauge_with_other_node_label( + "last_good_sealing_message", + "block number", + other_node.as_str(), + self.last_good_sealing_message as i64, + ); + } + + if self.last_late_sealing_message > 0 { + r.register_gauge_with_other_node_label( + "last_late_sealing_message", + "block number", + other_node.as_str(), + self.last_late_sealing_message as i64, + ); + } + + if self.last_error_sealing_message > 0 { + r.register_gauge_with_other_node_label( + "last_error_sealing_message", + "block number", + other_node.as_str(), + self.last_error_sealing_message as i64, + ); + } + + // last_message_faulty: u64, + // last_message_good: u64, + + // num_faulty_messages: u64, + // num_good_messages: u64, + + if self.last_message_good > 0 { + r.register_gauge_with_other_node_label( + "last_message_good", + "block number", + other_node.as_str(), + self.last_message_good as i64, + ); + } + + if self.last_message_faulty > 0 { + r.register_gauge_with_other_node_label( + "last_message_faulty", + "block number", + other_node.as_str(), + self.last_message_faulty as i64, + ); + } } } @@ -209,6 +432,10 @@ pub(crate) struct StakingEpochHistory { staking_epoch_start_block: u64, staking_epoch_end_block: u64, + /// highest block number that was processed for this epoch. + /// used to calculate the real lateness of Nodes. + highest_block_num: u64, + // stored the node staking epoch history. // since 25 is the exected maximum, a Vec has about the same perforamnce than a HashMap. 
node_staking_epoch_histories: Vec, @@ -226,12 +453,29 @@ impl StakingEpochHistory { staking_epoch, staking_epoch_start_block, staking_epoch_end_block, + highest_block_num: staking_epoch_start_block, node_staking_epoch_histories: Vec::new(), exported: false, } } - fn get_history_for_node(&mut self, node_id: &NodeId) -> &mut NodeStakingEpochHistory { + pub fn get_history_for_node(&self, node_id: &NodeId) -> Option<&NodeStakingEpochHistory> { + let index_result = self + .node_staking_epoch_histories + .iter() + .position(|x| &x.get_node_id() == node_id); + + match index_result { + Some(index) => { + return Some(&self.node_staking_epoch_histories[index]); + } + None => { + return None; + } + }; + } + + pub fn ensure_history_for_node(&mut self, node_id: &NodeId) -> &mut NodeStakingEpochHistory { let index_result = self .node_staking_epoch_histories .iter_mut() @@ -250,31 +494,37 @@ impl StakingEpochHistory { } pub fn on_seal_good(&mut self, event: &SealEventGood) { - let node_staking_epoch_history = self.get_history_for_node(&event.node_id); - node_staking_epoch_history.add_good_seal_event(event); + let staking_epoch_start_block = self.staking_epoch_start_block; + if event.block_num > self.highest_block_num { + self.highest_block_num = event.block_num; + } + let node_staking_epoch_history = self.ensure_history_for_node(&event.node_id); + node_staking_epoch_history.add_good_seal_event(event, staking_epoch_start_block); self.exported = false; } pub fn on_seal_late(&mut self, event: &SealEventLate) { - let node_staking_epoch_history = self.get_history_for_node(&event.node_id); - node_staking_epoch_history.add_seal_event_late(event); + let staking_epoch_start_block = self.staking_epoch_start_block; + let node_staking_epoch_history = self.ensure_history_for_node(&event.node_id); + node_staking_epoch_history.add_seal_event_late(event, staking_epoch_start_block); self.exported = false; } pub fn on_seal_bad(&mut self, event: &SealEventBad) { - let node_staking_epoch_history = 
self.get_history_for_node(&event.node_id); - node_staking_epoch_history.add_bad_seal_event(event); + let staking_epoch_start_block = self.staking_epoch_start_block; + let node_staking_epoch_history = self.ensure_history_for_node(&event.node_id); + node_staking_epoch_history.add_bad_seal_event(event, staking_epoch_start_block); self.exported = false; } pub fn on_message_faulty(&mut self, event: &MessageEventFaulty) { - let node_staking_epoch_history = self.get_history_for_node(&event.node_id); + let node_staking_epoch_history = self.ensure_history_for_node(&event.node_id); node_staking_epoch_history.add_message_event_faulty(event); self.exported = false; } pub fn on_message_good(&mut self, event: &MessageEventGood) { - let node_staking_epoch_history = self.get_history_for_node(&event.node_id); + let node_staking_epoch_history = self.ensure_history_for_node(&event.node_id); node_staking_epoch_history.add_message_event_good(event); self.exported = false; } @@ -320,7 +570,7 @@ pub struct SealEventLate { impl SealEventLate { // get's the block lateness in blocks. pub fn get_lateness(&self) -> u64 { - self.received_block_num - self.block_num + (self.received_block_num - self.block_num) + 1 } } @@ -395,13 +645,15 @@ impl HbbftMessageDispatcher { let builder = std::thread::Builder::new().name("MessageMemorial".to_string()); - match builder.spawn(move || loop { - // one loop cycle is very fast. - // so report_ function have their chance to aquire a write lock soon. - // and don't block the work thread for too long. - let work_result = arc_clone.write().work_message(); - if !work_result { - std::thread::sleep(std::time::Duration::from_millis(5000)); + match builder.spawn(move || { + loop { + // one loop cycle is very fast. + // so report_ function have their chance to aquire a write lock soon. + // and don't block the work thread for too long. 
+ let work_result = arc_clone.write().work_message(); + if !work_result { + std::thread::sleep(std::time::Duration::from_millis(5000)); + } } }) { Ok(thread) => { @@ -494,6 +746,10 @@ impl HbbftMessageDispatcher { .write() .report_new_epoch(staking_epoch, staking_epoch_start_block); } + + pub fn get_memorium(&self) -> &std::sync::Arc> { + return &self.memorial; + } } pub(crate) struct HbbftMessageMemorium { @@ -648,7 +904,7 @@ impl HbbftMessageMemorium { fn on_seal_good(&mut self, seal: &SealEventGood) -> bool { debug!(target: "hbbft_message_memorium", "working on good seal!: {:?}", seal); let block_num = seal.block_num; - if let Some(epoch_history) = self.get_staking_epoch_history(block_num) { + if let Some(epoch_history) = self.get_staking_epoch_history_mut(block_num) { epoch_history.on_seal_good(seal); return true; } else { @@ -675,7 +931,7 @@ impl HbbftMessageMemorium { fn on_seal_late(&mut self, seal: &SealEventLate) -> bool { debug!(target: "hbbft_message_memorium", "working on good seal!: {:?}", seal); let block_num = seal.block_num; - if let Some(epoch_history) = self.get_staking_epoch_history(block_num) { + if let Some(epoch_history) = self.get_staking_epoch_history_mut(block_num) { epoch_history.on_seal_late(seal); return true; } else { @@ -686,7 +942,7 @@ impl HbbftMessageMemorium { fn on_seal_bad(&mut self, seal: &SealEventBad) -> bool { debug!(target: "hbbft_message_memorium", "working on good seal!: {:?}", seal); let block_num = seal.block_num; - if let Some(epoch_history) = self.get_staking_epoch_history(block_num) { + if let Some(epoch_history) = self.get_staking_epoch_history_mut(block_num) { epoch_history.on_seal_bad(seal); return true; } else { @@ -697,7 +953,7 @@ impl HbbftMessageMemorium { fn on_message_faulty(&mut self, event: &MessageEventFaulty) -> bool { debug!(target: "hbbft_message_memorium", "working on faulty message event!: {:?}", event); let block_num = event.block_num; - if let Some(epoch_history) = 
self.get_staking_epoch_history(block_num) { + if let Some(epoch_history) = self.get_staking_epoch_history_mut(block_num) { epoch_history.on_message_faulty(event); return true; } else { @@ -707,7 +963,7 @@ impl HbbftMessageMemorium { fn on_message_good(&mut self, event: &MessageEventGood) -> bool { debug!(target: "hbbft_message_memorium", "working on good message event!: {:?}", event); - if let Some(epoch_history) = self.get_staking_epoch_history(event.block_num) { + if let Some(epoch_history) = self.get_staking_epoch_history_mut(event.block_num) { epoch_history.on_message_good(event); return true; } else { @@ -715,9 +971,20 @@ impl HbbftMessageMemorium { } } + pub fn get_validator_data( + &self, + block_num: u64, + node_id: &NodeId, + ) -> Option<&Vec> { + if let Some(epoch_history) = self.get_staking_epoch_history(block_num) { + return Some(&epoch_history.node_staking_epoch_histories); + } + None + } + // report that hbbft has switched to a new staking epoch pub fn report_new_epoch(&mut self, staking_epoch: u64, staking_epoch_start_block: u64) { - warn!(target: "hbbft_message_memorium", "report new epoch: {}", staking_epoch); + debug!(target: "hbbft_message_memorium", "report new epoch: {}", staking_epoch); self.latest_epoch = staking_epoch; self.latest_epoch_start_block = staking_epoch_start_block; if let Ok(epoch_history_index) = self @@ -752,18 +1019,35 @@ impl HbbftMessageMemorium { } } - fn get_staking_epoch_history(&mut self, block_num: u64) -> Option<&mut StakingEpochHistory> { - { - //let histories = &mut self.staking_epoch_history; - - // self.staking_epoch_history.get_mut(index) - for i in 0..self.staking_epoch_history.len() { - let e = &self.staking_epoch_history[i]; - if block_num >= e.staking_epoch_start_block - && (e.staking_epoch_end_block == 0 || block_num <= e.staking_epoch_end_block) - { - return Some(&mut self.staking_epoch_history[i]); - } + pub fn get_staking_epoch_history(&self, block_num: u64) -> Option<&StakingEpochHistory> { + //let 
histories = &mut self.staking_epoch_history; + + // self.staking_epoch_history.get_mut(index) + for i in 0..self.staking_epoch_history.len() { + let e = &self.staking_epoch_history[i]; + if block_num >= e.staking_epoch_start_block + && (e.staking_epoch_end_block == 0 || block_num <= e.staking_epoch_end_block) + { + return Some(&self.staking_epoch_history[i]); + } + } + + None + } + + fn get_staking_epoch_history_mut( + &mut self, + block_num: u64, + ) -> Option<&mut StakingEpochHistory> { + //let histories = &mut self.staking_epoch_history; + + // self.staking_epoch_history.get_mut(index) + for i in 0..self.staking_epoch_history.len() { + let e = &self.staking_epoch_history[i]; + if block_num >= e.staking_epoch_start_block + && (e.staking_epoch_end_block == 0 || block_num <= e.staking_epoch_end_block) + { + return Some(&mut self.staking_epoch_history[i]); } } @@ -836,7 +1120,6 @@ impl HbbftMessageMemorium { } // good seals - if let Some(good_seal) = self.dispatched_seal_event_good.front() { // rust borrow system forced me into this useless clone... debug!(target: "hbbft_message_memorium", "work: good Seal!"); @@ -849,7 +1132,6 @@ impl HbbftMessageMemorium { } // late seals - if let Some(late_seal) = self.dispatched_seal_event_late.front() { // rust borrow system forced me into this useless clone... if self.on_seal_late(&late_seal.clone()) { @@ -971,93 +1253,6 @@ impl PrometheusMetrics for HbbftMessageDispatcher { } } -impl PrometheusMetrics for NodeStakingEpochHistory { - fn prometheus_metrics(&self, r: &mut stats::PrometheusRegistry) { - // one problem that occurs here is that we have a dynamic name of the gauges. - // that could lead to troubles later in the UI, because we would have to adapt the UI to the dynamic names. - // a solution could be to give every node a number from 0 to n (n=25 for DMD), and supply the name as a text value, - // so we still can figure out the node id, but the name of the gauge keeps static. 
- - //let metric: Metric = Metric::new(); - //r.registry().register(c) - - let label = self.get_node_id().0.to_hex(); - //r.register_gauge_with_label(name, help, label, value) - r.register_gauge_with_label( - format!("cumulative_lateness").as_str(), - format!("cumulative lateness").as_str(), - label.as_str(), - self.cumulative_lateness as i64, - ); - - r.register_gauge_with_label( - format!("sealing_blocks_good").as_str(), - format!("good sealed block messages").as_str(), - label.as_str(), - self.sealing_blocks_good.len() as i64, - ); - - r.register_gauge_with_label( - format!("sealing_blocks_late").as_str(), - format!("late sealed blocks").as_str(), - label.as_str(), - self.sealing_blocks_late.len() as i64, - ); - - r.register_gauge_with_label( - format!("sealing_blocks_bad").as_str(), - format!("bad block seals").as_str(), - label.as_str(), - self.sealing_blocks_bad.len() as i64, - ); - - // last_good_sealing_message: u64, - // last_late_sealing_message: u64, - // last_error_sealing_message: u64, - - r.register_gauge_with_label( - format!("last_good_sealing_message").as_str(), - format!("block number").as_str(), - label.as_str(), - self.last_good_sealing_message as i64, - ); - - r.register_gauge_with_label( - format!("last_late_sealing_message").as_str(), - format!("block number").as_str(), - label.as_str(), - self.last_late_sealing_message as i64, - ); - - r.register_gauge_with_label( - format!("last_error_sealing_message").as_str(), - format!("block number").as_str(), - label.as_str(), - self.last_error_sealing_message as i64, - ); - - // last_message_faulty: u64, - // last_message_good: u64, - - // num_faulty_messages: u64, - // num_good_messages: u64, - - r.register_gauge_with_label( - format!("last_message_faulty").as_str(), - format!("block number").as_str(), - label.as_str(), - self.last_message_faulty as i64, - ); - - r.register_gauge_with_label( - format!("last_message_good").as_str(), - format!("block number").as_str(), - label.as_str(), - 
self.last_message_faulty as i64, - ); - } -} - impl PrometheusMetrics for StakingEpochHistory { fn prometheus_metrics(&self, r: &mut stats::PrometheusRegistry) { r.register_gauge( @@ -1072,7 +1267,11 @@ impl PrometheusMetrics for StakingEpochHistory { ); for epoch_history in self.node_staking_epoch_histories.iter() { - epoch_history.prometheus_metrics(r); + epoch_history.prometheus_metrics( + r, + self.highest_block_num, + self.staking_epoch_start_block, + ); } } } @@ -1081,6 +1280,17 @@ impl PrometheusMetrics for HbbftMessageMemorium { fn prometheus_metrics(&self, r: &mut stats::PrometheusRegistry) { //let epoch_history_len = self.staking_epoch_history.len() as i64; + // r.register_gauge( + // "HbbftMessageMemorium_dispatched_message_event_faulty", + // "dispatched_message_event_faulty", + // self.dispatched_message_event_faulty.len() as i64, + // ); + // r.register_gauge( + // "HbbftMessageMemorium_dispatched_message_event_good", + // "dispatched_message_event_good", + // self.dispatched_message_event_good.len() as i64, + // ); + if let Some(history) = self.staking_epoch_history.iter().last() { history.prometheus_metrics(r); } @@ -1088,4 +1298,191 @@ impl PrometheusMetrics for HbbftMessageMemorium { } #[cfg(test)] -mod tests {} +mod tests { + use crate::engines::hbbft::{NodeId, hbbft_message_memorium::BadSealReason}; + + use super::{HbbftMessageMemorium, MessageEventGood, SealEventBad}; + + use crypto::publickey::{Generator, Random}; + use ethereum_types::Public; + + #[test] + fn test_message_memorium() { + use super::SealEventGood; + let mut memorium = HbbftMessageMemorium::new(0, "".to_string(), "".to_string()); + memorium.report_new_epoch(1, 100); + + let node1 = NodeId(Public::random()); + + // we need a second node, that sends good seals every block. 
+ let node2 = NodeId(Public::random()); + + //memorium.on_seal_good(SealEventGood { }); + + memorium.on_seal_good(&SealEventGood { + node_id: node1.clone(), + block_num: 101, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 101, + }); + + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0] + .last_good_sealing_message, + 101 + ); + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0].cumulative_lateness, + 0 + ); + + // if we do skip block 101 with node1, and do not send a message at all - a late block should be tracked. + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 102, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 103, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node1.clone(), + block_num: 103, + }); + + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0].cumulative_lateness, + 1 + ); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 104, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node1.clone(), + block_num: 104, + }); + + // node was on time, so cumulative_lateness should be still one. + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0].cumulative_lateness, + 1 + ); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 105, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 106, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 107, + }); + + // node 1 was missing 3 blocks now. 
+ // the cumulative lateness should sum up as follows: + // 1 - base value + // 1 - block 107 + // 2 - block 106 + // 3 - block 105 + // ------ + // 7 - total + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 108, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node1.clone(), + block_num: 108, + }); + + // node was on time, so cumulative_lateness should be still one. + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0].cumulative_lateness, + 7 + ); + + // test the bad message seals. + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 109, + }); + + memorium.on_seal_bad(&SealEventBad { + node_id: node1.clone(), + block_num: 109, + reason: BadSealReason::MismatchedNetworkInfo, + }); + + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0].cumulative_lateness, + 8 + ); + + // check if sealing message gaps are calculated the correct way with Bad Sealing Messages as well. + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 110, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 111, + }); + + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 112, + }); + + // we will receive a bad Seal for block 113 + memorium.on_seal_good(&SealEventGood { + node_id: node2.clone(), + block_num: 113, + }); + + memorium.on_seal_bad(&SealEventBad { + node_id: node1.clone(), + block_num: 113, + reason: BadSealReason::MismatchedNetworkInfo, + }); + + // node 1 was missing 3 blocks now, and has written 1 bad block. + // the cumulative lateness should sum up as follows: + // 8 - base value + // 1 - block 113 (bad) + // 1 - block 112 (missed) + // 2 - block 111 (missed) + // 3 - block 110 (missed) + // 0 - block 109 (bad - already counted.) 
+ // ------ + // 15 - total + + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[0].cumulative_lateness, + 15 + ); + + // since node2 was our reference node, that always created blocks, it's cumulative lateness should be 0 + + assert_eq!( + memorium.staking_epoch_history[0].node_staking_epoch_histories[1].cumulative_lateness, + 0 + ); + } +} diff --git a/crates/ethcore/src/engines/hbbft/hbbft_network_fork_manager.rs b/crates/ethcore/src/engines/hbbft/hbbft_network_fork_manager.rs new file mode 100644 index 0000000000..cd52a379db --- /dev/null +++ b/crates/ethcore/src/engines/hbbft/hbbft_network_fork_manager.rs @@ -0,0 +1,345 @@ +use std::{ + collections::{BTreeMap, VecDeque}, + sync::Arc, +}; + +use ethereum_types::H512; +use ethjson::spec::hbbft::HbbftNetworkFork; +use hbbft::{ + NetworkInfo, + sync_key_gen::{Ack, AckOutcome, Part, PartOutcome, SyncKeyGen}, + util::max_faulty, +}; +use parking_lot::RwLock; + +use crate::engines::{ + EngineSigner, + hbbft::contracts::keygen_history::{KeyPairWrapper, PublicWrapper}, +}; + +use super::NodeId; + +#[derive(Debug)] +struct HbbftFork { + // start_timestamp: u64, + start_block: u64, + + // start epoch is set, if the fork has been started. + start_epoch: Option, + + // end_block is set when the fork process is finished and the network operation has normaliced again. 
+ end_block: Option, + + validators: Vec, + parts: Vec, + acks: Vec>, +} + +impl HbbftFork { + pub fn from_definition(fork_definiton: &HbbftNetworkFork) -> HbbftFork { + let parts = fork_definiton.parts.iter().map(|p| { + if let Ok(part) = bincode::deserialize( p.as_slice()) { + part + } else { + error!(target:"engine", "hbbft-hardfork: could not interprete part from spec: {:?}", p.as_slice()); + panic!("hbbft-hardfork: could not interprete part from spec: {:?}", p.as_slice()); + } + }).collect(); + + let acks = fork_definiton.acks.iter().map(|acks| { + let mut fork_acks: Vec = Vec::new(); + + for ack_bytes in acks { + if let Ok(ack) = bincode::deserialize( ack_bytes.as_slice()) { + fork_acks.push(ack); + } else { + error!(target:"engine", "hbbft-hardfork: could not interprete acks from spec: {:?}", ack_bytes.as_slice()); + panic!("hbbft-hardfork: could not interprete acks from spec: {:?}", ack_bytes.as_slice()); + } + } + fork_acks + }).collect(); + + let node_ids = fork_definiton + .validators + .iter() + .map(|h| NodeId(H512::from_slice(h.as_slice()))) + .collect(); + + HbbftFork { + start_block: fork_definiton.block_number_start, + start_epoch: None, + end_block: fork_definiton.block_number_end, + validators: node_ids, + parts, + acks, + } + } +} + +/// Hbbft network fork manager. +/// This manager is responsible for managing the forks. +/// It allows cheap queries to see if a Fork is pending, +/// and stores information about a fork that is finished. +pub struct HbbftNetworkForkManager { + /// a ordered list with upcoming forks. + finished_forks: VecDeque, + + /// a ordered list with upcoming forks, including a fork that is in progress. + /// see @is_currently_forking for more information. + pending_forks: VecDeque, + + /// we cannot apply the RAI pattern because of the delayed Hbbft initialization + /// this variable tracks if the fork manager is initialized or not. 
+ is_init: bool, + + own_id: NodeId, +} + +impl HbbftNetworkForkManager { + /// Returns None if not forking + /// Returns a List of Addresses that become the new validator set and + /// declares the fork as active, + pub fn should_fork( + &mut self, + last_block_number: u64, + current_epoch: u64, + signer_lock: Arc>>>, + ) -> Option> { + // fields omitted + + if let Some(next_fork) = self.pending_forks.front_mut() { + if next_fork.start_block == last_block_number { + let wrapper = KeyPairWrapper { + inner: signer_lock.clone(), + }; + + let mut rng = rand::thread_rng(); + let mut pub_keys_btree: BTreeMap = BTreeMap::new(); + + for v in next_fork.validators.iter() { + pub_keys_btree.insert(v.clone(), PublicWrapper { inner: v.clone().0 }); + } + + let pub_keys: Arc> = Arc::new(pub_keys_btree); + let mut skg = match SyncKeyGen::new( + self.own_id, + wrapper, + pub_keys, + max_faulty(next_fork.validators.len()), + &mut rng, + ) { + Ok(s) => s.0, + Err(e) => { + error!(target: "engine", "hbbft-hardfork: could not create SyncKeyGen: {:?}", e); + panic!("hbbft-hardfork: could not create SyncKeyGen: {:?}", e); + } + }; + + //adding the PARTs to the SyncKeyGen + + for i in 0..next_fork.validators.len() { + let part = next_fork.parts.get(i).unwrap(); + let node_id = next_fork.validators.get(i).unwrap(); + let outcome = skg.handle_part(node_id, part.clone(), &mut rng).unwrap(); + + match outcome { + PartOutcome::Invalid(e) => { + error!(target: "engine", "hbbft-hardfork: Part for node {} is invalid: {:?}", node_id.as_8_byte_string(), e); + panic!( + "hbbft-hardfork: Part for node {} is invalid: {:?}", + node_id.as_8_byte_string(), + e + ); + } + PartOutcome::Valid(_) => {} + } + } + + for i in 0..next_fork.validators.len() { + let acks = next_fork.acks.get(i).unwrap(); + + for ack in acks.iter() { + let node_id = next_fork.validators.get(i).unwrap(); + let outcome = skg.handle_ack(node_id, ack.clone()).unwrap(); + + match outcome { + AckOutcome::Invalid(e) => { + 
error!(target: "engine", "hbbft-hardfork: Part for node {} is invalid: {:?}", node_id.as_8_byte_string(), e); + panic!( + "hbbft-hardfork: Part for node {} is invalid: {:?}", + node_id.as_8_byte_string(), + e + ); + } + AckOutcome::Valid => {} + } + } + } + + if !skg.is_ready() { + error!(target: "engine", "hbbft-hardfork: missing parts for SyncKeyGen for fork {:?}", next_fork); + panic!( + "hbbft-hardfork: missing parts for SyncKeyGen for fork {:?}", + next_fork + ); + } + + let (pks, sks) = match skg.generate() { + Ok((p, s)) => (p, s), + Err(e) => { + error!(target: "engine", "hbbft-hardfork: could not generate keys for fork: {:?} {:?}", e, next_fork); + panic!( + "hbbft-hardfork: could not generate keys for fork: {:?} {:?}", + e, next_fork + ); + } + }; + + let result = + NetworkInfo::::new(self.own_id, sks, pks, next_fork.validators.clone()); + + return Some(result); + } else if next_fork.start_block > last_block_number { + // in the following blocks after the fork process was started, + // it is possible for the network to have now ended the fork process. + // we are checking if the current epoch is greater than the start epoch. + + if let Some(start_epoch) = next_fork.start_epoch { + if current_epoch == start_epoch + 1 { + next_fork.end_block = Some(last_block_number); + + // the fork process is finished. + // we are moving the fork to the finished forks list. + + self.finished_forks + .push_back(self.pending_forks.pop_front().unwrap()); + } + } + } // else: we are just waiting for the fork to happen. + } + None + } + + /// Initializes the fork Manager, + /// with the information of the current block. + /// the Fork Manager is able to determine when the next fork is pending. + /// Forks that are already known to be finished, + /// have to be declared as finished. 
+ pub fn initialize( + &mut self, + own_id: NodeId, + startup_block_number: u64, + fork_definition_config: Vec, + ) { + if self.is_init { + panic!("HbbftNetworkForkManager is already initialized"); + } + + if fork_definition_config.len() == 0 { + self.is_init = true; + return; + } + + debug!(target: "engine", "hbbft-hardfork: initializing HbbftNetworkForkManager. Startup block number: {} total forks defined: {}", startup_block_number, fork_definition_config.len()); + + self.own_id = own_id; + + let mut fork_definition = fork_definition_config.clone(); + fork_definition.sort_by_key(|fork| fork.block_number_start); + + // the fork definition can contain + // - forks that are already finished + // - forks that are pending + + // there is one corner case: + // we could be in a current fork, + // if there is a a fork defined, + // that started in the past, + // is ongoing, and the normal key generation did not proceed to a new block. + + // first of all, we are appending all forks that happened in the past and are considered finished. + + for fork_def in fork_definition.iter() { + if let Some(end_block) = fork_def.block_number_end { + // if the fork is known to be ended, + // and the end is after current block, + // we do not need to take care about this fork anymore. 
+ if end_block < startup_block_number { + debug!(target: "engine", "hbbft-hardfork: ignoring already finished fork {:?}", fork_def); + continue; + } + + let fork = HbbftFork::from_definition(fork_def); + debug!(target: "engine", "hbbft-hardfork: added upcoming fork - add block {:?}", fork.start_block); + + self.pending_forks.push_back(fork); + } else if fork_def.block_number_start >= startup_block_number { + let fork = HbbftFork::from_definition(fork_def); + debug!(target: "engine", "hbbft-hardfork: added upcoming fork - add block {:?}", fork.start_block); + + self.pending_forks.push_back(fork); + } + } + + self.is_init = true; + } + + pub fn new() -> HbbftNetworkForkManager { + HbbftNetworkForkManager { + finished_forks: VecDeque::new(), + pending_forks: VecDeque::new(), + is_init: false, + own_id: NodeId::default(), + } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::engines::signer::from_keypair; + use crypto::publickey::{KeyPair, Secret}; + use ethjson::spec::hbbft::HbbftNetworkFork; + use std::str::FromStr; + + #[test] + fn test_fork_manager_should_fork() { + let mut fork_manager = HbbftNetworkForkManager::new(); + + let test_file_content = std::fs::read("res/local_tests/hbbft/hbbft_test_fork.json") + .expect("could not read test file."); + let test_fork = serde_json::from_slice::(test_file_content.as_slice()) + .expect("fork file is parsable."); + + //let test_client = HbbftTestClient::new(); + + let key1 = KeyPair::from_secret( + Secret::from_str("0000000000000000000000000000000000000000000000000000000000000001") + .unwrap(), + ) + .unwrap(); + + let signer = from_keypair(key1); + + //let signer = Box::new(Signer (key1)); + let signer_lock = std::sync::Arc::new(RwLock::new(Some(signer))); + + let own_id = NodeId::default(); + fork_manager.initialize(own_id, 8, vec![test_fork]); + assert!( + fork_manager + .should_fork(9, 1, signer_lock.clone()) + .is_none() + ); + let fork = fork_manager.should_fork(10, 1, signer_lock.clone()); + 
assert!(fork.is_some()); + assert!(fork.unwrap().num_nodes() == 2); + assert!( + fork_manager + .should_fork(11, 1, signer_lock.clone()) + .is_none() + ); + } +} diff --git a/crates/ethcore/src/engines/hbbft/hbbft_peers_handler.rs b/crates/ethcore/src/engines/hbbft/hbbft_peers_handler.rs new file mode 100644 index 0000000000..34d2e3c0d3 --- /dev/null +++ b/crates/ethcore/src/engines/hbbft/hbbft_peers_handler.rs @@ -0,0 +1,263 @@ +use std::sync::{ + Arc, Weak, + atomic::{AtomicBool, Ordering}, +}; + +use ethereum_types::Address; +use io::IoHandler; +use parking_lot::{Mutex, RwLock}; + +use crate::{ + client::EngineClient, + engines::{EngineError, hbbft::contracts::validator_set::send_tx_announce_availability}, + error::Error, +}; + +use super::{ + NodeId, + contracts::validator_set::{get_validator_available_since, staking_by_mining_address}, + hbbft_peers_management::HbbftPeersManagement, +}; + +#[derive(Debug)] +pub enum HbbftConnectToPeersMessage { + SetSignerAddress(Address), + ConnectToPendingPeers(Vec
), + ConnectToCurrentPeers(Vec), + AnnounceOwnInternetAddress, + AnnounceAvailability, + DisconnectAllValidators, +} + +/// IOChannel handler for doing hbbft peers management and hbbft service transactions async. +pub struct HbbftPeersHandler { + peers_management: Mutex, + client: Arc>>>, + has_sent_availability_tx: AtomicBool, + mining_address: Mutex
, +} + +impl HbbftPeersHandler { + pub fn new(client: Arc>>>) -> Self { + Self { + peers_management: Mutex::new(HbbftPeersManagement::new()), + client, + has_sent_availability_tx: AtomicBool::new(false), + mining_address: Mutex::new(Address::zero()), // Initialize with zero address, can be set later + } + } + + fn client_arc(&self) -> Result, Error> { + return self + .client + .read() + .as_ref() + .and_then(Weak::upgrade) + .ok_or(EngineError::RequiresClient.into()); + } + + fn get_mining_address(&self) -> Address { + // Lock the mutex to safely access the mining address + return self.mining_address.lock().clone(); + } + + fn announce_availability(&self) -> Result<(), Error> { + if self.has_sent_availability_tx.load(Ordering::SeqCst) { + return Ok(()); + } + + let mining_address = self.get_mining_address(); + + if mining_address.is_zero() { + error!(target: "engine", "Mining address is zero, cannot announce availability."); + return Err( + EngineError::SystemCallResultInvalid("Mining address is zero".to_string()).into(), + ); + } + + let engine_client = self.client_arc()?; + + let block_chain_client = engine_client + .as_full_client() + .ok_or("BlockchainClient required")?; + + if block_chain_client.is_major_syncing() { + return Ok(()); + } + + match get_validator_available_since(engine_client.as_ref(), &mining_address) { + Ok(s) => { + if s.is_zero() { + //debug!(target: "engine", "sending announce availability transaction"); + info!(target: "engine", "sending announce availability transaction"); + match send_tx_announce_availability(block_chain_client, &mining_address) { + Ok(()) => {} + Err(call_error) => { + error!(target: "engine", "CallError during announce availability. {:?}", call_error); + return Err(EngineError::SystemCallResultInvalid( + "CallError during announce availability".to_string(), + ) + .into()); + } + } + } + + // we store "HAS_SENT" if we SEND, + // or if we are already marked as available. 
+ self.has_sent_availability_tx.store(true, Ordering::SeqCst); + //return Ok(()); + return Ok(()); + } + Err(e) => { + error!(target: "engine", "Error trying to send availability check: {:?}", e); + return Err(EngineError::SystemCallResultInvalid( + "Error trying to send availability check".to_string(), + ) + .into()); + } + } + } + + fn announce_own_internet_address(&self) -> Result<(), Error> { + // todo: + // if the network is unable to process this transaction, + // we are keeping to announce out internet address. + + let mining_address = self.get_mining_address(); + + if mining_address.is_zero() { + error!(target: "engine", "Mining address is zero, will not announce own internet address."); + return Err( + EngineError::SystemCallResultInvalid("Mining address is zero".to_string()).into(), + ); + } + + let engine_client = self.client_arc()?; + + // TODO: + // staking by mining address could be cached. + // but it COULD also get changed in the contracts, during the time the node is running. + // most likely since a Node can get staked, and than it becomes a mining address. + // a good solution for this is not to do this expensive operation that fequently. + let staking_address = match staking_by_mining_address( + engine_client.as_ref(), + &mining_address, + ) { + Ok(staking_address) => { + if staking_address.is_zero() { + //TODO: here some fine handling can improve performance. + //with this implementation every node (validator or not) + //needs to query this state every block. 
+ //trace!(target: "engine", "availability handling not a validator"); + return Ok(()); + } + staking_address + } + Err(call_error) => { + let message = format!( + "unable to ask for corresponding staking address for given mining address: {:?}", + call_error + ); + error!(target: "engine", "{:?}", message); + + return Err(EngineError::SystemCallResultInvalid(message).into()); + } + }; + + let block_chain_client = engine_client + .as_full_client() + .ok_or("BlockchainClient required")?; + + if let Err(error) = self.peers_management.lock().announce_own_internet_address( + block_chain_client, + engine_client.as_ref(), + &mining_address, + &staking_address, + ) { + error!(target: "engine", "Error trying to announce own internet address: {:?}", error); + } + + return Ok(()); + } + + fn handle_message(&self, message: &HbbftConnectToPeersMessage) -> Result<(), Error> { + match message { + HbbftConnectToPeersMessage::ConnectToPendingPeers(peers) => { + match self + .peers_management + .lock() + .connect_to_pending_validators(&self.client_arc()?, peers) + { + Ok(value) => { + if value > 0 { + debug!(target: "engine", "Added {:?} reserved peers because they are pending validators.", value); + } + return Ok(()); + } + Err(err) => { + return Err(format!( + "Error connecting to other pending validators: {:?}", + err + ) + .into()); + } + } + } + HbbftConnectToPeersMessage::ConnectToCurrentPeers(validator_set) => { + // connecting to current validators. 
+ self.peers_management + .lock() + .connect_to_current_validators(validator_set, &self.client_arc()?); + return Ok(()); + } + + HbbftConnectToPeersMessage::AnnounceOwnInternetAddress => { + if let Err(error) = self.announce_own_internet_address() { + bail!("Error announcing own internet address: {:?}", error); + } + return Ok(()); + } + + HbbftConnectToPeersMessage::SetSignerAddress(signer_address) => { + info!(target: "engine", "Setting signer address to: {:?}", signer_address); + *self.mining_address.lock() = signer_address.clone(); + + self.peers_management + .lock() + .set_validator_address(signer_address.clone()); + return Ok(()); + } + + HbbftConnectToPeersMessage::DisconnectAllValidators => { + self.peers_management + .lock() + .disconnect_all_validators(&self.client_arc()?); + return Ok(()); + } + + HbbftConnectToPeersMessage::AnnounceAvailability => { + if let Err(error) = self.announce_availability() { + bail!("Error announcing availability: {:?}", error); + } + return Ok(()); + } + } + } +} + +impl IoHandler for HbbftPeersHandler { + fn message( + &self, + _io: &io::IoContext, + message: &HbbftConnectToPeersMessage, + ) { + match self.handle_message(message) { + Ok(_) => { + trace!(target: "engine", "Hbbft Queue successfully worked message {:?}", message); + } + Err(e) => { + error!(target: "engine", "Error handling HbbftConnectToPeersMessage: {:?} {:?}", message, e); + } + } + } +} diff --git a/crates/ethcore/src/engines/hbbft/hbbft_peers_management.rs b/crates/ethcore/src/engines/hbbft/hbbft_peers_management.rs index f5b44f53f5..8a8cf7c11b 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_peers_management.rs +++ b/crates/ethcore/src/engines/hbbft/hbbft_peers_management.rs @@ -9,26 +9,20 @@ use crate::{ ethereum::public_key_to_address::public_key_to_address, }; +use super::{NodeId, contracts::staking::get_pool_public_key}; use bytes::ToPretty; - use ethereum_types::Address; -use hbbft::NetworkInfo; - -use 
super::{contracts::staking::get_pool_public_key, NodeId}; #[derive(Clone, Debug)] struct ValidatorConnectionData { // mining_address: Address, - staking_address: Address, - socket_addr: SocketAddr, - public_key: NodeId, + // staking_address: Address, + // socket_addr: SocketAddr, + // public_key: NodeId, peer_string: String, mining_address: Address, } -// impl ValidatorConnectionData { -// } - pub struct HbbftPeersManagement { own_validator_address: Address, last_written_internet_address: Option, @@ -46,7 +40,9 @@ impl HbbftPeersManagement { } } - /// connections are not always required + /// connections are not always required. + /// - during syncing + /// - if not validator address specified. fn should_not_connect(&self, client: &dyn BlockChainClient) -> bool { // don't do any connections while the network is syncing. // the connection is not required yet, and might be outdated. @@ -95,7 +91,7 @@ impl HbbftPeersManagement { ) { connected_current_pending_validators.push(connected_validator); } else { - warn!(target: "Engine", "could not add pending validator to reserved peers: {}", pending_validator_address); + debug!(target: "Engine", "could not add pending validator to reserved peers: {}", pending_validator_address); } } } @@ -158,10 +154,12 @@ impl HbbftPeersManagement { // a current validator. pub fn connect_to_current_validators( &mut self, - network_info: &NetworkInfo, + validator_set: &Vec, client_arc: &Arc, ) { - warn!(target: "Engine", "adding current validators as reserved peers: {}", network_info.validator_set().all_ids().count()); + if validator_set.len() > 0 { + debug!(target: "Engine", "adding current validators as reserved peers. potential {}", validator_set.len()); + } // todo: iterate over NodeIds, extract the address // we do not need to connect to ourself. 
// figure out the IP and port from the contracts @@ -181,8 +179,6 @@ impl HbbftPeersManagement { return; } - let ids: Vec<&NodeId> = network_info.validator_set().all_ids().collect(); - // let mut validators_to_remove: BTreeSet = BTreeSet::new(); let mut validators_to_remove: BTreeSet
= self @@ -193,8 +189,8 @@ impl HbbftPeersManagement { // validators_to_remove let mut current_validator_connections: Vec = Vec::new(); - - for node in ids.iter() { + let mut validators_to_connect_count = 0; + for node in validator_set.iter() { let address = public_key_to_address(&node.0); if address == self.own_validator_address { @@ -208,49 +204,59 @@ impl HbbftPeersManagement { self.connect_to_validator(client, block_chain_client, &address) { validators_to_remove.remove(&connection.mining_address); + validators_to_connect_count += 1; current_validator_connections.push(connection); } else { warn!(target: "Engine", "could not add current validator to reserved peers: {}", address); } } - info!("removing {} reserved peers, because they are neither a pending validator nor a current validator.", validators_to_remove.len()); + if validators_to_remove.len() > 0 { + info!( + "removing {} reserved peers, because they are neither a pending validator nor a current validator.", + validators_to_remove.len() + ); - let mut peers_management_guard = block_chain_client.reserved_peers_management().lock(); + let mut peers_management_guard = block_chain_client.reserved_peers_management().lock(); - if let Some(peers_management) = peers_management_guard.as_deref_mut() { - for current_validator in self.connected_current_validators.iter() { - if validators_to_remove.contains(¤t_validator.mining_address) { - match peers_management.remove_reserved_peer(¤t_validator.peer_string) { - Ok(_) => { - info!(target: "Engine", "removed reserved peer {}", current_validator.peer_string); - } - Err(error) => { - warn!(target: "Engine", "could not remove reserved peer {}: reason: {}", current_validator.peer_string, error); + if let Some(peers_management) = peers_management_guard.as_deref_mut() { + for current_validator in self.connected_current_validators.iter() { + if validators_to_remove.contains(¤t_validator.mining_address) { + match peers_management.remove_reserved_peer(¤t_validator.peer_string) + 
{ + Ok(_) => { + info!(target: "Engine", "removed reserved peer {}", current_validator.peer_string); + } + Err(error) => { + warn!(target: "Engine", "could not remove reserved peer {}: reason: {}", current_validator.peer_string, error); + } } } } - } - peers_management - .get_reserved_peers() - .iter() - .for_each(|peer| { - info!(target: "Engine", "reserved peer: {}", peer); - }); + peers_management + .get_reserved_peers() + .iter() + .for_each(|peer| { + info!(target: "Engine", "reserved peer: {}", peer); + }); + } } - // we have now connected all additional current validators, kept the connection for those that have already been connected, // and we have disconnected all previous validators that are not current validators anymore. // so we now can set the information of collected validators. + if validators_to_connect_count > 0 { + info!(target: "Engine", "added {} current validators as reserved peers.", validators_to_connect_count); + } + self.connected_current_validators = current_validator_connections; } - // if we drop out as a current validator, - // as well a pending validator, we should drop - // all reserved connections. - // in later addition, we will keep the Partner Node Connections here. (upcomming feature) + /// if we drop out as a current validator, + /// as well a pending validator, we should drop + /// all reserved connections. + /// in later addition, we will keep the Partner Node Connections here. (upcoming feature) pub fn disconnect_all_validators(&mut self, client_arc: &Arc) { // we safely can disconnect even in situation where we are syncing. @@ -292,7 +298,9 @@ impl HbbftPeersManagement { } } - info!(target: "engine", "removed {} peers from reserved peers management.", removed.len()); + if removed.len() > 0 { + info!(target: "engine", "removed {} peers from reserved peers management.", removed.len()); + } } // regardless of disconnect problems here, we clear all the data here. 
@@ -307,6 +315,7 @@ impl HbbftPeersManagement { /// because those should be current validators by now. /// Make sure to connect to the new current validators, /// before disconnecting from the pending validators. + #[allow(dead_code)] pub fn disconnect_pending_validators( &mut self, client: &dyn BlockChainClient, @@ -318,7 +327,7 @@ impl HbbftPeersManagement { let mut guard = client .reserved_peers_management() .try_lock_for(Duration::from_millis(100)) - .ok_or("Error".to_string())?; + .ok_or("Could not acquire reserved peers management within 100ms".to_string())?; if let Some(reserved_peers_management) = guard.as_deref_mut() { let mut kept_peers = Vec::::new(); @@ -364,14 +373,17 @@ impl HbbftPeersManagement { engine_client: &dyn EngineClient, mining_address: &Address, staking_address: &Address, - ) -> Result<(), String> { + ) -> Result { + if !self.should_announce_own_internet_address(block_chain_client) { + return Ok(false); + } // updates the nodes internet address if the information on the blockchain is outdated. // check if the stored internet address differs from our. // we do not need to do a special handling for 0.0.0.0, because // our IP is always different to that. 
- warn!(target: "engine", "checking if internet address needs to be updated."); + trace!(target: "engine", "checking if internet address needs to be updated."); let current_endpoint = if let Some(peers_management) = block_chain_client .reserved_peers_management() @@ -382,7 +394,7 @@ impl HbbftPeersManagement { endpoint } else { warn!(target: "engine", "devp2p endpoint not available."); - return Ok(()); + return Ok(false); } } else { error!(target: "engine", "Unable to lock reserved_peers_management"); @@ -390,20 +402,20 @@ impl HbbftPeersManagement { }; //let peers_management = - warn!(target: "engine", "current Endpoint: {:?}", current_endpoint); + trace!(target: "engine", "current Endpoint: {:?}", current_endpoint); // todo: we can improve performance, // by assuming that we are the only one who writes the internet address. // so we have to query this data only once, and then we can cache it. match get_validator_internet_address(engine_client, &staking_address) { Ok(validator_internet_address) => { - warn!(target: "engine", "stored validator address{:?}", validator_internet_address); + trace!(target: "engine", "stored validator address{:?}", validator_internet_address); if validator_internet_address.eq(¤t_endpoint) { // if the current stored endpoint is the same as the current endpoint, // we don't need to do anything. // but we cache the current endpoint, so we don't have to query the db again. 
self.last_written_internet_address = Some(current_endpoint); - return Ok(()); + return Ok(false); } match set_validator_internet_address( @@ -413,7 +425,7 @@ impl HbbftPeersManagement { ) { Ok(()) => { self.last_written_internet_address = Some(current_endpoint); - return Ok(()); + return Ok(true); } Err(err) => { error!(target: "engine", "unable to set validator internet address: {:?}", err); @@ -536,6 +548,7 @@ fn connect_to_validator_core( }; if socket_addr.port() == 0 { + debug!(target: "engine", "connect_to_validator_core: no port specified for Node ( Public (NodeId): {:?} , staking address: {}, socket_addr: {:?}", node_id, staking_address, socket_addr); // we interprate port 0 as NULL. return None; } @@ -549,14 +562,15 @@ fn connect_to_validator_core( info!(target: "engine", "adding reserved peer: {}", peer_string); if let Err(err) = peers_management.add_reserved_peer(&peer_string) { warn!(target: "engine", "failed to adding reserved: {} : {}", peer_string, err); + return None; } return Some(ValidatorConnectionData { - staking_address: staking_address, + //staking_address: staking_address, //mining_address: *address, - socket_addr: socket_addr, + //socket_addr: socket_addr, peer_string, - public_key: node_id.clone(), + //public_key: node_id.clone(), mining_address: Address::zero(), // all caller of this function will set this value. 
}); } else { diff --git a/crates/ethcore/src/engines/hbbft/hbbft_state.rs b/crates/ethcore/src/engines/hbbft/hbbft_state.rs index 3c93244e7a..725e5b75fc 100644 --- a/crates/ethcore/src/engines/hbbft/hbbft_state.rs +++ b/crates/ethcore/src/engines/hbbft/hbbft_state.rs @@ -1,31 +1,38 @@ -use client::traits::EngineClient; -use engines::signer::EngineSigner; +use crate::{ + client::traits::EngineClient, + engines::signer::EngineSigner, + types::{header::Header, ids::BlockId}, +}; use ethcore_miner::pool::{PoolVerifiedTransaction, ScoredTransaction}; use ethereum_types::U256; +use ethjson::spec::hbbft::HbbftNetworkFork; use hbbft::{ + Epoched, NetworkInfo, crypto::{PublicKey, Signature}, honey_badger::{self, HoneyBadgerBuilder}, - Epoched, NetworkInfo, }; +use io::IoService; use parking_lot::{Mutex, RwLock}; use rand::seq::IteratorRandom; use std::{ collections::{BTreeMap, HashMap}, sync::Arc, + time::Duration, }; -use types::{header::Header, ids::BlockId}; use crate::engines::hbbft::contracts::permission::get_minimum_gas_from_permission_contract; use super::{ + NodeId, contracts::{ keygen_history::{initialize_synckeygen, synckeygen_to_network_info}, staking::{get_posdao_epoch, get_posdao_epoch_start}, validator_set::ValidatorType, }, contribution::Contribution, - hbbft_peers_management::HbbftPeersManagement, - NodeId, + hbbft_early_epoch_end_manager::HbbftEarlyEpochEndManager, + hbbft_network_fork_manager::HbbftNetworkForkManager, + hbbft_peers_handler::HbbftConnectToPeersMessage, }; pub type HbMessage = honey_badger::Message; @@ -38,9 +45,13 @@ pub(crate) struct HbbftState { network_info: Option>, honey_badger: Option, public_master_key: Option, + historic_public_keys: BTreeMap, current_posdao_epoch: u64, current_posdao_epoch_start_block: u64, + last_fork_start_block: Option, + last_posdao_epoch_start_block: Option, future_messages_cache: BTreeMap>, + fork_manager: HbbftNetworkForkManager, } impl HbbftState { @@ -49,9 +60,13 @@ impl HbbftState { network_info: None, 
honey_badger: None, public_master_key: None, + historic_public_keys: BTreeMap::new(), current_posdao_epoch: 0, current_posdao_epoch_start_block: 0, + last_posdao_epoch_start_block: None, + last_fork_start_block: None, future_messages_cache: BTreeMap::new(), + fork_manager: HbbftNetworkForkManager::new(), } } @@ -61,6 +76,26 @@ impl HbbftState { return Some(builder.build()); } + /// Resets only the underlying HoneyBadger instance, leaving all other state intact. + /// Returns Some(()) if the instance was recreated, or None if there is no network_info + /// or HoneyBadger creation failed. + pub fn reset_honeybadger(&mut self) -> Option<()> { + let network_info = self.network_info.as_ref()?.clone(); + let honey_badger = self.new_honey_badger(network_info)?; + self.honey_badger = Some(honey_badger); + Some(()) + } + + pub fn init_fork_manager( + &mut self, + own_id: NodeId, + latest_block: u64, + fork_definition: Vec, + ) { + self.fork_manager + .initialize(own_id, latest_block, fork_definition); + } + /** * Updates the underlying honeybadger instance, possible switching into a new * honeybadger instance if according to contracts a new staking epoch has started. @@ -70,7 +105,8 @@ impl HbbftState { &mut self, client: Arc, signer: &Arc>>>, - peers_management_mutex: &Mutex, + peers_service: &IoService, + early_epoch_end_manager_mutex: &Mutex>, current_minimum_gas_price: &Mutex>, block_id: BlockId, force: bool, @@ -84,6 +120,51 @@ impl HbbftState { } } + // https://github.com/DMDcoin/diamond-node/issues/98 + // check here if we are in a fork scenario. + // in a fork scenario, the new honeybadger keys will come from the config, + // and not from the contracts. + // also the current block will trigger the epoch end, + // this will start the loop for finding a new validator set, + // probably it will fail multiple times, + // because nodes that do not apply to the fork rule will drop out. 
+ // this might happen for a lot of key-gen rounds, until a set with responsive validators + // can be found. + + if let Some(last_block_number) = client.block_number(block_id) { + debug!(target: "engine", "Current Block: {}", last_block_number); + if let Some(network_info) = self.fork_manager.should_fork( + last_block_number, + self.current_posdao_epoch, + signer.clone(), + ) { + info!(target: "engine", "Forking at block {last_block_number}, starting new honeybadger instance with new validator set."); + + for id in network_info.validator_set().all_ids() { + info!(target: "engine", "Fork Validator: {}", id); + } + + self.public_master_key = Some(network_info.public_key_set().public_key()); + self.honey_badger = Some(self.new_honey_badger(network_info.clone())?); + + self.historic_public_keys + .insert(target_posdao_epoch, self.public_master_key.unwrap().clone()); + + for x in network_info.validator_set().all_ids() { + info!(target: "engine", "Validator: {:?}", x); + } + + self.network_info = Some(network_info); + self.last_fork_start_block = Some(last_block_number); + self.current_posdao_epoch_start_block = last_block_number; + + return Some(()); + } + } else { + error!(target: "engine", "fork: could not get block number for block_id: {:?}", block_id); + } + // + if !force && self.current_posdao_epoch == target_posdao_epoch { // hbbft state is already up to date. // @todo Return proper error codes. 
@@ -91,17 +172,28 @@ impl HbbftState { } let posdao_epoch_start = get_posdao_epoch_start(&*client, block_id).ok()?; - let synckeygen = initialize_synckeygen( + let synckeygen = match initialize_synckeygen( &*client, signer, BlockId::Number(posdao_epoch_start.low_u64()), ValidatorType::Current, - ) - .ok()?; - assert!(synckeygen.is_ready()); + ) { + Ok(synckey) => synckey, + Err(e) => { + error!(target: "engine", "error initializing synckeygen for block: {:?}: {:?}", block_id, e); + return None; + } + }; + + if !synckeygen.is_ready() { + error!(target: "engine", "Synckeygen not ready when it should be!"); + return None; + } let (pks, sks) = synckeygen.generate().ok()?; self.public_master_key = Some(pks.public_key()); + self.historic_public_keys + .insert(target_posdao_epoch, pks.public_key()); // Clear network info and honey badger instance, since we may not be in this POSDAO epoch any more. info!(target: "engine", "public master key: {:?}", pks.public_key()); @@ -109,6 +201,7 @@ impl HbbftState { self.honey_badger = None; // Set the current POSDAO epoch # self.current_posdao_epoch = target_posdao_epoch; + self.last_posdao_epoch_start_block = Some(self.current_posdao_epoch_start_block); self.current_posdao_epoch_start_block = posdao_epoch_start.as_u64(); trace!(target: "engine", "Switched hbbft state to epoch {}.", self.current_posdao_epoch); @@ -116,23 +209,23 @@ impl HbbftState { // apply DAO updates here. // update the current minimum gas price. - match get_minimum_gas_from_permission_contract(client.as_ref(), BlockId::Number(self.current_posdao_epoch_start_block)) { + match get_minimum_gas_from_permission_contract( + client.as_ref(), + BlockId::Number(self.current_posdao_epoch_start_block), + ) { Ok(min_gas) => { *current_minimum_gas_price.lock() = Some(min_gas); - }, + } Err(err) => { warn!(target: "engine", "Could not read min gas from hbbft permission contract. 
{:?}.", err); - }, + } } if sks.is_none() { info!(target: "engine", "We are not part of the HoneyBadger validator set - running as regular node."); - // we can disconnect the peers here. - if let Some(mut peers_management) = - peers_management_mutex.try_lock_for(std::time::Duration::from_millis(50)) - { - peers_management.disconnect_all_validators(&client); - } + peers_service + .send_message(HbbftConnectToPeersMessage::DisconnectAllValidators) + .ok()?; return Some(()); } @@ -142,20 +235,32 @@ impl HbbftState { info!(target: "engine", "HoneyBadger Algorithm initialized! Running as validator node."); - // this is importent, but we should not risk deadlocks... - // maybe we should refactor this to a message Queue system, and pass a "connect_to_current_validators" message - if let Some(mut peers_management) = - peers_management_mutex.try_lock_for(std::time::Duration::from_millis(250)) - { - peers_management.connect_to_current_validators(&network_info, &client); - } else { - // maybe we should work with signals that signals that connect_to_current_validators should happen - // instead of trying to achieve a lock here. - // in this case: - // if Node A cannot acquire the lock, but Node B can, then Node B connects to Node A, - // and we are find. - // if both nodes cannot acquire the lock, then we are busted. 
- warn!(target: "engine", "could not acquire to connect to current validators on switching to new validator set for staking epoch {}.", self.current_posdao_epoch); + peers_service + .send_message(HbbftConnectToPeersMessage::ConnectToCurrentPeers( + self.get_validator_set(), + )) + .ok()?; + + let allowed_devp2p_warmup_time = Duration::from_secs(1200); + + if let Some(full_client) = client.as_full_client() { + let signing_address = if let Some(s) = signer.read().as_ref() { + s.address() + } else { + error!(target: "engine", "early epoch manager: signer is not set!"); + ethereum_types::Address::zero() + }; + + *early_epoch_end_manager_mutex.lock() = + HbbftEarlyEpochEndManager::create_early_epoch_end_manager( + allowed_devp2p_warmup_time, + full_client, + client.as_ref(), + self.current_posdao_epoch, + self.current_posdao_epoch_start_block, + self.get_validator_set(), + &signing_address, + ); } Some(()) @@ -281,10 +386,12 @@ impl HbbftState { match honey_badger.handle_message(&sender_id, message) { Ok(step) => return Ok(Some((step, network_info.clone()))), Err(err) => { - // TODO: Report consensus step errors - // maybe we are not part of the HBBFT Set anymore ? - // maybe the sender is not Part of the hbbft set ? - // maybe we have the wrong hbbft for decryption ? + // the sender is possible not in the hbbft set anymore + // and can ignore this error and not process a step. + let epoch = message_epoch; + if epoch < self.current_posdao_epoch_start_block { + return Ok(None); + } error!(target: "consensus", "Error on handling HoneyBadger message from {} in epoch {} error: {:?}", sender_id, message_epoch, err); return Err(err); @@ -333,7 +440,7 @@ impl HbbftState { if let Some(latest_block) = client.block_number(BlockId::Latest) { if honey_badger.epoch() != latest_block + 1 { - info!(target: "consensus", "Detected an attempt to send a hbbft contribution for block {} before the previous block was imported to the chain. 
(latest block: {})", honey_badger.epoch(), latest_block); + debug!(target: "consensus", "Detected an attempt to send a hbbft contribution for block {} before the previous block was imported to the chain. (latest block: {})", honey_badger.epoch(), latest_block); return None; } } @@ -406,13 +513,13 @@ impl HbbftState { if tx.nonce() >= min_nonce { transactions_subset.push(tx); } else { - info!(target: "consensus", "Block creation: Pending transaction with nonce too low, got {}, expected at least {}", tx.nonce(), min_nonce); + debug!(target: "consensus", "Block creation: Pending transaction with nonce too low, got {}, expected at least {}", tx.nonce(), min_nonce); } } } } - info!(target: "consensus", "Block creation: Honeybadger epoch {}, Transactions subset target size: {}, actual size: {}, from available {}.", honey_badger.epoch(), transactions_subset_size, transactions_subset.len(), max_transactions_for_block.len()); + trace!(target: "consensus", "Block creation: Honeybadger epoch {}, Transactions subset target size: {}, actual size: {}, from available {}.", honey_badger.epoch(), transactions_subset_size, transactions_subset.len(), max_transactions_for_block.len()); let signed_transactions = transactions_subset .iter() @@ -422,7 +529,7 @@ impl HbbftState { // Now we can select the transactions to include in our contribution. let input_contribution = Contribution::new(&signed_transactions); - let mut rng = rand_065::thread_rng(); + let mut rng = rand::thread_rng(); let step = honey_badger.propose(&input_contribution, &mut rng); match step { Ok(step) => Some((step, network_info)), @@ -441,6 +548,8 @@ impl HbbftState { signature: &Signature, header: &Header, ) -> bool { + // maybe add the option: "not ready yet ?!" + self.skip_to_current_epoch(client.clone(), signer); // Check if posdao epoch fits the parent block of the header seal to verify. 
@@ -453,55 +562,32 @@ impl HbbftState { return false; } }; - if self.current_posdao_epoch != target_posdao_epoch { + + if self.current_posdao_epoch > target_posdao_epoch { trace!(target: "consensus", "verify_seal - hbbft state epoch does not match epoch at the header's parent, attempting to reconstruct the appropriate public key share from scratch."); - // If the requested block nr is already imported we try to generate the public master key from scratch. - let posdao_epoch_start = match get_posdao_epoch_start( - &*client, - BlockId::Number(parent_block_nr), - ) { - Ok(epoch_start) => epoch_start, - Err(e) => { - error!(target: "consensus", "Querying epoch start block failed with error: {:?}", e); - return false; - } - }; - let synckeygen = match initialize_synckeygen( - &*client, - &Arc::new(RwLock::new(Option::None)), - BlockId::Number(posdao_epoch_start.low_u64()), - ValidatorType::Current, - ) { - Ok(synckeygen) => synckeygen, - Err(e) => { - error!(target: "consensus", "Synckeygen failed with error: {:?}", e); + match self.historic_public_keys.get(&target_posdao_epoch) { + Some(key) => { + if key.verify(signature, header.bare_hash()) { + return true; + } else { + error!(target: "consensus", "Failed to verify seal - historic public key verification failed!"); + return false; + } + } + None => { + warn!(target: "consensus", "unable to verifiy seal for historic block, public key not available."); return false; } - }; - - if !synckeygen.is_ready() { - error!(target: "consensus", "Synckeygen not ready when it sohuld be!"); - return false; } - - let pks = match synckeygen.generate() { - Ok((pks, _)) => pks, - Err(e) => { - error!(target: "consensus", "Generating of public key share failed with error: {:?}", e); - return false; + } else { + // not a historic block, we can use the current public key. 
+ match self.public_master_key { + Some(key) => key.verify(signature, header.bare_hash()), + None => { + error!(target: "consensus", "Failed to verify seal - public master key not available!"); + false } - }; - - trace!(target: "consensus", "verify_seal - successfully reconstructed public key share of past posdao epoch."); - return pks.public_key().verify(signature, header.bare_hash()); - } - - match self.public_master_key { - Some(key) => key.verify(signature, header.bare_hash()), - None => { - error!(target: "consensus", "Failed to verify seal - public master key not available!"); - false } } } @@ -514,9 +600,14 @@ impl HbbftState { ) -> Option> { self.skip_to_current_epoch(client.clone(), signer); - let posdao_epoch = get_posdao_epoch(&*client, BlockId::Number(block_nr - 1)) - .ok()? - .low_u64(); + // Performance: we can use cache here, since this contract calls return deterministic results. + let posdao_epoch = match get_posdao_epoch(&*client, BlockId::Number(block_nr - 1)) { + Ok(number) => number.low_u64(), + Err(e) => { + error!(target: "consensus", "Failed to get network info - reading POSDAO epoch from contract failed! Error: {:?}", e); + return None; + } + }; if self.current_posdao_epoch != posdao_epoch { error!(target: "consensus", "Trying to get the network info from a different epoch. 
Current epoch: {}, Requested epoch: {}", @@ -527,8 +618,25 @@ impl HbbftState { self.network_info.clone() } - pub fn get_current_network_info(&self) -> Option> { - return self.network_info.clone(); + // pub fn get_current_network_info(&self) -> &Option> { + // return &self.network_info; + // } + + pub fn is_validator(&self) -> bool { + self.network_info.as_ref().is_some_and(|n| n.is_validator()) + } + + pub fn get_validator_set(&self) -> Vec { + if let Some(network_info) = &self.network_info { + let result: Vec = network_info + .validator_set() + .all_ids() + .map(|n| n.clone()) + .collect(); + return result; + } + + return Vec::new(); } pub fn get_current_posdao_epoch(&self) -> u64 { @@ -538,4 +646,8 @@ impl HbbftState { pub fn get_current_posdao_epoch_start_block(&self) -> u64 { self.current_posdao_epoch_start_block } + + pub fn get_last_posdao_epoch_start_block(&self) -> Option { + self.last_posdao_epoch_start_block + } } diff --git a/crates/ethcore/src/engines/hbbft/keygen_transactions.rs b/crates/ethcore/src/engines/hbbft/keygen_transactions.rs index 507cabca4c..b611865e58 100644 --- a/crates/ethcore/src/engines/hbbft/keygen_transactions.rs +++ b/crates/ethcore/src/engines/hbbft/keygen_transactions.rs @@ -1,30 +1,71 @@ -use client::traits::{EngineClient, TransactionRequest}; -use engines::{ - hbbft::{ - contracts::{ - keygen_history::{ - engine_signer_to_synckeygen, get_current_key_gen_round, has_acks_of_address_data, - key_history_contract, part_of_address, PublicWrapper, KEYGEN_HISTORY_ADDRESS, - }, - staking::get_posdao_epoch, - validator_set::{ - get_pending_validator_key_generation_mode, get_validator_pubkeys, KeyGenMode, - ValidatorType, +use crate::{ + client::traits::{EngineClient, TransactionRequest}, + engines::{ + hbbft::{ + contracts::{ + keygen_history::{ + KEYGEN_HISTORY_ADDRESS, PublicWrapper, engine_signer_to_synckeygen, + get_current_key_gen_round, has_acks_of_address_data, key_history_contract, + part_of_address, + }, + 
staking::get_posdao_epoch, + validator_set::{ + KeyGenMode, ValidatorType, get_pending_validator_key_generation_mode, + get_validator_pubkeys, + }, }, + utils::bound_contract::CallError, }, - utils::bound_contract::CallError, + signer::EngineSigner, }, - signer::EngineSigner, + types::ids::BlockId, }; -use ethereum_types::{Address, U256}; +use ethcore_miner::pool::local_transactions::Status; +use ethereum_types::{Address, Public, U256}; +use hash::H256; +use hbbft::sync_key_gen::SyncKeyGen; use itertools::Itertools; use parking_lot::RwLock; -use std::{collections::BTreeMap, sync::Arc}; -use types::ids::BlockId; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; + +use crate::client::BlockChainClient; + +static MAX_BLOCKCHAIN_AGE_FOR_KEYGEN: u64 = 10; // seconds + +pub enum ServiceTransactionType { + /// KeyGenTransaction: (u64: epoch, u64: round, KeyGenMode) + KeyGenTransaction(u64, u64, KeyGenMode), +} + +pub struct ServiceTransactionMemory { + /// Time when the transaction was send. + pub send_time: Instant, + + // It would be good to have a transaction Hash here. + pub transaction_hash: H256, + + /// Type of the transaction, e.g. KeyGen Part or Ack. + pub transaction_type: ServiceTransactionType, + + /// Nonce of the transaction it was send with. + //pub nonce: U256, + + /// Block number, at which this transaction was "sent", + /// in the meaning of prepared to be propagated. + pub block_sent: u64, + // It would be good to know if the Service Transaction got included. + // pub inclusion_block: Option, +} pub struct KeygenTransactionSender { - last_keygen_mode: KeyGenMode, - keygen_mode_counter: u64, + /// Minimum delay between for resending key gen transactions in milliseconds. + key_gen_transaction_delay_milliseconds: u128, + + /// Minimum delay for resending key gen transactions, in milliseconds. + key_gen_transaction_delay_blocks: u64, + + /// Last key gen service transaction we sent. 
+ last_keygen_service_transaction: Option, } enum ShouldSendKeyAnswer { @@ -36,14 +77,31 @@ enum ShouldSendKeyAnswer { Yes, } -static KEYGEN_TRANSACTION_SEND_DELAY: u64 = 3; -static KEYGEN_TRANSACTION_RESEND_DELAY: u64 = 10; +#[derive(Debug)] +pub enum KeyGenError { + NoSigner, + NoFullClient, + NoPartToWrite, + #[allow(dead_code)] + CallError(CallError), + Unexpected, +} + +impl From for KeyGenError { + fn from(e: CallError) -> Self { + KeyGenError::CallError(e) + } +} impl KeygenTransactionSender { - pub fn new() -> Self { + pub fn new( + key_gen_transaction_delay_blocks: u64, + key_gen_transaction_delay_milliseconds: u128, + ) -> Self { KeygenTransactionSender { - last_keygen_mode: KeyGenMode::Other, - keygen_mode_counter: 0, + last_keygen_service_transaction: None, + key_gen_transaction_delay_blocks, + key_gen_transaction_delay_milliseconds, } } @@ -52,29 +110,108 @@ impl KeygenTransactionSender { client: &dyn EngineClient, mining_address: &Address, mode_to_check: KeyGenMode, + upcoming_epoch: &U256, + current_round: &U256, ) -> Result { let keygen_mode = get_pending_validator_key_generation_mode(client, mining_address)?; if keygen_mode == mode_to_check { - if self.last_keygen_mode == mode_to_check { - self.keygen_mode_counter += 1; - if self.keygen_mode_counter == KEYGEN_TRANSACTION_SEND_DELAY { - return Ok(ShouldSendKeyAnswer::Yes); - } else if self.keygen_mode_counter > KEYGEN_TRANSACTION_SEND_DELAY { - // Part should have been sent already, - // give the chain time to include the transaction before trying a re-send. 
- if (self.keygen_mode_counter - KEYGEN_TRANSACTION_SEND_DELAY) - % KEYGEN_TRANSACTION_RESEND_DELAY - == 0 - { - return Ok(ShouldSendKeyAnswer::Yes); + match &self.last_keygen_service_transaction { + Some(last_sent) => { + match &last_sent.transaction_type { + ServiceTransactionType::KeyGenTransaction( + historic_upcoming_epoch, + historic_round, + historic_key_gen_mode, + ) => { + if *historic_key_gen_mode != keygen_mode + || *historic_upcoming_epoch != upcoming_epoch.as_u64() + || *historic_round != current_round.as_u64() + { + // other key gen mode, we need to send. + return Ok(ShouldSendKeyAnswer::Yes); + } + + let mut transaction_lost = false; + // check if our last sent transaction is still pending. + if let Some(service_tx_state) = + client.local_transaction_status(&last_sent.transaction_hash) + { + match service_tx_state { + Status::Culled(_) + | Status::Dropped(_) + | Status::Rejected(..) + | Status::Replaced { .. } + | Status::Invalid(_) + | Status::Canceled(_) => { + transaction_lost = true; + } + _ => {} + } + } else { + // the transaction got lost, and probably transaction info got already deleted. + // it still might also got already included into a block. + transaction_lost = true; + } + + if transaction_lost { + // maybe we lost the key gen transaction, because it got included into a block. + + // make sure we did not just witness block inclusion. + if let Some(full_client) = client.as_full_client() { + if let Some(transaction) = full_client.block_transaction( + types::ids::TransactionId::Hash(last_sent.transaction_hash), + ) { + // our service transaction got included. + warn!(target: "engine", "key gen transaction got included in block {} but we are still in wrong state ?!", transaction.block_number); + return Ok(ShouldSendKeyAnswer::NoWaiting); + } else { + // our transaction is not pending anymore, and also has not got included into a block, we should resend. 
+ return Ok(ShouldSendKeyAnswer::Yes); + } + } else { + // that should really never happen. + warn!(target:"engine", "could not get full client to check for inclusion of key gen transaction"); + } + } + + // if we are still in the same situation, we need to figure out if we just should retry to send our last transaction. + if last_sent.send_time.elapsed().as_millis() + < self.key_gen_transaction_delay_milliseconds + { + // we sent a transaction recently, so we should wait a bit. + return Ok(ShouldSendKeyAnswer::NoWaiting); + } + + let current_block = client.block_number(BlockId::Latest).unwrap_or(0); + + // this check also prevents the resending of Transactions if no block got mined. (e.g. because of stalled network) + if last_sent.block_sent + self.key_gen_transaction_delay_blocks + > current_block + { + // rational behind: + // if blocks are not created anyway, + // we do not have to send new transactions. + + // example: + // send on block 10 (last_sent.block_sent = 10) + // key_gen_transaction_delay_blocks = 2 + // resent after Block 12. + // current block is 11: waiting + // current block is 12: waiting + // current block is 13: not entering => YES + + // we sent a transaction recently, so we should wait a bit. + return Ok(ShouldSendKeyAnswer::NoWaiting); + } + + return Ok(ShouldSendKeyAnswer::Yes); + } } - } else { - return Ok(ShouldSendKeyAnswer::NoWaiting); } - } else { - self.last_keygen_mode = mode_to_check; - self.keygen_mode_counter = 1; - return Ok(ShouldSendKeyAnswer::NoWaiting); + None => { + // we never sent a key gen transaction, so we should send one. 
+ return Ok(ShouldSendKeyAnswer::Yes); + } } } return Ok(ShouldSendKeyAnswer::NoNotThisKeyGenMode); @@ -84,96 +221,177 @@ impl KeygenTransactionSender { &mut self, client: &dyn EngineClient, mining_address: &Address, + upcoming_epoch: &U256, + current_round: &U256, ) -> Result { - self.should_send(client, mining_address, KeyGenMode::WritePart) + self.should_send( + client, + mining_address, + KeyGenMode::WritePart, + upcoming_epoch, + current_round, + ) } fn should_send_ack( &mut self, client: &dyn EngineClient, mining_address: &Address, + upcoming_epoch: &U256, + current_round: &U256, ) -> Result { - self.should_send(client, mining_address, KeyGenMode::WriteAck) + self.should_send( + client, + mining_address, + KeyGenMode::WriteAck, + upcoming_epoch, + current_round, + ) } - /// Returns a collection of transactions the pending validator has to submit in order to - /// complete the keygen history contract data necessary to generate the next key and switch to the new validator set. + /// sends key gen transaction if there are any to send. pub fn send_keygen_transactions( &mut self, client: &dyn EngineClient, signer: &Arc>>>, - ) -> Result<(), CallError> { + ) -> Result<(), KeyGenError> { // If we have no signer there is nothing for us to send. let address = match signer.read().as_ref() { Some(signer) => signer.address(), None => { - trace!(target: "engine", "Could not send keygen transactions, because signer module could not be retrieved"); - return Err(CallError::ReturnValueInvalid); + warn!(target: "engine", "Could not send keygen transactions, because signer module could not be retrieved"); + return Err(KeyGenError::NoSigner); } }; - let full_client = client.as_full_client().ok_or(CallError::NotFullClient)?; + let full_client = client.as_full_client().ok_or(KeyGenError::NoFullClient)?; // If the chain is still syncing, do not send Parts or Acks. 
if full_client.is_major_syncing() { - debug!(target:"engine", "skipping sending key gen transaction, because we are syncing"); - return Ok(()); + if let Some(lastes_block) = client.block_header(BlockId::Latest) { + let now = std::time::UNIX_EPOCH + .elapsed() + .expect("Time not available") + .as_secs(); + if now > lastes_block.timestamp() + MAX_BLOCKCHAIN_AGE_FOR_KEYGEN { + debug!(target:"engine", "skipping sending key gen transaction, because we are syncing."); + return Ok(()); + } else { + trace!(target:"engine", "We are syncing, but the latest block is recent. continuing sending key gen transactions"); + } + } else { + debug!(target:"engine", "skipping sending key gen transaction, because we are syncing and could not retrieve latest block."); + return Ok(()); + } } trace!(target:"engine", " get_validator_pubkeys..."); - let vmap = get_validator_pubkeys(&*client, BlockId::Latest, ValidatorType::Pending)?; + let vmap = get_validator_pubkeys(&*client, BlockId::Latest, ValidatorType::Pending) + .map_err(|e| KeyGenError::CallError(e))?; + let pub_keys: BTreeMap<_, _> = vmap .values() .map(|p| (*p, PublicWrapper { inner: p.clone() })) .collect(); + let pub_keys_arc = Arc::new(pub_keys); + let upcoming_epoch = + get_posdao_epoch(client, BlockId::Latest).map_err(|e| KeyGenError::CallError(e))? + 1; + + //let pub_key_len = pub_keys.len(); // if synckeygen creation fails then either signer or validator pub keys are problematic. // Todo: We should expect up to f clients to write invalid pub keys. Report and re-start pending validator set selection. 
- let (mut synckeygen, part) = engine_signer_to_synckeygen(signer, Arc::new(pub_keys)) - .map_err(|_| CallError::ReturnValueInvalid)?; + let (mut synckeygen, part) = match engine_signer_to_synckeygen(signer, pub_keys_arc.clone()) + { + Ok((synckeygen_, part_)) => (synckeygen_, part_), + Err(e) => { + warn!(target:"engine", "engine_signer_to_synckeygen pub keys count {:?} error {:?}", pub_keys_arc.len(), e); + //let mut failure_pub_keys: Vec = Vec::new(); + let mut failure_pub_keys: Vec = Vec::new(); + pub_keys_arc.iter().for_each(|(k, v)| { + warn!(target:"engine", "pub key {}", k.as_bytes().iter().join("")); + + if !v.is_valid() { + warn!(target:"engine", "INVALID pub key {}", k); + + // append the bytes of the public key to the failure_pub_keys. + k.as_bytes().iter().for_each(|b| { + failure_pub_keys.push(*b); + }); + } + }); + + // if we should send our parts, we will send the public keys of the troublemakers instead. + + let current_round = get_current_key_gen_round(client)?; + + match self + .should_send_part(client, &address, &upcoming_epoch, ¤t_round) + .map_err(|e| KeyGenError::CallError(e))? + { + ShouldSendKeyAnswer::NoNotThisKeyGenMode => { + return Err(KeyGenError::Unexpected); + } + ShouldSendKeyAnswer::NoWaiting => return Err(KeyGenError::Unexpected), + ShouldSendKeyAnswer::Yes => { + let serialized_part = match bincode::serialize(&failure_pub_keys) { + Ok(part) => part, + Err(e) => { + warn!(target:"engine", "could not serialize part: {:?}", e); + return Err(KeyGenError::Unexpected); + } + }; + + let current_round = get_current_key_gen_round(client)?; + + self.send_part_transaction( + full_client, + client, + &address, + &upcoming_epoch, + ¤t_round, + serialized_part, + )?; + return Ok(()); + } + } + } + }; // If there is no part then we are not part of the pending validator set and there is nothing for us to do. 
let part_data = match part { Some(part) => part, - None => return Err(CallError::ReturnValueInvalid), + None => { + warn!(target:"engine", "no part to write."); + return Err(KeyGenError::NoPartToWrite); + } }; - let upcoming_epoch = get_posdao_epoch(client, BlockId::Latest)? + 1; - trace!(target:"engine", "preparing to send PARTS for upcoming epoch: {}", upcoming_epoch); + let current_round = get_current_key_gen_round(client)?; + + trace!(target:"engine", "preparing to send keys for upcoming epoch: {} - round {}", upcoming_epoch, current_round); // Check if we already sent our part. - match self.should_send_part(client, &address)? { + match self.should_send_part(client, &address, &upcoming_epoch, ¤t_round)? { ShouldSendKeyAnswer::Yes => { let serialized_part = match bincode::serialize(&part_data) { Ok(part) => part, - Err(_) => return Err(CallError::ReturnValueInvalid), + Err(e) => { + warn!(target:"engine", "could not serialize part: {:?}", e); + return Err(KeyGenError::Unexpected); + } }; - let serialized_part_len = serialized_part.len(); - let current_round = get_current_key_gen_round(client)?; - let write_part_data = key_history_contract::functions::write_part::call( - upcoming_epoch, - current_round, + + self.send_part_transaction( + full_client, + client, + &address, + &upcoming_epoch, + ¤t_round, serialized_part, - ); - - // the required gas values have been approximated by - // experimenting and it's a very rough estimation. - // it can be further fine tuned to be just above the real consumption. - // ACKs require much more gas, - // and usually run into the gas limit problems. 
- let gas: usize = serialized_part_len * 800 + 100_000; - - let part_transaction = - TransactionRequest::call(*KEYGEN_HISTORY_ADDRESS, write_part_data.0) - .gas(U256::from(gas)) - .nonce(full_client.nonce(&address, BlockId::Latest).unwrap()) - .gas_price(U256::from(10000000000u64)); - full_client - .transact_silently(part_transaction) - .map_err(|_| CallError::ReturnValueInvalid)?; - - trace!(target:"engine", "PART Transaction send."); + )?; + return Ok(()); } ShouldSendKeyAnswer::NoWaiting => { @@ -184,70 +402,178 @@ impl KeygenTransactionSender { ShouldSendKeyAnswer::NoNotThisKeyGenMode => {} } - trace!(target:"engine", "checking for acks..."); + trace!(target:"engine", "has_acks_of_address_data: {:?}", has_acks_of_address_data(client, address)); + + // Now we are sure all parts are ready, let's check if we sent our Acks. + match self.should_send_ack(client, &address, &upcoming_epoch, ¤t_round)? { + ShouldSendKeyAnswer::Yes => { + self.send_ack_transaction( + full_client, + client, + &address, + &upcoming_epoch, + ¤t_round, + &vmap, + &mut synckeygen, + )?; + } + _ => {} + } + + Ok(()) + } + + fn send_ack_transaction( + &mut self, + full_client: &dyn BlockChainClient, + client: &dyn EngineClient, + mining_address: &Address, + upcoming_epoch: &U256, + current_round: &U256, + vmap: &BTreeMap, + synckeygen: &mut SyncKeyGen, + ) -> Result<(), KeyGenError> { // Return if any Part is missing. let mut acks = Vec::new(); for v in vmap.keys().sorted() { acks.push( - match part_of_address(&*client, *v, &vmap, &mut synckeygen, BlockId::Latest) { - Ok(part_result) => { - match part_result { - Some(ack) => ack, - None => { - trace!(target:"engine", "could not retrieve part for {}", *v); - return Err(CallError::ReturnValueInvalid); - } - } - } - Err(err) => { - error!(target:"engine", "could not retrieve part for {} call failed. 
Error: {:?}", *v, err); - return Err(err); - } - } - ); + match part_of_address(&*client, *v, &vmap, synckeygen, BlockId::Latest) { + Ok(part_result) => { + match part_result { + Some(ack) => ack, + None => { + trace!(target:"engine", "could not retrieve part for {}", *v); + return Ok(()); + } + } + } + Err(err) => { + error!(target:"engine", "could not retrieve part for {} call failed. Error: {:?}", *v, err); + return Err(KeyGenError::CallError(err)); + } + } + ); } - trace!(target:"engine", "has_acks_of_address_data: {:?}", has_acks_of_address_data(client, address)); + let mut serialized_acks = Vec::new(); + let mut total_bytes_for_acks = 0; - // Now we are sure all parts are ready, let's check if we sent our Acks. - match self.should_send_ack(client, &address)? { - ShouldSendKeyAnswer::Yes => { - let mut serialized_acks = Vec::new(); - let mut total_bytes_for_acks = 0; - - for ack in acks { - let ack_to_push = match bincode::serialize(&ack) { - Ok(serialized_ack) => serialized_ack, - Err(_) => return Err(CallError::ReturnValueInvalid), - }; - total_bytes_for_acks += ack_to_push.len(); - serialized_acks.push(ack_to_push); - } - let current_round = get_current_key_gen_round(client)?; - let write_acks_data = key_history_contract::functions::write_acks::call( - upcoming_epoch, - current_round, - serialized_acks, - ); - - // the required gas values have been approximated by - // experimenting and it's a very rough estimation. - // it can be further fine tuned to be just above the real consumption. 
- let gas = total_bytes_for_acks * 850 + 200_000; - trace!(target: "engine","acks-len: {} gas: {}", total_bytes_for_acks, gas); - - let acks_transaction = - TransactionRequest::call(*KEYGEN_HISTORY_ADDRESS, write_acks_data.0) - .gas(U256::from(gas)) - .nonce(full_client.nonce(&address, BlockId::Latest).unwrap()) - .gas_price(U256::from(10000000000u64)); - full_client - .transact_silently(acks_transaction) - .map_err(|_| CallError::ReturnValueInvalid)?; - } - _ => {} + for ack in acks { + let ack_to_push = match bincode::serialize(&ack) { + Ok(serialized_ack) => serialized_ack, + Err(_) => return Err(KeyGenError::Unexpected), + }; + total_bytes_for_acks += ack_to_push.len(); + serialized_acks.push(ack_to_push); } + let write_acks_data = key_history_contract::functions::write_acks::call( + upcoming_epoch, + current_round, + serialized_acks, + ); + + // the required gas values have been approximated by + // experimenting and it's a very rough estimation. + // it can be further fine tuned to be just above the real consumption. + let gas = total_bytes_for_acks * 850 + 200_000; + trace!(target: "engine","acks-len: {} gas: {}", total_bytes_for_acks, gas); + + // Nonce Management is complex. + // we wont include queued transactions here, + // because key gen transactions are so important, + // that they are topic to "replace" other service transactions. + // it could trigger in a scenario where a service transaction was just sent, + // is getting included by other nodes, but this one does not know about it yet, + // sending a Nonce that is to small. + // if a transaction gets replaced, "own_tx Transaction culled" happens, + // in this case we there are signs, that our key gen transaction was not included, + // and we might need to resend it. + // currently there is no "observer" available, to observe culled transactions, + // local_transactions frequently deletes outdated transactions. 
+ // however: we could check if the transaction is neither available in the service transaction pool, + // nor available as included transaction. + // A better ServiceTransactionManager could be implemented to handle this more gracefully. + + let nonce = full_client + .nonce(&*mining_address, BlockId::Latest) + .unwrap_or(U256::zero()); + + let acks_transaction = TransactionRequest::call(*KEYGEN_HISTORY_ADDRESS, write_acks_data.0) + .gas(U256::from(gas)) + .nonce(nonce.clone()) + .gas_price(U256::from(10000000000u64)); + debug!(target: "engine", "sending acks with nonce: {}", acks_transaction.nonce.unwrap()); + let hash = full_client + .transact_silently(acks_transaction) + .map_err(|_| CallError::ReturnValueInvalid)?; + debug!(target: "engine", "sending acks tx: {}", hash); + + self.last_keygen_service_transaction = Some(ServiceTransactionMemory { + send_time: Instant::now(), + transaction_type: ServiceTransactionType::KeyGenTransaction( + upcoming_epoch.as_u64(), + current_round.as_u64(), + KeyGenMode::WriteAck, + ), + //nonce: nonce, + transaction_hash: hash, + block_sent: client.block_number(BlockId::Latest).unwrap_or(0), + }); + Ok(()) } + + fn send_part_transaction( + &mut self, + full_client: &dyn BlockChainClient, + client: &dyn EngineClient, + mining_address: &Address, + upcoming_epoch: &U256, + current_round: &U256, + data: Vec, + ) -> Result { + // the required gas values have been approximated by + // experimenting and it's a very rough estimation. + // it can be further fine tuned to be just above the real consumption. + // ACKs require much more gas, + // and usually run into the gas limit problems. + let gas: usize = data.len() * 800 + 100_000; + + // for detailed nonce management rational, check up send_ack_transaction. 
+ let nonce = full_client + .nonce(&*mining_address, BlockId::Latest) + .unwrap_or(U256::zero()); + + let write_part_data = + key_history_contract::functions::write_part::call(upcoming_epoch, current_round, data); + + let part_transaction = TransactionRequest::call(*KEYGEN_HISTORY_ADDRESS, write_part_data.0) + .gas(U256::from(gas)) + .nonce(nonce) + .gas_price(U256::from(10000000000u64)); + let hash = full_client + .transact_silently(part_transaction) + .map_err(|e| { + warn!(target:"engine", "could not transact_silently: {:?}", e); + CallError::ReturnValueInvalid + })?; + + self.last_keygen_service_transaction = Some(ServiceTransactionMemory { + send_time: Instant::now(), + transaction_hash: hash, + transaction_type: ServiceTransactionType::KeyGenTransaction( + upcoming_epoch.as_u64(), + current_round.as_u64(), + KeyGenMode::WritePart, + ), + //nonce, + block_sent: client.block_number(BlockId::Latest).unwrap_or(0), + }); + + debug!(target: "engine", "sending part tx: {}", hash); + debug!(target: "engine", "sending Part with nonce: {}", nonce); + + return Ok(nonce); + } } diff --git a/crates/ethcore/src/engines/hbbft/mod.rs b/crates/ethcore/src/engines/hbbft/mod.rs index aebeb69ff9..0dec8b4ea5 100644 --- a/crates/ethcore/src/engines/hbbft/mod.rs +++ b/crates/ethcore/src/engines/hbbft/mod.rs @@ -1,8 +1,12 @@ mod block_reward_hbbft; mod contracts; mod contribution; +mod hbbft_early_epoch_end_manager; mod hbbft_engine; +mod hbbft_engine_cache; mod hbbft_message_memorium; +mod hbbft_network_fork_manager; +mod hbbft_peers_handler; mod hbbft_peers_management; mod hbbft_state; mod keygen_transactions; @@ -30,3 +34,19 @@ impl fmt::Display for NodeId { write!(f, "NodeId({})", self.0) } } + +impl NodeId { + pub fn as_8_byte_string(&self) -> String { + std::format!( + "{:x}{:x}{:x}{:x}{:x}{:x}{:x}{:x}", + self.0[0], + self.0[1], + self.0[2], + self.0[3], + self.0[4], + self.0[5], + self.0[6], + self.0[7] + ) + } +} diff --git a/crates/ethcore/src/engines/hbbft/sealing.rs 
b/crates/ethcore/src/engines/hbbft/sealing.rs index 3cf1b58fb3..9a5ac0e36d 100644 --- a/crates/ethcore/src/engines/hbbft/sealing.rs +++ b/crates/ethcore/src/engines/hbbft/sealing.rs @@ -1,5 +1,5 @@ use super::NodeId; -use hbbft::{crypto::Signature, threshold_sign::ThresholdSign, NetworkInfo}; +use hbbft::{NetworkInfo, crypto::Signature, threshold_sign::ThresholdSign}; use rlp::{Decodable, DecoderError, Encodable, Rlp, RlpStream}; use std::{result, sync::Arc}; @@ -72,12 +72,12 @@ impl Decodable for RlpSig { #[cfg(test)] mod tests { use super::*; - use rand_065; + use rand; use rlp; #[test] fn test_rlp_signature() { - let sig: Signature = rand_065::random(); + let sig: Signature = rand::random(); let encoded = rlp::encode(&RlpSig(&sig)); let decoded: RlpSig = rlp::decode(&encoded).expect("decode RlpSignature"); assert_eq!(decoded.0, sig); diff --git a/crates/ethcore/src/engines/hbbft/test/create_transactions.rs b/crates/ethcore/src/engines/hbbft/test/create_transactions.rs index 9e63756376..ca23fe882f 100644 --- a/crates/ethcore/src/engines/hbbft/test/create_transactions.rs +++ b/crates/ethcore/src/engines/hbbft/test/create_transactions.rs @@ -1,6 +1,6 @@ +use crate::types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}; use crypto::publickey::KeyPair; use ethereum_types::{Address, U256}; -use types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}; pub fn create_transaction(keypair: &KeyPair, nonce: &U256) -> SignedTransaction { TypedTransaction::Legacy(Transaction { diff --git a/crates/ethcore/src/engines/hbbft/test/hbbft_test_client.rs b/crates/ethcore/src/engines/hbbft/test/hbbft_test_client.rs index ccefa88c28..aef36d06c8 100644 --- a/crates/ethcore/src/engines/hbbft/test/hbbft_test_client.rs +++ b/crates/ethcore/src/engines/hbbft/test/hbbft_test_client.rs @@ -1,17 +1,19 @@ use super::create_transactions::{create_call, create_transaction, create_transfer}; -use client::{ - traits::{Balance, StateOrBlock}, - 
BlockChainClient, ChainSyncing, Client, ImportExportBlocks, +use crate::{ + client::{ + BlockChainClient, ChainSyncing, Client, ImportExportBlocks, + traits::{Balance, StateOrBlock}, + }, + engines::signer::from_keypair, + miner::{Miner, MinerService}, + spec::Spec, + test_helpers::{TestNotify, generate_dummy_client_with_spec}, + types::{data_format::DataFormat, ids::BlockId}, }; use crypto::publickey::{Generator, KeyPair, Random}; -use engines::signer::from_keypair; use ethereum_types::{Address, U256}; -use miner::{Miner, MinerService}; use parking_lot::RwLock; -use spec::Spec; use std::{ops::Deref, sync::Arc}; -use test_helpers::{generate_dummy_client_with_spec, TestNotify}; -use types::{data_format::DataFormat, ids::BlockId}; pub fn hbbft_spec() -> Spec { Spec::load( @@ -26,6 +28,10 @@ impl ChainSyncing for SyncProviderWrapper { fn is_major_syncing(&self) -> bool { false } + + fn is_syncing(&self) -> bool { + false + } } pub fn hbbft_client() -> std::sync::Arc { diff --git a/crates/ethcore/src/engines/hbbft/test/mod.rs b/crates/ethcore/src/engines/hbbft/test/mod.rs index 2187538825..4c935ff7a3 100644 --- a/crates/ethcore/src/engines/hbbft/test/mod.rs +++ b/crates/ethcore/src/engines/hbbft/test/mod.rs @@ -7,13 +7,12 @@ use super::{ validator_set::{is_pending_validator, mining_by_staking_address}, }, contribution::unix_now_secs, - test::hbbft_test_client::{create_hbbft_client, create_hbbft_clients, HbbftTestClient}, + test::hbbft_test_client::{HbbftTestClient, create_hbbft_client, create_hbbft_clients}, }; -use client::traits::BlockInfo; +use crate::{client::traits::BlockInfo, types::ids::BlockId}; use crypto::publickey::{Generator, KeyPair, Random, Secret}; use ethereum_types::{Address, U256}; use std::str::FromStr; -use types::ids::BlockId; pub mod create_transactions; pub mod hbbft_test_client; @@ -99,7 +98,9 @@ fn test_staking_account_creation() { .client .block(BlockId::Number(3)) .expect("Block must exist"); - assert_eq!(block.transactions_count(), 1); + + 
// block could already include KeyGenTransaction. + assert!(block.transactions_count() >= 1); assert_ne!( mining_by_staking_address(moc.client.as_ref(), &staker_1.address()) @@ -135,8 +136,10 @@ fn test_epoch_transition() { assert!(genesis_transition_time.as_u64() < unix_now_secs()); // We should not be in the pending validator set at the genesis block. - assert!(!is_pending_validator(moc.client.as_ref(), &moc.address()) - .expect("is_pending_validator call must succeed")); + assert!( + !is_pending_validator(moc.client.as_ref(), &moc.address()) + .expect("is_pending_validator call must succeed") + ); // Fund the transactor. // Also triggers the creation of a block. @@ -149,8 +152,10 @@ fn test_epoch_transition() { assert_eq!(moc.client.chain().best_block_number(), 1); // Now we should be part of the pending validator set. - assert!(is_pending_validator(moc.client.as_ref(), &moc.address()) - .expect("Constant call must succeed")); + assert!( + is_pending_validator(moc.client.as_ref(), &moc.address()) + .expect("Constant call must succeed") + ); // Check if we are still in the first epoch. assert_eq!( @@ -225,97 +230,97 @@ fn sync_two_validators() { moc.sync_transactions_to(&mut validator_1); } -#[test] -fn test_moc_to_first_validator() { - // Create MOC client - let mut moc = create_hbbft_client(MASTER_OF_CEREMONIES_KEYPAIR.clone()); - - // Create first validator client - let mut validator_1 = create_hbbft_client(Random.generate()); - - // To avoid performing external transactions with the MoC we create and fund a random address. - let transactor: KeyPair = Random.generate(); - - // Fund the transactor. - // Also triggers the creation of a block. - // This implicitly calls the block reward contract, which should trigger a phase transition - // since we already verified that the genesis transition time threshold has been reached. 
- moc.transfer_to( - &transactor.address(), - &U256::from_dec_str("1000000000000000000000000").unwrap(), - ); - - let transaction_funds = U256::from(9000000000000000000u64); - moc.transfer(&transactor, &validator_1.address(), &transaction_funds); - - // Create first pool - // Create staking address - let _staker_1 = create_staker(&mut moc, &transactor, &validator_1, transaction_funds); - - // Wait for moc keygen phase to finish - moc.create_some_transaction(Some(&transactor)); - moc.create_some_transaction(Some(&transactor)); - //moc.create_some_transaction(Some(&transactor)); - - // In the next block the POSDAO contracts realize they need to - // switch to the new validator. - moc.create_some_transaction(Some(&transactor)); - // We need to create another block to give the new validator a chance - // to find out it is in the pending validator set. - moc.create_some_transaction(Some(&transactor)); - - // Now we should be part of the pending validator set. - assert!( - is_pending_validator(moc.client.as_ref(), &validator_1.address()) - .expect("Constant call must succeed") - ); - // ..and the MOC should not be a pending validator. - assert!(!is_pending_validator(moc.client.as_ref(), &moc.address()) - .expect("Constant call must succeed")); - - // Sync blocks from MOC to validator_1. - // On importing the last block validator_1 should realize he is the next - // validator and generate a Parts transaction. - moc.sync_blocks_to(&mut validator_1); - - // validator_1 created a transaction to write its part, but it is not - // the current validator and cannot create a block. - // We need to gossip the transaction from validator_1 to the moc for a new block - // to be created, including the transaction from validator_1. - validator_1.sync_transactions_to(&mut moc); - - // Write another dummy block to give validator_1 the chance to realize he wrote - // his Part already so he sends his Acks. 
- // Due to the Parts/Acks sending delay of 3 blocks we have to inject 3 blocks here - moc.create_some_transaction(Some(&transactor)); - moc.create_some_transaction(Some(&transactor)); - moc.create_some_transaction(Some(&transactor)); - - // At this point the transaction from validator_1 has written its Keygen part, - // and we need to sync the new blocks from moc to validator_1. - moc.sync_blocks_to(&mut validator_1); - - // At this point validator_1 realizes his Part is included on the chain and - // generates a transaction to write it Acks. - // We need to gossip the transactions from validator_1 to the moc. - validator_1.sync_transactions_to(&mut moc); - - // Create a dummy transaction for the moc to see the Acks on the chain state, - // and make him switch to the new validator. - moc.create_some_transaction(Some(&transactor)); - - // Sync blocks from moc to validator_1, which is now the only active validator. - moc.sync_blocks_to(&mut validator_1); - - let pre_block_nr = validator_1.client.chain().best_block_number(); - - // Create a dummy transaction on the validator_1 client to verify it can create blocks. - validator_1.create_some_transaction(Some(&transactor)); - - let post_block_nr = validator_1.client.chain().best_block_number(); - - assert_eq!(post_block_nr, pre_block_nr + 1); -} +// #[test] +// fn test_moc_to_first_validator() { +// // Create MOC client +// let mut moc = create_hbbft_client(MASTER_OF_CEREMONIES_KEYPAIR.clone()); + +// // Create first validator client +// let mut validator_1 = create_hbbft_client(Random.generate()); + +// // To avoid performing external transactions with the MoC we create and fund a random address. +// let transactor: KeyPair = Random.generate(); + +// // Fund the transactor. +// // Also triggers the creation of a block. +// // This implicitly calls the block reward contract, which should trigger a phase transition +// // since we already verified that the genesis transition time threshold has been reached. 
+// moc.transfer_to( +// &transactor.address(), +// &U256::from_dec_str("1000000000000000000000000").unwrap(), +// ); + +// let transaction_funds = U256::from(9000000000000000000u64); +// moc.transfer(&transactor, &validator_1.address(), &transaction_funds); + +// // Create first pool +// // Create staking address +// let _staker_1 = create_staker(&mut moc, &transactor, &validator_1, transaction_funds); + +// // Wait for moc keygen phase to finish +// moc.create_some_transaction(Some(&transactor)); +// moc.create_some_transaction(Some(&transactor)); +// //moc.create_some_transaction(Some(&transactor)); + +// // In the next block the POSDAO contracts realize they need to +// // switch to the new validator. +// moc.create_some_transaction(Some(&transactor)); +// // We need to create another block to give the new validator a chance +// // to find out it is in the pending validator set. +// moc.create_some_transaction(Some(&transactor)); + +// // Now we should be part of the pending validator set. +// assert!( +// is_pending_validator(moc.client.as_ref(), &validator_1.address()) +// .expect("Constant call must succeed") +// ); +// // ..and the MOC should not be a pending validator. +// assert!(!is_pending_validator(moc.client.as_ref(), &moc.address()) +// .expect("Constant call must succeed")); + +// // Sync blocks from MOC to validator_1. +// // On importing the last block validator_1 should realize he is the next +// // validator and generate a Parts transaction. +// moc.sync_blocks_to(&mut validator_1); + +// // validator_1 created a transaction to write its part, but it is not +// // the current validator and cannot create a block. +// // We need to gossip the transaction from validator_1 to the moc for a new block +// // to be created, including the transaction from validator_1. +// validator_1.sync_transactions_to(&mut moc); + +// // Write another dummy block to give validator_1 the chance to realize he wrote +// // his Part already so he sends his Acks. 
+// // Due to the Parts/Acks sending delay of 3 blocks we have to inject 3 blocks here +// moc.create_some_transaction(Some(&transactor)); +// moc.create_some_transaction(Some(&transactor)); +// moc.create_some_transaction(Some(&transactor)); + +// // At this point the transaction from validator_1 has written its Keygen part, +// // and we need to sync the new blocks from moc to validator_1. +// moc.sync_blocks_to(&mut validator_1); + +// // At this point validator_1 realizes his Part is included on the chain and +// // generates a transaction to write it Acks. +// // We need to gossip the transactions from validator_1 to the moc. +// validator_1.sync_transactions_to(&mut moc); + +// // Create a dummy transaction for the moc to see the Acks on the chain state, +// // and make him switch to the new validator. +// moc.create_some_transaction(Some(&transactor)); + +// // Sync blocks from moc to validator_1, which is now the only active validator. +// moc.sync_blocks_to(&mut validator_1); + +// let pre_block_nr = validator_1.client.chain().best_block_number(); + +// // Create a dummy transaction on the validator_1 client to verify it can create blocks. 
+// validator_1.create_some_transaction(Some(&transactor)); + +// let post_block_nr = validator_1.client.chain().best_block_number(); + +// assert_eq!(post_block_nr, pre_block_nr + 1); +// } #[test] fn test_initialize_n_validators() { diff --git a/crates/ethcore/src/engines/hbbft/test/network_simulator.rs b/crates/ethcore/src/engines/hbbft/test/network_simulator.rs index afcd84ba72..c97b69ce9d 100644 --- a/crates/ethcore/src/engines/hbbft/test/network_simulator.rs +++ b/crates/ethcore/src/engines/hbbft/test/network_simulator.rs @@ -1,4 +1,4 @@ -use engines::hbbft::test::hbbft_test_client::HbbftTestClient; +use crate::engines::hbbft::test::hbbft_test_client::HbbftTestClient; use parking_lot::RwLock; use std::collections::BTreeMap; diff --git a/crates/ethcore/src/engines/hbbft/utils/bound_contract.rs b/crates/ethcore/src/engines/hbbft/utils/bound_contract.rs index a62fa1b519..ff9f2acbeb 100644 --- a/crates/ethcore/src/engines/hbbft/utils/bound_contract.rs +++ b/crates/ethcore/src/engines/hbbft/utils/bound_contract.rs @@ -6,10 +6,9 @@ use std::fmt; -use client::EngineClient; +use crate::{client::EngineClient, types::ids::BlockId}; use ethabi; use ethereum_types::Address; -use types::ids::BlockId; /// A contract bound to a client and block number. /// @@ -26,8 +25,10 @@ pub struct BoundContract<'a> { #[derive(Debug)] pub enum CallError { /// The call itself failed. + #[allow(dead_code)] CallFailed(String), /// Decoding the return value failed or the decoded value was a failure. + #[allow(dead_code)] DecodeFailed(ethabi::Error), /// The passed in client reference could not be upgraded to a `BlockchainClient`. 
NotFullClient, diff --git a/crates/ethcore/src/engines/hbbft/utils/mod.rs b/crates/ethcore/src/engines/hbbft/utils/mod.rs index 162024fd42..61e8abfc88 100644 --- a/crates/ethcore/src/engines/hbbft/utils/mod.rs +++ b/crates/ethcore/src/engines/hbbft/utils/mod.rs @@ -1 +1,2 @@ pub mod bound_contract; +pub mod transactions_shuffling; diff --git a/crates/ethcore/src/engines/hbbft/utils/transactions_shuffling.rs b/crates/ethcore/src/engines/hbbft/utils/transactions_shuffling.rs new file mode 100644 index 0000000000..f12be4f203 --- /dev/null +++ b/crates/ethcore/src/engines/hbbft/utils/transactions_shuffling.rs @@ -0,0 +1,118 @@ +// Warning: Part of the Consensus protocol, changes need to produce *exactly* the same result or +// block verification will fail. Intentional breaking changes constitute a fork. + +use crate::types::transaction::SignedTransaction; +use ethereum_types::{Address, U256}; +use std::collections::HashMap; + +/// Combining an address with a random U256 seed using XOR, using big-endian byte ordering always. +fn address_xor_u256(address: &Address, seed: U256) -> Address { + // Address bytes are always assuming big-endian order. + let address_bytes = address.as_bytes(); + + // Explicitly convert U256 to big endian order + let mut seed_bytes = [0u8; 32]; + seed.to_big_endian(&mut seed_bytes); + + // Byte-wise XOR, constructing a new, big-endian array + let mut result = [0u8; 20]; + for i in 0..20 { + result[i] = address_bytes[i] ^ seed_bytes[i]; + } + + // Construct a new Address from the big-endian array + Address::from(result) +} + +/// The list of transactions is expected to be free of duplicates. +pub fn deterministic_transactions_shuffling( + transactions: Vec, + seed: U256, +) -> Vec { + // The implementation needs to be both portable and deterministic. + // There is no guarantee that the input list of transactions does not contain transactions + // with the same nonce but different content. 
+ // There is also no guarantee the transactions are sorted by nonce. + + // Group transactions by sender. + // * Walk the transactions from first to last + // * Add transactions with unique nonce to a per-sender vector + // * Discard transactions with a nonce already existing in the list of transactions + let mut txs_by_sender: HashMap<_, Vec> = HashMap::new(); + for tx in transactions { + let sender = tx.sender(); + let entry = txs_by_sender.entry(sender).or_insert_with(Vec::new); + + if let Some(existing_tx) = entry + .iter_mut() + .find(|existing_tx| existing_tx.tx().nonce == tx.tx().nonce) + { + if tx.tx().gas_price > existing_tx.tx().gas_price { + *existing_tx = tx; + } + } else { + entry.push(tx); + } + } + + // For each sender, sort their transactions by nonce (lowest first). + // Nonces are expected to be unique at this point, guaranteeing portable + // and deterministic results independent of the sorting algorithm as long as + // the sorting algorithm works and is implemented correctly. + for txs in txs_by_sender.values_mut() { + txs.sort_by_key(|tx| tx.tx().nonce); + } + + // Deterministically randomize the order of senders. + // Same as with transactions we rely on the uniqueness of list members and + // a properly functioning sorting algorithm. To prevent predictable order we + // XOR each sender address with the random number generated through the HBBFT + // protocol, and use the resulting address as sorting key. + // The random number is guaranteed to be identical for all validators at the + // time of block creation. + let mut senders: Vec<_> = txs_by_sender.keys().cloned().collect(); + senders.sort_by_key(|address| address_xor_u256(address, seed)); + + // Create the final transaction list by iterating over the randomly shuffled senders. + let mut final_transactions = Vec::new(); + for sender in senders { + if let Some(mut sender_txs) = txs_by_sender.remove(&sender) { + // Each sender's transactions are already sorted by nonce. 
+ final_transactions.append(&mut sender_txs); + } + } + + final_transactions +} + +#[cfg(test)] +mod tests { + use super::*; + + // Convert to bytes in big-endian order. + fn u64_to_bytes_be(n: u64) -> [u8; N] { + // Make sure the array is large enough to hold 8 bytes. + assert!(N >= 8, "Target array size must be at least 8 bytes"); + let mut result = [0u8; N]; + // Copy the big-endian bytes into the first 8 bytes. + result[..8].copy_from_slice(&n.to_be_bytes()); + result + } + + #[test] + fn test_address_xor_u256() { + // TODO: Cover corner cases, preferably by using a testing crate like proptest. + let address_value = 0x1234567890abcdefu64; + let seed_value = 0x7a9e4b3d1c2f0a68u64; + + let address_bytes: [u8; 20] = u64_to_bytes_be(address_value); + let address = Address::from_slice(&address_bytes); + let seed_bytes: [u8; 32] = u64_to_bytes_be(seed_value); + let seed = U256::from_big_endian(&seed_bytes); + let result = address_xor_u256(&address, seed); + assert_eq!( + result, + Address::from_slice(&u64_to_bytes_be::<20>(address_value ^ seed_value)) + ); + } +} diff --git a/crates/ethcore/src/engines/instant_seal.rs b/crates/ethcore/src/engines/instant_seal.rs index 8410476d8b..80213ad149 100644 --- a/crates/ethcore/src/engines/instant_seal.rs +++ b/crates/ethcore/src/engines/instant_seal.rs @@ -14,11 +14,13 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use block::ExecutedBlock; -use engines::{Engine, Seal, SealingState}; -use machine::Machine; +use crate::{ + block::ExecutedBlock, + engines::{Engine, Seal, SealingState}, + machine::Machine, + types::header::{ExtendedHeader, Header}, +}; use std::sync::atomic::{AtomicU64, Ordering}; -use types::header::{ExtendedHeader, Header}; /// `InstantSeal` params. 
#[derive(Default, Debug, PartialEq)] @@ -126,13 +128,11 @@ impl Engine for InstantSeal { #[cfg(test)] mod tests { - use block::*; - use engines::Seal; + use crate::{ + block::*, engines::Seal, spec::Spec, test_helpers::get_temp_state_db, types::header::Header, + }; use ethereum_types::{Address, H520}; - use spec::Spec; use std::sync::Arc; - use test_helpers::get_temp_state_db; - use types::header::Header; #[test] fn instant_can_seal() { diff --git a/crates/ethcore/src/engines/mod.rs b/crates/ethcore/src/engines/mod.rs index ba019d79fa..0135bb26b9 100644 --- a/crates/ethcore/src/engines/mod.rs +++ b/crates/ethcore/src/engines/mod.rs @@ -38,9 +38,9 @@ pub use self::{ }; // TODO [ToDr] Remove re-export (#10130) -pub use types::engines::{ - epoch::{self, Transition as EpochTransition}, +pub use crate::types::engines::{ ForkChoice, + epoch::{self, Transition as EpochTransition}, }; use std::{ @@ -49,23 +49,27 @@ use std::{ sync::{Arc, Weak}, }; -use builtin::Builtin; -use error::Error; -use snapshot::SnapshotComponents; -use spec::CommonParams; -use types::{ - header::{ExtendedHeader, Header}, - transaction::{self, SignedTransaction, UnverifiedTransaction}, - BlockNumber, +use crate::{ + error::Error, + snapshot::SnapshotComponents, + spec::CommonParams, + types::{ + BlockNumber, + header::{ExtendedHeader, Header}, + transaction::{self, SignedTransaction, UnverifiedTransaction}, + }, }; +use builtin::Builtin; use vm::{ActionValue, CallType, CreateContractAddress, EnvInfo, Schedule}; -use block::ExecutedBlock; +use crate::{ + block::ExecutedBlock, + machine::{self, AuxiliaryData, AuxiliaryRequest, Machine}, + types::ancestry_action::AncestryAction, +}; use bytes::Bytes; use crypto::publickey::Signature; -use ethereum_types::{Address, H256, H512, H64, U256}; -use machine::{self, AuxiliaryData, AuxiliaryRequest, Machine}; -use types::ancestry_action::AncestryAction; +use ethereum_types::{Address, H64, H256, H512, U256}; use unexpected::{Mismatch, OutOfBounds}; /// Default 
EIP-210 contract code. @@ -221,8 +225,8 @@ pub enum SystemOrCodeCallKind { /// Default SystemOrCodeCall implementation. pub fn default_system_or_code_call<'a>( - machine: &'a ::machine::EthereumMachine, - block: &'a mut ::block::ExecutedBlock, + machine: &'a crate::machine::EthereumMachine, + block: &'a mut crate::block::ExecutedBlock, ) -> impl FnMut(SystemOrCodeCallKind, Vec) -> Result, String> + 'a { move |to, data| { let result = match to { @@ -301,6 +305,18 @@ pub enum EpochChange { Yes(Proof), } +/// Who shall author a new block? +pub enum BlockAuthorOption { + /// use the Zero address as block author. + ZeroBlockAuthor, + + /// use the block author from the config. + ConfiguredBlockAuthor, + + /// use the block author provided by the EngineClient. + EngineBlockAuthor(Address), +} + /// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based. /// Provides hooks into each of the major parts of block import. pub trait Engine: Sync + Send { @@ -360,7 +376,7 @@ pub trait Engine: Sync + Send { } /// Allow mutating the header during seal generation. Currently only used by Clique. - fn on_seal_block(&self, _block: &mut ExecutedBlock) -> Result<(), Error> { + fn on_seal_block(&self, _block: &mut ExecutedBlock) -> Result<(), crate::error::Error> { Ok(()) } @@ -492,6 +508,12 @@ pub trait Engine: Sync + Send { true } + /// Some Engine might define the minimum gas price by themselves. + /// (for example: contract) + fn minimum_gas_price(&self) -> Option { + None + } + /// Sign using the EngineSigner, to be used for consensus tx signing. fn sign(&self, _hash: H256) -> Result { unimplemented!() } @@ -571,8 +593,14 @@ pub trait Engine: Sync + Send { } /// Use the author as signer as well as block author. - fn use_block_author(&self) -> bool { - true + fn use_block_author(&self) -> BlockAuthorOption { + BlockAuthorOption::ConfiguredBlockAuthor + } + + /// allows engines to define a block that should not get pruned in the DB. 
+ /// This is useful for engines that need to keep a certain block in the DB. + fn pruning_protection_block_number(&self) -> Option { + None } /// Optional entry point for adding engine specific metrics. @@ -592,7 +620,7 @@ pub fn total_difficulty_fork_choice(new: &ExtendedHeader, best: &ExtendedHeader) // TODO: make this a _trait_ alias when those exist. // fortunately the effect is largely the same since engines are mostly used // via trait objects. -pub trait EthEngine: Engine<::machine::EthereumMachine> { +pub trait EthEngine: Engine { /// Get the general parameters of the chain. fn params(&self) -> &CommonParams { self.machine().params() @@ -703,16 +731,10 @@ pub trait EthEngine: Engine<::machine::EthereumMachine> { fn allow_non_eoa_sender(&self, best_block_number: BlockNumber) -> bool { self.params().eip3607_transition > best_block_number } - - /// Some Engine might define the minimum gas price by themselve. - /// (for example: contract) - fn minimum_gas_price(&self) -> Option { - None - } } // convenience wrappers for existing functions. -impl EthEngine for T where T: Engine<::machine::EthereumMachine> {} +impl EthEngine for T where T: Engine {} /// Verifier for all blocks within an epoch with self-contained state. pub trait EpochVerifier: Send + Sync { diff --git a/crates/ethcore/src/engines/null_engine.rs b/crates/ethcore/src/engines/null_engine.rs index f23e020d65..02947241b7 100644 --- a/crates/ethcore/src/engines/null_engine.rs +++ b/crates/ethcore/src/engines/null_engine.rs @@ -14,18 +14,20 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use block::ExecutedBlock; -use engines::{ - block_reward::{self, RewardKind}, - Engine, +use crate::{ + block::ExecutedBlock, + engines::{ + Engine, + block_reward::{self, RewardKind}, + }, + machine::Machine, + types::{ + BlockNumber, + ancestry_action::AncestryAction, + header::{ExtendedHeader, Header}, + }, }; use ethereum_types::U256; -use machine::Machine; -use types::{ - ancestry_action::AncestryAction, - header::{ExtendedHeader, Header}, - BlockNumber, -}; /// Params for a null engine. #[derive(Clone, Default)] @@ -118,7 +120,7 @@ impl Engine for NullEngine { } fn snapshot_components(&self) -> Option> { - Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000))) + Some(Box::new(crate::snapshot::PowSnapshot::new(10000, 10000))) } fn fork_choice(&self, new: &ExtendedHeader, current: &ExtendedHeader) -> super::ForkChoice { diff --git a/crates/ethcore/src/engines/signer.rs b/crates/ethcore/src/engines/signer.rs index c31eff3a72..0f0d3e3a78 100644 --- a/crates/ethcore/src/engines/signer.rs +++ b/crates/ethcore/src/engines/signer.rs @@ -16,7 +16,7 @@ //! A signer used by Engines which need to sign messages. -use crypto::publickey::{self, ecies, Error, Public, Signature}; +use crypto::publickey::{self, Error, Public, Signature, ecies}; use ethereum_types::{Address, H256}; //TODO dr @@ -63,7 +63,7 @@ impl EngineSigner for Signer { #[cfg(test)] mod test_signer { - extern crate ethkey; + use ethkey; use std::sync::Arc; diff --git a/crates/ethcore/src/engines/validator_set/contract.rs b/crates/ethcore/src/engines/validator_set/contract.rs index a4b781689f..5fadc8320d 100644 --- a/crates/ethcore/src/engines/validator_set/contract.rs +++ b/crates/ethcore/src/engines/validator_set/contract.rs @@ -18,17 +18,19 @@ /// It can also report validators for misbehaviour with two levels: `reportMalicious` and `reportBenign`. 
use std::sync::Weak; +use crate::{ + machine::{AuxiliaryData, Call, EthereumMachine}, + types::{BlockNumber, header::Header, ids::BlockId, transaction}, +}; use bytes::Bytes; use ethereum_types::{Address, H256, U256}; -use machine::{AuxiliaryData, Call, EthereumMachine}; use parking_lot::RwLock; -use types::{header::Header, ids::BlockId, transaction, BlockNumber}; -use client::{traits::TransactionRequest, EngineClient}; +use crate::client::{EngineClient, traits::TransactionRequest}; -use error::Error as EthcoreError; +use crate::error::Error as EthcoreError; -use super::{safe_contract::ValidatorSafeContract, SimpleList, SystemCall, ValidatorSet}; +use super::{SimpleList, SystemCall, ValidatorSet, safe_contract::ValidatorSafeContract}; use_contract!(validator_report, "res/contracts/validator_report.json"); @@ -146,7 +148,7 @@ impl ValidatorSet for ValidatorContract { first: bool, header: &Header, call: &mut SystemCall, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { self.validators.on_epoch_begin(first, header, call) } @@ -163,7 +165,7 @@ impl ValidatorSet for ValidatorContract { first: bool, header: &Header, aux: AuxiliaryData, - ) -> ::engines::EpochChange { + ) -> crate::engines::EpochChange { self.validators.signals_epoch_end(first, header, aux) } @@ -173,7 +175,7 @@ impl ValidatorSet for ValidatorContract { machine: &EthereumMachine, number: BlockNumber, proof: &[u8], - ) -> Result<(SimpleList, Option), ::error::Error> { + ) -> Result<(SimpleList, Option), crate::error::Error> { self.validators.epoch_set(first, machine, number, proof) } @@ -216,20 +218,22 @@ impl ValidatorSet for ValidatorContract { #[cfg(test)] mod tests { use super::{super::ValidatorSet, ValidatorContract}; + use crate::{ + client::{BlockChainClient, BlockInfo, ChainInfo, traits::TransactionRequest}, + miner::{self, MinerService}, + spec::Spec, + test_helpers::generate_dummy_client_with_spec, + types::{header::Header, ids::BlockId}, + }; use 
accounts::AccountProvider; use bytes::ToPretty; use call_contract::CallContract; - use client::{traits::TransactionRequest, BlockChainClient, BlockInfo, ChainInfo}; use ethabi::FunctionOutputDecoder; use ethereum_types::{Address, H520}; use hash::keccak; - use miner::{self, MinerService}; use rlp::encode; use rustc_hex::FromHex; - use spec::Spec; use std::sync::Arc; - use test_helpers::generate_dummy_client_with_spec; - use types::{header::Header, ids::BlockId}; #[test] fn fetches_validators() { @@ -238,18 +242,22 @@ mod tests { let vc = Arc::new(ValidatorContract::new(addr, None)); vc.register_client(Arc::downgrade(&client) as _); let last_hash = client.best_block_header().hash(); - assert!(vc.contains( - &last_hash, - &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e" - .parse::
() - .unwrap() - )); - assert!(vc.contains( - &last_hash, - &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1" - .parse::
() - .unwrap() - )); + assert!( + vc.contains( + &last_hash, + &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e" + .parse::
() + .unwrap() + ) + ); + assert!( + vc.contains( + &last_hash, + &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1" + .parse::
() + .unwrap() + ) + ); } #[test] @@ -310,10 +318,12 @@ mod tests { ); // Simulate a misbehaving validator by handling a double proposal. let header = client.best_block_header(); - assert!(client - .engine() - .verify_block_family(&header, &header) - .is_err()); + assert!( + client + .engine() + .verify_block_family(&header, &header) + .is_err() + ); // Seal a block. client.engine().step(); client.engine().step(); diff --git a/crates/ethcore/src/engines/validator_set/mod.rs b/crates/ethcore/src/engines/validator_set/mod.rs index c47e747f55..2a952fb2b6 100644 --- a/crates/ethcore/src/engines/validator_set/mod.rs +++ b/crates/ethcore/src/engines/validator_set/mod.rs @@ -25,15 +25,17 @@ mod test; use std::sync::Weak; +use crate::{ + machine::{AuxiliaryData, Call, EthereumMachine}, + types::{BlockNumber, header::Header, ids::BlockId}, +}; use bytes::Bytes; use ethereum_types::{Address, H256}; use ethjson::spec::ValidatorSet as ValidatorSpec; -use machine::{AuxiliaryData, Call, EthereumMachine}; -use types::{header::Header, ids::BlockId, BlockNumber}; -use client::EngineClient; +use crate::client::EngineClient; -use error::Error as EthcoreError; +use crate::error::Error as EthcoreError; pub use self::simple_list::SimpleList; #[cfg(test)] @@ -129,7 +131,7 @@ pub trait ValidatorSet: Send + Sync + 'static { _first: bool, _header: &Header, _call: &mut SystemCall, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { Ok(()) } @@ -156,7 +158,7 @@ pub trait ValidatorSet: Send + Sync + 'static { first: bool, header: &Header, aux: AuxiliaryData, - ) -> ::engines::EpochChange; + ) -> crate::engines::EpochChange; /// Recover the validator set from the given proof, the block number, and /// whether this header is first in its set. 
@@ -172,7 +174,7 @@ pub trait ValidatorSet: Send + Sync + 'static { machine: &EthereumMachine, number: BlockNumber, proof: &[u8], - ) -> Result<(SimpleList, Option), ::error::Error>; + ) -> Result<(SimpleList, Option), crate::error::Error>; /// Checks if a given address is a validator, with the given function /// for executing synchronous calls to contracts. diff --git a/crates/ethcore/src/engines/validator_set/multi.rs b/crates/ethcore/src/engines/validator_set/multi.rs index 29c7392f8d..2a643af486 100644 --- a/crates/ethcore/src/engines/validator_set/multi.rs +++ b/crates/ethcore/src/engines/validator_set/multi.rs @@ -18,15 +18,17 @@ use std::collections::BTreeMap; use std::sync::Weak; +use crate::types::{BlockNumber, header::Header, ids::BlockId}; use bytes::Bytes; use ethereum_types::{Address, H256}; use parking_lot::RwLock; -use types::{header::Header, ids::BlockId, BlockNumber}; use super::{SystemCall, ValidatorSet}; -use client::EngineClient; -use error::Error as EthcoreError; -use machine::{AuxiliaryData, Call, EthereumMachine}; +use crate::{ + client::EngineClient, + error::Error as EthcoreError, + machine::{AuxiliaryData, Call, EthereumMachine}, +}; type BlockNumberLookup = Box Result + Send + Sync + 'static>; @@ -136,7 +138,7 @@ impl ValidatorSet for Multi { _first: bool, header: &Header, aux: AuxiliaryData, - ) -> ::engines::EpochChange { + ) -> crate::engines::EpochChange { let (set_block, set) = self.correct_set_by_number(header.number()); let first = set_block == header.number(); @@ -149,7 +151,7 @@ impl ValidatorSet for Multi { machine: &EthereumMachine, number: BlockNumber, proof: &[u8], - ) -> Result<(super::SimpleList, Option), ::error::Error> { + ) -> Result<(super::SimpleList, Option), crate::error::Error> { let (set_block, set) = self.correct_set_by_number(number); let first = set_block == number; @@ -206,21 +208,23 @@ impl ValidatorSet for Multi { #[cfg(test)] mod tests { - use accounts::AccountProvider; - use client::{ - 
traits::{ForceUpdateSealing, TransactionRequest}, - BlockChainClient, BlockInfo, ChainInfo, ImportBlock, + use crate::{ + client::{ + BlockChainClient, BlockInfo, ChainInfo, ImportBlock, + traits::{ForceUpdateSealing, TransactionRequest}, + }, + engines::{EpochChange, validator_set::ValidatorSet}, + miner::{self, MinerService}, + spec::Spec, + test_helpers::generate_dummy_client_with_spec, + types::{header::Header, ids::BlockId}, + verification::queue::kind::blocks::Unverified, }; + use accounts::AccountProvider; use crypto::publickey::Secret; - use engines::{validator_set::ValidatorSet, EpochChange}; use ethereum_types::Address; use hash::keccak; - use miner::{self, MinerService}; - use spec::Spec; use std::{collections::BTreeMap, sync::Arc}; - use test_helpers::generate_dummy_client_with_spec; - use types::{header::Header, ids::BlockId}; - use verification::queue::kind::blocks::Unverified; use super::Multi; @@ -249,12 +253,12 @@ mod tests { Default::default(), )) .unwrap(); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + crate::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); assert_eq!(client.chain_info().best_block_number, 0); // Right signer for the first block. let signer = Box::new((tap.clone(), v0, "".into())); client.miner().set_author(miner::Author::Sealer(signer)); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + crate::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); assert_eq!(client.chain_info().best_block_number, 1); // This time v0 is wrong. 
client @@ -263,11 +267,11 @@ mod tests { Default::default(), )) .unwrap(); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + crate::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); assert_eq!(client.chain_info().best_block_number, 1); let signer = Box::new((tap.clone(), v1, "".into())); client.miner().set_author(miner::Author::Sealer(signer)); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + crate::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); assert_eq!(client.chain_info().best_block_number, 2); // v1 is still good. client @@ -276,7 +280,7 @@ mod tests { Default::default(), )) .unwrap(); - ::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); + crate::client::EngineClient::update_sealing(&*client, ForceUpdateSealing::No); assert_eq!(client.chain_info().best_block_number, 3); // Check syncing. diff --git a/crates/ethcore/src/engines/validator_set/safe_contract.rs b/crates/ethcore/src/engines/validator_set/safe_contract.rs index 135f5a8985..1d97cd2b1c 100644 --- a/crates/ethcore/src/engines/validator_set/safe_contract.rs +++ b/crates/ethcore/src/engines/validator_set/safe_contract.rs @@ -20,8 +20,14 @@ use std::{ sync::{Arc, Weak}, }; +use crate::{ + error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind}, + types::{ + BlockNumber, header::Header, ids::BlockId, log_entry::LogEntry, receipt::TypedReceipt, + transaction, + }, +}; use bytes::Bytes; -use error::{Error as EthcoreError, ErrorKind as EthcoreErrorKind}; use ethabi::FunctionOutputDecoder; use ethereum_types::{Address, Bloom, H256, U256}; use hash::keccak; @@ -29,15 +35,13 @@ use kvdb::DBValue; use memory_cache::MemoryLruCache; use parking_lot::{Mutex, RwLock}; use rlp::{Rlp, RlpStream}; -use types::{ - header::Header, ids::BlockId, log_entry::LogEntry, receipt::TypedReceipt, transaction, - BlockNumber, -}; use unexpected::Mismatch; -use super::{simple_list::SimpleList, 
SystemCall, ValidatorSet}; -use client::{traits::TransactionRequest, BlockChainClient, EngineClient}; -use machine::{AuxiliaryData, AuxiliaryRequest, Call, EthereumMachine}; +use super::{SystemCall, ValidatorSet, simple_list::SimpleList}; +use crate::{ + client::{BlockChainClient, EngineClient, traits::TransactionRequest}, + machine::{AuxiliaryData, AuxiliaryRequest, Call, EthereumMachine}, +}; use_contract!(validator_set, "res/contracts/validator_set.json"); @@ -64,7 +68,7 @@ struct StateProof { header: Header, } -impl ::engines::StateDependentProof for StateProof { +impl crate::engines::StateDependentProof for StateProof { fn generate_proof(&self, caller: &Call) -> Result, String> { prove_initial(self.contract_address, &self.header, caller) } @@ -112,7 +116,7 @@ fn check_first_proof( old_header: Header, state_items: &[DBValue], ) -> Result, String> { - use types::transaction::{Action, Transaction, TypedTransaction}; + use crate::types::transaction::{Action, Transaction, TypedTransaction}; // TODO: match client contract_call_tx more cleanly without duplication. 
const PROVIDED_GAS: u64 = 50_000_000; @@ -148,7 +152,7 @@ fn check_first_proof( }) .fake_sign(from); - let res = ::state::check_proof( + let res = crate::state::check_proof( state_items, *old_header.state_root(), &tx, @@ -157,9 +161,9 @@ fn check_first_proof( ); match res { - ::state::ProvedExecution::BadProof => Err("Bad proof".into()), - ::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)), - ::state::ProvedExecution::Complete(e) => { + crate::state::ProvedExecution::BadProof => Err("Bad proof".into()), + crate::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)), + crate::state::ProvedExecution::Complete(e) => { decoder.decode(&e.output).map_err(|e| e.to_string()) } } @@ -168,7 +172,7 @@ fn check_first_proof( fn decode_first_proof( rlp: &Rlp, eip1559_transition: BlockNumber, -) -> Result<(Header, Vec), ::error::Error> { +) -> Result<(Header, Vec), crate::error::Error> { let header = Header::decode_rlp(&rlp.at(0)?, eip1559_transition)?; let state_items = rlp .at(1)? 
@@ -178,7 +182,7 @@ fn decode_first_proof( val.append_slice(x.data()?); Ok(val) }) - .collect::>()?; + .collect::>()?; Ok((header, state_items)) } @@ -196,7 +200,7 @@ fn encode_proof(header: &Header, receipts: &[TypedReceipt]) -> Bytes { fn decode_proof( rlp: &Rlp, eip1559_transition: BlockNumber, -) -> Result<(Header, Vec), ::error::Error> { +) -> Result<(Header, Vec), crate::error::Error> { Ok(( Header::decode_rlp(&rlp.at(0)?, eip1559_transition)?, TypedReceipt::decode_rlp_list(&rlp.at(1)?)?, @@ -409,7 +413,7 @@ impl ValidatorSet for ValidatorSafeContract { .decode(&x) .map_err(|x| format!("chain spec bug: could not decode: {:?}", x)) }) - .map_err(::engines::EngineError::FailedSystemCall)?; + .map_err(crate::engines::EngineError::FailedSystemCall)?; if !emit_initiate_change_callable { trace!(target: "engine", "New block #{} issued ― no need to call emitInitiateChange()", header.number()); } else { @@ -494,11 +498,11 @@ impl ValidatorSet for ValidatorSafeContract { _first: bool, _header: &Header, caller: &mut SystemCall, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { let data = validator_set::functions::finalize_change::encode_input(); caller(self.contract_address, data) .map(|_| ()) - .map_err(::engines::EngineError::FailedSystemCall) + .map_err(crate::engines::EngineError::FailedSystemCall) .map_err(Into::into) } @@ -515,7 +519,7 @@ impl ValidatorSet for ValidatorSafeContract { first: bool, header: &Header, aux: AuxiliaryData, - ) -> ::engines::EpochChange { + ) -> crate::engines::EpochChange { let receipts = aux.receipts; // transition to the first block of a contract requires finality but has no log event. 
@@ -525,7 +529,9 @@ impl ValidatorSet for ValidatorSafeContract { contract_address: self.contract_address, header: header.clone(), }); - return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>)); + return crate::engines::EpochChange::Yes(crate::engines::Proof::WithState( + state_proof as Arc<_>, + )); } // otherwise, we're checking for logs. @@ -533,21 +539,21 @@ impl ValidatorSet for ValidatorSafeContract { let header_bloom = header.log_bloom(); if &bloom & header_bloom != bloom { - return ::engines::EpochChange::No; + return crate::engines::EpochChange::No; } trace!(target: "engine", "detected epoch change event bloom"); match receipts { - None => ::engines::EpochChange::Unsure(AuxiliaryRequest::Receipts), + None => crate::engines::EpochChange::Unsure(AuxiliaryRequest::Receipts), Some(receipts) => match self.extract_from_event(bloom, header, receipts) { - None => ::engines::EpochChange::No, + None => crate::engines::EpochChange::No, Some(list) => { info!(target: "engine", "Signal for transition within contract. 
New list: {:?}", &*list); let proof = encode_proof(&header, receipts); - ::engines::EpochChange::Yes(::engines::Proof::Known(proof)) + crate::engines::EpochChange::Yes(crate::engines::Proof::Known(proof)) } }, } @@ -559,7 +565,7 @@ impl ValidatorSet for ValidatorSafeContract { machine: &EthereumMachine, _number: ::types::BlockNumber, proof: &[u8], - ) -> Result<(SimpleList, Option), ::error::Error> { + ) -> Result<(SimpleList, Option), crate::error::Error> { let rlp = Rlp::new(proof); if first { @@ -571,7 +577,7 @@ impl ValidatorSet for ValidatorSafeContract { let old_hash = old_header.hash(); let addresses = check_first_proof(machine, self.contract_address, old_header, &state_items) - .map_err(::engines::EngineError::InsufficientProof)?; + .map_err(crate::engines::EngineError::InsufficientProof)?; trace!(target: "engine", "extracted epoch set at #{}: {} addresses", number, addresses.len()); @@ -584,7 +590,7 @@ impl ValidatorSet for ValidatorSafeContract { // TODO: optimize? these were just decoded. 
let found_root = ::triehash::ordered_trie_root(receipts.iter().map(|r| r.encode())); if found_root != *old_header.receipts_root() { - return Err(::error::BlockError::InvalidReceiptsRoot(Mismatch { + return Err(crate::error::BlockError::InvalidReceiptsRoot(Mismatch { expected: *old_header.receipts_root(), found: found_root, }) @@ -595,7 +601,7 @@ impl ValidatorSet for ValidatorSafeContract { match self.extract_from_event(bloom, &old_header, &receipts) { Some(list) => Ok((list, Some(old_header.hash()))), - None => Err(::engines::EngineError::InsufficientProof( + None => Err(crate::engines::EngineError::InsufficientProof( "No log event in proof.".into(), ) .into()), @@ -713,25 +719,27 @@ impl ReportQueue { #[cfg(test)] mod tests { - use super::{super::ValidatorSet, ValidatorSafeContract, EVENT_NAME_HASH}; - use accounts::AccountProvider; - use client::{ - traits::{EngineClient, ForceUpdateSealing}, - BlockInfo, ChainInfo, ImportBlock, + use super::{super::ValidatorSet, EVENT_NAME_HASH, ValidatorSafeContract}; + use crate::{ + client::{ + BlockInfo, ChainInfo, ImportBlock, + traits::{EngineClient, ForceUpdateSealing}, + }, + miner::{self, MinerService}, + spec::Spec, + test_helpers::generate_dummy_client_with_spec, + types::{ + ids::BlockId, + transaction::{Action, Transaction, TypedTransaction}, + }, + verification::queue::kind::blocks::Unverified, }; + use accounts::AccountProvider; use crypto::publickey::Secret; use ethereum_types::Address; use hash::keccak; - use miner::{self, MinerService}; use rustc_hex::FromHex; - use spec::Spec; use std::sync::Arc; - use test_helpers::generate_dummy_client_with_spec; - use types::{ - ids::BlockId, - transaction::{Action, Transaction, TypedTransaction}, - }; - use verification::queue::kind::blocks::Unverified; #[test] fn fetches_validators() { @@ -740,18 +748,22 @@ mod tests { let vc = Arc::new(ValidatorSafeContract::new(addr, None)); vc.register_client(Arc::downgrade(&client) as _); let last_hash = 
client.best_block_header().hash(); - assert!(vc.contains( - &last_hash, - &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e" - .parse::
() - .unwrap() - )); - assert!(vc.contains( - &last_hash, - &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1" - .parse::
() - .unwrap() - )); + assert!( + vc.contains( + &last_hash, + &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e" + .parse::
() + .unwrap() + ) + ); + assert!( + vc.contains( + &last_hash, + &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1" + .parse::
() + .unwrap() + ) + ); } #[test] @@ -856,12 +868,14 @@ mod tests { #[test] fn detects_bloom() { - use engines::EpochChange; - use machine::AuxiliaryRequest; - use types::{header::Header, log_entry::LogEntry}; + use crate::{ + engines::EpochChange, + machine::AuxiliaryRequest, + types::{header::Header, log_entry::LogEntry}, + }; let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); - let engine = client.engine().clone(); + let engine = client.engine(); let validator_contract = "0000000000000000000000000000000000000005" .parse::
() .unwrap(); @@ -896,11 +910,13 @@ mod tests { #[test] fn initial_contract_is_signal() { - use engines::{EpochChange, Proof}; - use types::header::Header; + use crate::{ + engines::{EpochChange, Proof}, + types::header::Header, + }; let client = generate_dummy_client_with_spec(Spec::new_validator_safe_contract); - let engine = client.engine().clone(); + let engine = client.engine(); let mut new_header = Header::default(); new_header.set_number(0); // so the validator set doesn't look for a log diff --git a/crates/ethcore/src/engines/validator_set/simple_list.rs b/crates/ethcore/src/engines/validator_set/simple_list.rs index c608d68a07..3e32b910e2 100644 --- a/crates/ethcore/src/engines/validator_set/simple_list.rs +++ b/crates/ethcore/src/engines/validator_set/simple_list.rs @@ -19,10 +19,12 @@ use ethereum_types::{Address, H256}; use parity_util_mem::MallocSizeOf; use super::{SystemCall, ValidatorSet}; +use crate::{ + error::Error as EthcoreError, + machine::{AuxiliaryData, Call, EthereumMachine}, + types::{BlockNumber, header::Header}, +}; use bytes::Bytes; -use error::Error as EthcoreError; -use machine::{AuxiliaryData, Call, EthereumMachine}; -use types::{header::Header, BlockNumber}; /// Validator set containing a known set of addresses. 
#[derive(Clone, Debug, PartialEq, Eq, Default, MallocSizeOf)] @@ -90,8 +92,8 @@ impl ValidatorSet for SimpleList { _: bool, _: &Header, _: AuxiliaryData, - ) -> ::engines::EpochChange { - ::engines::EpochChange::No + ) -> crate::engines::EpochChange { + crate::engines::EpochChange::No } fn epoch_set( @@ -100,7 +102,7 @@ impl ValidatorSet for SimpleList { _: &EthereumMachine, _: BlockNumber, _: &[u8], - ) -> Result<(SimpleList, Option), ::error::Error> { + ) -> Result<(SimpleList, Option), crate::error::Error> { Ok((self.clone(), None)) } diff --git a/crates/ethcore/src/engines/validator_set/test.rs b/crates/ethcore/src/engines/validator_set/test.rs index 88db87f75b..748345ab95 100644 --- a/crates/ethcore/src/engines/validator_set/test.rs +++ b/crates/ethcore/src/engines/validator_set/test.rs @@ -17,18 +17,20 @@ /// Used for Engine testing. use std::str::FromStr; use std::sync::{ - atomic::{AtomicUsize, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicUsize, Ordering as AtomicOrdering}, }; +use crate::types::{BlockNumber, header::Header}; use bytes::Bytes; use ethereum_types::{Address, H256}; use parity_util_mem::MallocSizeOf; -use types::{header::Header, BlockNumber}; use super::{SimpleList, SystemCall, ValidatorSet}; -use error::Error as EthcoreError; -use machine::{AuxiliaryData, Call, EthereumMachine}; +use crate::{ + error::Error as EthcoreError, + machine::{AuxiliaryData, Call, EthereumMachine}, +}; /// Set used for testing with a single validator. 
#[derive(Clone, MallocSizeOf)] @@ -47,10 +49,9 @@ impl Default for TestSet { impl TestSet { pub fn new(last_malicious: Arc, last_benign: Arc) -> Self { TestSet { - validator: SimpleList::new(vec![Address::from_str( - "7d577a597b2742b498cb5cf0c26cdcd726d39e6e", - ) - .unwrap()]), + validator: SimpleList::new(vec![ + Address::from_str("7d577a597b2742b498cb5cf0c26cdcd726d39e6e").unwrap(), + ]), last_malicious, last_benign, } @@ -99,8 +100,8 @@ impl ValidatorSet for TestSet { _: bool, _: &Header, _: AuxiliaryData, - ) -> ::engines::EpochChange { - ::engines::EpochChange::No + ) -> crate::engines::EpochChange { + crate::engines::EpochChange::No } fn epoch_set( @@ -109,7 +110,7 @@ impl ValidatorSet for TestSet { _: &EthereumMachine, _: BlockNumber, _: &[u8], - ) -> Result<(SimpleList, Option), ::error::Error> { + ) -> Result<(SimpleList, Option), crate::error::Error> { Ok((self.validator.clone(), None)) } diff --git a/crates/ethcore/src/error.rs b/crates/ethcore/src/error.rs index 2a27c34b6e..6a84c3901c 100644 --- a/crates/ethcore/src/error.rs +++ b/crates/ethcore/src/error.rs @@ -22,18 +22,20 @@ use std::{error, fmt, time::SystemTime}; +use crate::{ + snapshot::Error as SnapshotError, + types::{BlockNumber, transaction::Error as TransactionError}, +}; use crypto::publickey::Error as EthkeyError; use ethereum_types::{Address, Bloom, H256, U256}; use ethtrie::TrieError; use rlp; use snappy::InvalidInput; -use snapshot::Error as SnapshotError; -use types::{transaction::Error as TransactionError, BlockNumber}; use unexpected::{Mismatch, OutOfBounds}; -use engines::EngineError; +use crate::engines::EngineError; -pub use executed::{CallError, ExecutionError}; +pub use crate::executed::{CallError, ExecutionError}; #[derive(Debug, PartialEq, Clone, Eq)] /// Errors concerning block processing. 
diff --git a/crates/ethcore/src/ethereum/ethash.rs b/crates/ethcore/src/ethereum/ethash.rs index 1040d144f9..ee185c4523 100644 --- a/crates/ethcore/src/ethereum/ethash.rs +++ b/crates/ethcore/src/ethereum/ethash.rs @@ -21,25 +21,26 @@ use std::{ sync::Arc, }; -use ethereum_types::{H256, H64, U256}; +use crate::types::{ + BlockNumber, + header::{ExtendedHeader, Header}, +}; +use ethereum_types::{H64, H256, U256}; use ethjson::{self, uint::Uint}; use hash::KECCAK_EMPTY_LIST_RLP; use rlp::Rlp; -use types::{ - header::{ExtendedHeader, Header}, - BlockNumber, -}; use unexpected::{Mismatch, OutOfBounds}; -use block::ExecutedBlock; -use engines::{ - self, - block_reward::{self, BlockRewardContract, RewardKind}, - Engine, +use crate::{ + block::ExecutedBlock, + engines::{ + self, Engine, + block_reward::{self, BlockRewardContract, RewardKind}, + }, + error::{BlockError, Error}, + machine::EthereumMachine, }; -use error::{BlockError, Error}; -use ethash::{self, quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor}; -use machine::EthereumMachine; +use ethash::{self, EthashManager, OptimizeFor, quick_get_difficulty, slow_hash_block_number}; /// Number of blocks in an ethash snapshot. // make dependent on difficulty incrment divisor? 
@@ -449,7 +450,7 @@ impl Engine for Arc { } fn snapshot_components(&self) -> Option> { - Some(Box::new(::snapshot::PowSnapshot::new( + Some(Box::new(crate::snapshot::PowSnapshot::new( SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS, ))) @@ -561,18 +562,20 @@ fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number: u mod tests { use super::{ super::{new_homestead_test_machine, new_mcip3_test, new_morden}, - ecip1017_eras_block_reward, Ethash, EthashParams, + Ethash, EthashParams, ecip1017_eras_block_reward, + }; + use crate::{ + block::*, + engines::Engine, + error::{BlockError, Error, ErrorKind}, + spec::Spec, + test_helpers::get_temp_state_db, + types::header::Header, }; - use block::*; - use engines::Engine; - use error::{BlockError, Error, ErrorKind}; - use ethereum_types::{Address, H256, H64, U256}; + use ethereum_types::{Address, H64, H256, U256}; use rlp; - use spec::Spec; use std::{collections::BTreeMap, str::FromStr, sync::Arc}; use tempdir::TempDir; - use test_helpers::get_temp_state_db; - use types::header::Header; fn test_spec() -> Spec { let tempdir = TempDir::new("").unwrap(); diff --git a/crates/ethcore/src/ethereum/mod.rs b/crates/ethcore/src/ethereum/mod.rs index 3e8f49c559..cfebc56fba 100644 --- a/crates/ethcore/src/ethereum/mod.rs +++ b/crates/ethcore/src/ethereum/mod.rs @@ -29,7 +29,7 @@ pub mod public_key_to_address; pub use self::{denominations::*, ethash::Ethash}; use super::spec::*; -use machine::EthereumMachine; +use crate::machine::EthereumMachine; /// Load chain spec from `SpecParams` and JSON. 
pub fn load<'a, T: Into>>>(params: T, b: &[u8]) -> Spec { @@ -369,11 +369,13 @@ pub fn new_kovan_wasm_test_machine() -> EthereumMachine { #[cfg(test)] mod tests { use super::*; + use crate::{ + state::*, + test_helpers::get_temp_state_db, + types::{view, views::BlockView}, + }; use ethereum_types::{H160, H256, U256}; - use state::*; use std::str::FromStr; - use test_helpers::get_temp_state_db; - use types::{view, views::BlockView}; #[test] fn ensure_db_good() { diff --git a/crates/ethcore/src/executed.rs b/crates/ethcore/src/executed.rs index 6e609f8e2d..946fb22ed9 100644 --- a/crates/ethcore/src/executed.rs +++ b/crates/ethcore/src/executed.rs @@ -16,11 +16,13 @@ //! Transaction execution format module. +use crate::{ + trace::{FlatTrace, VMTrace}, + types::{log_entry::LogEntry, state_diff::StateDiff}, +}; use bytes::Bytes; use ethereum_types::{Address, U256, U512}; use ethtrie; -use trace::{FlatTrace, VMTrace}; -use types::{log_entry::LogEntry, state_diff::StateDiff}; use vm; use std::{error, fmt}; diff --git a/crates/ethcore/src/executive.rs b/crates/ethcore/src/executive.rs index 8b0f0b42bc..a948b6c99b 100644 --- a/crates/ethcore/src/executive.rs +++ b/crates/ethcore/src/executive.rs @@ -15,20 +15,22 @@ // along with OpenEthereum. If not, see . //! Transaction Execution environment. 
+pub use crate::executed::{Executed, ExecutionResult}; +use crate::{ + executed::ExecutionError, + externalities::*, + factory::VmFactory, + machine::EthereumMachine as Machine, + state::{Backend as StateBackend, CleanupMode, State, Substate}, + trace::{self, Tracer, VMTracer}, + transaction_ext::Transaction, + types::transaction::{Action, SignedTransaction, TypedTransaction}, +}; use bytes::{Bytes, BytesRef}; use ethereum_types::{Address, H256, U256, U512}; use evm::{CallType, FinalizationResult, Finalize}; -use executed::ExecutionError; -pub use executed::{Executed, ExecutionResult}; -use externalities::*; -use factory::VmFactory; use hash::keccak; -use machine::EthereumMachine as Machine; -use state::{Backend as StateBackend, CleanupMode, State, Substate}; use std::{cmp, convert::TryFrom, sync::Arc}; -use trace::{self, Tracer, VMTracer}; -use transaction_ext::Transaction; -use types::transaction::{Action, SignedTransaction, TypedTransaction}; use vm::{ self, AccessList, ActionParams, ActionValue, CleanDustMode, CreateContractAddress, EnvInfo, ResumeCall, ResumeCreate, ReturnData, Schedule, TrapError, @@ -259,9 +261,7 @@ impl<'a> CallCreateExecutive<'a> { ) -> Self { trace!( "Executive::call(params={:?}) self.env_info={:?}, parent_static={}", - params, - info, - parent_static_flag + params, info, parent_static_flag ); let gas = params.gas; @@ -315,9 +315,7 @@ impl<'a> CallCreateExecutive<'a> { ) -> Self { trace!( "Executive::create(params={:?}) self.env_info={:?}, static={}", - params, - info, - static_flag + params, info, static_flag ); let gas = params.gas; @@ -884,135 +882,162 @@ impl<'a> CallCreateExecutive<'a> { let mut callstack: Vec<(Option
, CallCreateExecutive<'a>)> = Vec::new(); loop { match last_res { - None => { - match callstack.pop() { - Some((_, exec)) => { - let second_last = callstack.last_mut(); - let parent_substate = match second_last { - Some((_, ref mut second_last)) => second_last.unconfirmed_substate().expect("Current stack value is created from second last item; second last item must be call or create; qed"), + None => match callstack.pop() { + Some((_, exec)) => { + let second_last = callstack.last_mut(); + let parent_substate = match second_last { + Some((_, second_last)) => second_last.unconfirmed_substate().expect("Current stack value is created from second last item; second last item must be call or create; qed"), None => top_substate, }; - last_res = Some((exec.is_create, exec.gas, exec.exec(state, parent_substate, tracer, vm_tracer))); - }, - None => panic!("When callstack only had one item and it was executed, this function would return; callstack never reaches zero item; qed"), - } - }, - Some((is_create, gas, Ok(val))) => { - let current = callstack.pop(); - - match current { - Some((address, mut exec)) => { - if is_create { - let address = address.expect("If the last executed status was from a create executive, then the destination address was pushed to the callstack; address is_some if it is_create; qed"); - - match val { - Ok(ref val) if val.apply_state => { - tracer.done_trace_create( - gas - val.gas_left, - &val.return_data, - address - ); - }, - Ok(_) => { - tracer.done_trace_failed(&vm::Error::Reverted); - }, - Err(ref err) => { - tracer.done_trace_failed(err); - }, - } - - vm_tracer.done_subtrace(); - - let second_last = callstack.last_mut(); - let parent_substate = match second_last { - Some((_, ref mut second_last)) => second_last.unconfirmed_substate().expect("Current stack value is created from second last item; second last item must be call or create; qed"), + last_res = Some(( + exec.is_create, + exec.gas, + exec.exec(state, parent_substate, tracer, 
vm_tracer), + )); + } + None => panic!( + "When callstack only had one item and it was executed, this function would return; callstack never reaches zero item; qed" + ), + }, + Some((is_create, gas, Ok(val))) => { + let current = callstack.pop(); + + match current { + Some((address, mut exec)) => { + if is_create { + let address = address.expect("If the last executed status was from a create executive, then the destination address was pushed to the callstack; address is_some if it is_create; qed"); + + match val { + Ok(ref val) if val.apply_state => { + tracer.done_trace_create( + gas - val.gas_left, + &val.return_data, + address, + ); + } + Ok(_) => { + tracer.done_trace_failed(&vm::Error::Reverted); + } + Err(ref err) => { + tracer.done_trace_failed(err); + } + } + + vm_tracer.done_subtrace(); + + let second_last = callstack.last_mut(); + let parent_substate = match second_last { + Some((_, second_last)) => second_last.unconfirmed_substate().expect("Current stack value is created from second last item; second last item must be call or create; qed"), None => top_substate, }; - let contract_create_result = into_contract_create_result(val, &address, exec.unconfirmed_substate().expect("Executive is resumed from a create; it has an unconfirmed substate; qed")); - last_res = Some((exec.is_create, exec.gas, exec.resume_create( - contract_create_result, - state, - parent_substate, - tracer, - vm_tracer - ))); - } else { - match val { - Ok(ref val) if val.apply_state => { - tracer.done_trace_call( - gas - val.gas_left, - &val.return_data, - ); - }, - Ok(_) => { - tracer.done_trace_failed(&vm::Error::Reverted); - }, - Err(ref err) => { - tracer.done_trace_failed(err); - }, - } - - vm_tracer.done_subtrace(); - - let second_last = callstack.last_mut(); - let parent_substate = match second_last { - Some((_, ref mut second_last)) => second_last.unconfirmed_substate().expect("Current stack value is created from second last item; second last item must be call or create; qed"), + 
let contract_create_result = into_contract_create_result(val, &address, exec.unconfirmed_substate().expect("Executive is resumed from a create; it has an unconfirmed substate; qed")); + last_res = Some(( + exec.is_create, + exec.gas, + exec.resume_create( + contract_create_result, + state, + parent_substate, + tracer, + vm_tracer, + ), + )); + } else { + match val { + Ok(ref val) if val.apply_state => { + tracer + .done_trace_call(gas - val.gas_left, &val.return_data); + } + Ok(_) => { + tracer.done_trace_failed(&vm::Error::Reverted); + } + Err(ref err) => { + tracer.done_trace_failed(err); + } + } + + vm_tracer.done_subtrace(); + + let second_last = callstack.last_mut(); + let parent_substate = match second_last { + Some((_, second_last)) => second_last.unconfirmed_substate().expect("Current stack value is created from second last item; second last item must be call or create; qed"), None => top_substate, }; - last_res = Some((exec.is_create, exec.gas, exec.resume_call( - into_message_call_result(val), - state, - parent_substate, - tracer, - vm_tracer - ))); - } - }, - None => return val, - } - }, - Some((_, _, Err(TrapError::Call(subparams, resume)))) => { - tracer.prepare_trace_call(&subparams, resume.depth + 1, resume.machine.builtin(&subparams.address, resume.info.number).is_some()); - vm_tracer.prepare_subtrace(subparams.code.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); - - let sub_exec = CallCreateExecutive::new_call_raw( - subparams, - resume.info, - resume.machine, - resume.schedule, - resume.factory, - resume.depth + 1, - resume.stack_depth, - resume.static_flag, - ); - - callstack.push((None, resume)); - callstack.push((None, sub_exec)); - last_res = None; - }, - Some((_, _, Err(TrapError::Create(subparams, address, resume)))) => { - tracer.prepare_trace_create(&subparams); - vm_tracer.prepare_subtrace(subparams.code.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); - - let sub_exec = CallCreateExecutive::new_create_raw( - subparams, 
- resume.info, - resume.machine, - resume.schedule, - resume.factory, - resume.depth + 1, - resume.stack_depth, - resume.static_flag - ); - - callstack.push((Some(address), resume)); - callstack.push((None, sub_exec)); - last_res = None; - }, - } + last_res = Some(( + exec.is_create, + exec.gas, + exec.resume_call( + into_message_call_result(val), + state, + parent_substate, + tracer, + vm_tracer, + ), + )); + } + } + None => return val, + } + } + Some((_, _, Err(TrapError::Call(subparams, resume)))) => { + tracer.prepare_trace_call( + &subparams, + resume.depth + 1, + resume + .machine + .builtin(&subparams.address, resume.info.number) + .is_some(), + ); + vm_tracer.prepare_subtrace( + subparams + .code + .as_ref() + .map_or_else(|| &[] as &[u8], |d| &*d as &[u8]), + ); + + let sub_exec = CallCreateExecutive::new_call_raw( + subparams, + resume.info, + resume.machine, + resume.schedule, + resume.factory, + resume.depth + 1, + resume.stack_depth, + resume.static_flag, + ); + + callstack.push((None, resume)); + callstack.push((None, sub_exec)); + last_res = None; + } + Some((_, _, Err(TrapError::Create(subparams, address, resume)))) => { + tracer.prepare_trace_create(&subparams); + vm_tracer.prepare_subtrace( + subparams + .code + .as_ref() + .map_or_else(|| &[] as &[u8], |d| &*d as &[u8]), + ); + + let sub_exec = CallCreateExecutive::new_create_raw( + subparams, + resume.info, + resume.machine, + resume.schedule, + resume.factory, + resume.depth + 1, + resume.stack_depth, + resume.static_flag, + ); + + callstack.push((Some(address), resume)); + callstack.push((None, sub_exec)); + last_res = None; + } + } } } } @@ -1531,22 +1556,31 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let fees_value = fees_value.saturating_sub(burnt_fee); - trace!("exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", - t.tx().gas, sstore_refunds, 
suicide_refunds, refunds_bound, gas_left_prerefund, refunded, gas_left, gas_used, refund_value, fees_value); + trace!( + "exec::finalize: t.gas={}, sstore_refunds={}, suicide_refunds={}, refunds_bound={}, gas_left_prerefund={}, refunded={}, gas_left={}, gas_used={}, refund_value={}, fees_value={}\n", + t.tx().gas, + sstore_refunds, + suicide_refunds, + refunds_bound, + gas_left_prerefund, + refunded, + gas_left, + gas_used, + refund_value, + fees_value + ); let sender = t.sender(); trace!( "exec::finalize: Refunding refund_value={}, sender={}\n", - refund_value, - sender + refund_value, sender ); // Below: NoEmpty is safe since the sender must already be non-null to have sent this transaction self.state .add_balance(&sender, &refund_value, CleanupMode::NoEmpty)?; trace!( "exec::finalize: Compensating author: fees_value={}, author={}\n", - fees_value, - &self.info.author + fees_value, &self.info.author ); self.state.add_balance( &self.info.author, @@ -1627,38 +1661,40 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { #[allow(dead_code)] mod tests { use super::*; + use crate::{ + error::ExecutionError, + machine::EthereumMachine, + state::{CleanupMode, Substate}, + test_helpers::{get_temp_state, get_temp_state_with_factory}, + trace::{ + ExecutiveTracer, ExecutiveVMTracer, FlatTrace, MemoryDiff, NoopTracer, NoopVMTracer, + StorageDiff, Tracer, VMExecutedOperation, VMOperation, VMTrace, VMTracer, trace, + }, + types::transaction::{ + AccessListTx, Action, EIP1559TransactionTx, Transaction, TypedTransaction, + }, + }; use crypto::publickey::{Generator, Random}; - use error::ExecutionError; use ethereum_types::{Address, BigEndianHash, H160, H256, U256, U512}; - use evm::{Factory, VMType}; - use machine::EthereumMachine; + use evm::{Factory, VMType, evm_test, evm_test_ignore}; use rustc_hex::FromHex; - use state::{CleanupMode, Substate}; use std::{str::FromStr, sync::Arc}; - use test_helpers::{get_temp_state, get_temp_state_with_factory}; - use trace::{ - trace, 
ExecutiveTracer, ExecutiveVMTracer, FlatTrace, MemoryDiff, NoopTracer, NoopVMTracer, - StorageDiff, Tracer, VMExecutedOperation, VMOperation, VMTrace, VMTracer, - }; - use types::transaction::{ - AccessListTx, Action, EIP1559TransactionTx, Transaction, TypedTransaction, - }; use vm::{ActionParams, ActionValue, CallType, CreateContractAddress, EnvInfo}; fn make_frontier_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_frontier_test_machine(); + let mut machine = crate::ethereum::new_frontier_test_machine(); machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); machine } fn make_byzantium_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_byzantium_test_machine(); + let mut machine = crate::ethereum::new_byzantium_test_machine(); machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); machine } fn make_london_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_london_test_machine(); + let mut machine = crate::ethereum::new_london_test_machine(); machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); machine } @@ -2052,7 +2088,7 @@ mod tests { .add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty) .unwrap(); let info = EnvInfo::default(); - let machine = ::ethereum::new_byzantium_test_machine(); + let machine = crate::ethereum::new_byzantium_test_machine(); let schedule = machine.schedule(info.number); let mut substate = Substate::new(); let mut tracer = ExecutiveTracer::default(); @@ -2952,7 +2988,7 @@ mod tests { params.code = Some(Arc::new(code)); params.value = ActionValue::Transfer(U256::zero()); let info = EnvInfo::default(); - let machine = ::ethereum::new_byzantium_test_machine(); + let machine = crate::ethereum::new_byzantium_test_machine(); let schedule = machine.schedule(info.number); let mut substate = Substate::new(); @@ -3014,7 +3050,7 @@ mod tests { .unwrap(); let 
info = EnvInfo::default(); - let machine = ::ethereum::new_constantinople_test_machine(); + let machine = crate::ethereum::new_constantinople_test_machine(); let schedule = machine.schedule(info.number); assert_eq!( @@ -3105,7 +3141,7 @@ mod tests { info.number = 100; // Network with wasm activated at block 10 - let machine = ::ethereum::new_kovan_wasm_test_machine(); + let machine = crate::ethereum::new_kovan_wasm_test_machine(); let mut output = [0u8; 20]; let FinalizationResult { diff --git a/crates/ethcore/src/exit.rs b/crates/ethcore/src/exit.rs new file mode 100644 index 0000000000..cf44453354 --- /dev/null +++ b/crates/ethcore/src/exit.rs @@ -0,0 +1,88 @@ +//! allows controlled shutdown of the node software. + +use std::sync::Arc; + +use parking_lot::{Condvar, Mutex}; + +#[derive(Debug)] +/// Status used to exit or restart the program. +pub struct ExitStatus { + /// Whether the program panicked. + panicking: bool, + /// Whether the program should exit. + should_exit: bool, +} + +impl ExitStatus { + /// new not panicking + pub fn new() -> Self { + ExitStatus { + panicking: false, + should_exit: false, + } + } + + /// new one that panics + pub fn new_panicking() -> Self { + ExitStatus { + panicking: true, + should_exit: true, + } + } + + /// regular exit wihout panic + pub fn new_should_exit() -> Self { + ExitStatus { + panicking: false, + should_exit: true, + } + } + + /// Signals the overlaying system to perform a shutdown. + pub fn do_shutdown(self: &mut Self) { + self.should_exit = true; + } + + /// has someone requested a shutdown? + pub fn should_exit(self: &Self) -> bool { + return self.should_exit; + } + + /// has someone requested a panic? + pub fn is_panicking(self: &Self) -> bool { + return self.panicking; + } +} + +/// Shutdown Manager allows engines to signal the system to shutdown. 
+pub struct ShutdownManager { + exit_mutex: Arc<(Mutex, Condvar)>, +} + +impl ShutdownManager { + /// get's a Null Object that does not interact with the system at all. + pub fn null() -> Self { + return ShutdownManager { + exit_mutex: Arc::new(( + Mutex::new(ExitStatus { + panicking: false, + should_exit: false, + }), + Condvar::new(), + )), + }; + } + + /// creates a new shutdown manager, use ::null() if you do not wish to provide a mutex. + pub fn new(mutex_original: &Arc<(Mutex, Condvar)>) -> Self { + return ShutdownManager { + exit_mutex: mutex_original.clone(), + }; + } + + /// demands a shutdown of the node software + pub fn demand_shutdown(self: &Self) { + self.exit_mutex.0.lock().do_shutdown(); + self.exit_mutex.1.notify_all(); + } +} diff --git a/crates/ethcore/src/externalities.rs b/crates/ethcore/src/externalities.rs index 052e4d88ff..1a4e3e81c9 100644 --- a/crates/ethcore/src/externalities.rs +++ b/crates/ethcore/src/externalities.rs @@ -15,14 +15,16 @@ // along with OpenEthereum. If not, see . //! Transaction Execution environment. 
+use crate::{ + executive::*, + machine::EthereumMachine as Machine, + state::{Backend as StateBackend, CleanupMode, State, Substate}, + trace::{Tracer, VMTracer}, + types::transaction::UNSIGNED_SENDER, +}; use bytes::Bytes; use ethereum_types::{Address, BigEndianHash, H256, U256}; -use executive::*; -use machine::EthereumMachine as Machine; -use state::{Backend as StateBackend, CleanupMode, State, Substate}; use std::{cmp, sync::Arc}; -use trace::{Tracer, VMTracer}; -use types::transaction::UNSIGNED_SENDER; use vm::{ self, AccessList, ActionParams, ActionValue, CallType, ContractCreateResult, CreateContractAddress, EnvInfo, Ext, MessageCallResult, ReturnData, Schedule, TrapKind, @@ -214,15 +216,12 @@ where self.vm_tracer, ); let output = match &r { - Ok(ref r) => H256::from_slice(&r.return_data[..32]), + Ok(r) => H256::from_slice(&r.return_data[..32]), _ => H256::default(), }; trace!( "ext: blockhash contract({}) -> {:?}({}) self.env_info.number={}\n", - number, - r, - output, - self.env_info.number + number, r, output, self.env_info.number ); output } else { @@ -240,17 +239,14 @@ where let r = self.env_info.last_hashes[index as usize].clone(); trace!( "ext: blockhash({}) -> {} self.env_info.number={}\n", - number, - r, - self.env_info.number + number, r, self.env_info.number ); r } false => { trace!( "ext: blockhash({}) -> null self.env_info.number={}\n", - number, - self.env_info.number + number, self.env_info.number ); H256::zero() } @@ -444,7 +440,7 @@ where } fn log(&mut self, topics: Vec, data: &[u8]) -> vm::Result<()> { - use types::log_entry::LogEntry; + use crate::types::log_entry::LogEntry; if self.static_flag { return Err(vm::Error::MutableCallInStaticContext); @@ -561,12 +557,14 @@ where #[cfg(test)] mod tests { use super::*; + use crate::{ + state::{State, Substate}, + test_helpers::get_temp_state, + trace::{NoopTracer, NoopVMTracer}, + }; use ethereum_types::{Address, U256}; use evm::{CallType, EnvInfo, Ext}; - use state::{State, Substate}; use 
std::str::FromStr; - use test_helpers::get_temp_state; - use trace::{NoopTracer, NoopVMTracer}; fn get_test_origin() -> OriginInfo { OriginInfo { @@ -591,8 +589,8 @@ mod tests { } struct TestSetup { - state: State<::state_db::StateDB>, - machine: ::machine::EthereumMachine, + state: State, + machine: crate::machine::EthereumMachine, schedule: Schedule, sub_state: Substate, env_info: EnvInfo, @@ -606,7 +604,7 @@ mod tests { impl TestSetup { fn new() -> Self { - let machine = ::spec::Spec::new_test_machine(); + let machine = crate::spec::Spec::new_test_machine(); let env_info = get_test_env_info(); let schedule = machine.schedule(env_info.number); TestSetup { @@ -769,10 +767,10 @@ mod tests { #[test] fn can_log() { let log_data = vec![120u8, 110u8]; - let log_topics = vec![H256::from_str( - "af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd", - ) - .unwrap()]; + let log_topics = vec![ + H256::from_str("af0fa234a6af46afa23faf23bcbc1c1cb4bcb7bcbe7e7e7ee3ee2edddddddddd") + .unwrap(), + ]; let mut setup = TestSetup::new(); let state = &mut setup.state; diff --git a/crates/ethcore/src/factory.rs b/crates/ethcore/src/factory.rs index f89bce9eeb..de946226c1 100644 --- a/crates/ethcore/src/factory.rs +++ b/crates/ethcore/src/factory.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use account_db::Factory as AccountFactory; +use crate::account_db::Factory as AccountFactory; use ethtrie::RlpCodec; use evm::{Factory as EvmFactory, VMType}; use keccak_hasher::KeccakHasher; diff --git a/crates/ethcore/src/json_tests/chain.rs b/crates/ethcore/src/json_tests/chain.rs index cd1c939410..fc18f7f8df 100644 --- a/crates/ethcore/src/json_tests/chain.rs +++ b/crates/ethcore/src/json_tests/chain.rs @@ -14,21 +14,25 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::exit::ShutdownManager; + use super::HookType; -use client::{ - Balance, BlockChainClient, BlockId, ChainInfo, Client, ClientConfig, EvmTestClient, - ImportBlock, Nonce, StateOrBlock, +use crate::{ + client::{ + Balance, BlockChainClient, BlockId, ChainInfo, Client, ClientConfig, EvmTestClient, + ImportBlock, Nonce, StateOrBlock, + }, + io::IoChannel, + miner::Miner, + spec::Genesis, + test_helpers, + verification::{VerifierType, queue::kind::blocks::Unverified}, }; use ethereum_types::{H256, U256}; use ethjson; -use io::IoChannel; use log::warn; -use miner::Miner; use rustc_hex::ToHex; -use spec::Genesis; use std::{path::Path, sync::Arc}; -use test_helpers; -use verification::{queue::kind::blocks::Unverified, VerifierType}; fn check_poststate( client: &Arc, @@ -199,6 +203,7 @@ pub fn json_chain_test( db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .expect("Failed to instantiate a new Client"); diff --git a/crates/ethcore/src/json_tests/difficulty.rs b/crates/ethcore/src/json_tests/difficulty.rs index 60296e707e..247a8bc029 100644 --- a/crates/ethcore/src/json_tests/difficulty.rs +++ b/crates/ethcore/src/json_tests/difficulty.rs @@ -14,11 +14,10 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{spec::Spec, types::header::Header}; use ethereum_types::U256; use ethjson; -use spec::Spec; use std::path::Path; -use types::header::Header; use super::HookType; diff --git a/crates/ethcore/src/json_tests/executive.rs b/crates/ethcore/src/json_tests/executive.rs index ffd20e935b..4b5540f7e5 100644 --- a/crates/ethcore/src/json_tests/executive.rs +++ b/crates/ethcore/src/json_tests/executive.rs @@ -15,20 +15,22 @@ // along with OpenEthereum. If not, see . 
use super::test_common::*; +use crate::{ + executive::*, + externalities::*, + machine::EthereumMachine as Machine, + state::{Backend as StateBackend, State, Substate}, + test_helpers::get_temp_state, + trace::{NoopTracer, NoopVMTracer, Tracer, VMTracer}, +}; use bytes::Bytes; use ethereum_types::BigEndianHash; use ethjson; use ethtrie; use evm::Finalize; -use executive::*; -use externalities::*; use hash::keccak; -use machine::EthereumMachine as Machine; use rlp::RlpStream; -use state::{Backend as StateBackend, State, Substate}; use std::{path::Path, sync::Arc}; -use test_helpers::get_temp_state; -use trace::{NoopTracer, NoopVMTracer, Tracer, VMTracer}; use vm::{ self, ActionParams, CallType, ContractCreateResult, CreateContractAddress, EnvInfo, Ext, MessageCallResult, ReturnData, Schedule, @@ -311,7 +313,7 @@ pub fn json_executive_test( state.populate_from(From::from(vm.pre_state.clone())); let info: EnvInfo = From::from(vm.env); let machine = { - let mut machine = ::ethereum::new_frontier_test_machine(); + let mut machine = crate::ethereum::new_frontier_test_machine(); machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = 1)); machine }; diff --git a/crates/ethcore/src/json_tests/local.rs b/crates/ethcore/src/json_tests/local.rs index bc20d32aec..e6f2a1f4b2 100644 --- a/crates/ethcore/src/json_tests/local.rs +++ b/crates/ethcore/src/json_tests/local.rs @@ -1,14 +1,16 @@ use super::HookType; +use crate::{ + types::{ + BlockNumber, + transaction::{TypedTransaction, TypedTxId, UnverifiedTransaction}, + }, + verification::queue::kind::blocks::Unverified, +}; use ethereum_types::U256; use ethjson::{self, blockchain::Block}; use log::warn; use rlp::RlpStream; use std::path::Path; -use types::{ - transaction::{TypedTransaction, TypedTxId, UnverifiedTransaction}, - BlockNumber, -}; -use verification::queue::kind::blocks::Unverified; pub fn json_local_block_en_de_test( _test: ðjson::test::LocalTests, diff --git a/crates/ethcore/src/json_tests/mod.rs 
b/crates/ethcore/src/json_tests/mod.rs index 5cda1f28e7..725e92cc9b 100644 --- a/crates/ethcore/src/json_tests/mod.rs +++ b/crates/ethcore/src/json_tests/mod.rs @@ -30,5 +30,5 @@ pub mod runner; pub use self::{ executive::json_executive_test, - test_common::{debug_include_test, find_json_files_recursive, HookType}, + test_common::{HookType, debug_include_test, find_json_files_recursive}, }; diff --git a/crates/ethcore/src/json_tests/state.rs b/crates/ethcore/src/json_tests/state.rs index 445c60a0b1..8fb25313d6 100644 --- a/crates/ethcore/src/json_tests/state.rs +++ b/crates/ethcore/src/json_tests/state.rs @@ -14,12 +14,14 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use super::{test_common::*, HookType}; -use client::{EvmTestClient, EvmTestError, TransactErr, TransactSuccess}; +use super::{HookType, test_common::*}; +use crate::{ + client::{EvmTestClient, EvmTestError, TransactErr, TransactSuccess}, + pod_state::PodState, + trace, +}; use ethjson::{self, spec::ForkSpec}; -use pod_state::PodState; use std::path::Path; -use trace; use vm::EnvInfo; fn skip_test( diff --git a/crates/ethcore/src/json_tests/transaction.rs b/crates/ethcore/src/json_tests/transaction.rs index 1d22bccb81..426b955f2d 100644 --- a/crates/ethcore/src/json_tests/transaction.rs +++ b/crates/ethcore/src/json_tests/transaction.rs @@ -15,14 +15,16 @@ // along with OpenEthereum. If not, see . 
use super::test_common::*; -use client::EvmTestClient; +use crate::{ + client::EvmTestClient, + transaction_ext::Transaction, + types::{ + header::Header, + transaction::{TypedTransaction, UnverifiedTransaction}, + }, +}; use ethjson; use std::path::Path; -use transaction_ext::Transaction; -use types::{ - header::Header, - transaction::{TypedTransaction, UnverifiedTransaction}, -}; pub fn json_transaction_test( path: &Path, @@ -68,7 +70,7 @@ pub fn json_transaction_test( let rlp: Vec = test.rlp.clone().into(); let res = TypedTransaction::decode(&rlp) - .map_err(::error::Error::from) + .map_err(crate::error::Error::from) .and_then(|t: UnverifiedTransaction| { let mut header: Header = Default::default(); // Use high enough number to activate all required features. diff --git a/crates/ethcore/src/lib.rs b/crates/ethcore/src/lib.rs index e499cd87e8..a453c43242 100644 --- a/crates/ethcore/src/lib.rs +++ b/crates/ethcore/src/lib.rs @@ -18,77 +18,26 @@ //! Ethcore library -extern crate ansi_term; extern crate common_types as types; -extern crate crossbeam_utils; -extern crate derive_more; -extern crate ethabi; -extern crate ethash; extern crate ethcore_blockchain as blockchain; extern crate ethcore_builtin as builtin; extern crate ethcore_call_contract as call_contract; extern crate ethcore_db as db; extern crate ethcore_io as io; -extern crate ethcore_miner; -extern crate ethereum_types; -extern crate ethjson; -extern crate hash_db; -extern crate hbbft; -extern crate itertools; -extern crate journaldb; extern crate keccak_hash as hash; -extern crate keccak_hasher; -extern crate kvdb; -extern crate len_caching_lock; -extern crate lru_cache; -extern crate maplit; -extern crate memory_cache; -extern crate memory_db; -extern crate num_cpus; extern crate parity_bytes as bytes; extern crate parity_crypto as crypto; extern crate parity_snappy as snappy; -extern crate parity_util_mem; -extern crate parking_lot; extern crate patricia_trie_ethereum as ethtrie; -extern crate rand; 
-extern crate rand_065; -extern crate rayon; -extern crate reth_util; -extern crate rlp; -extern crate rmp_serde; -extern crate rustc_hex; -extern crate serde; -extern crate stats; -extern crate time_utils; -extern crate tiny_keccak; extern crate trie_db as trie; extern crate triehash_ethereum as triehash; -extern crate unexpected; -extern crate using_queue; -extern crate vm; -extern crate wasm; #[cfg(any(test, feature = "blooms-db"))] extern crate blooms_db; -#[cfg(any(test, feature = "env_logger"))] -extern crate env_logger; #[cfg(test)] extern crate ethcore_accounts as accounts; -#[cfg(feature = "stratum")] -extern crate ethcore_stratum; -#[cfg(feature = "json-tests")] -extern crate globset; #[cfg(any(test, feature = "kvdb-rocksdb"))] extern crate kvdb_rocksdb; -#[cfg(test)] -extern crate rlp_compress; -#[cfg(any(test, feature = "tempdir"))] -extern crate tempdir; -#[cfg(feature = "json-tests")] -extern crate tempfile; -#[cfg(feature = "json-tests")] -extern crate walkdir; #[macro_use] extern crate ethabi_contract; @@ -107,14 +56,8 @@ extern crate trace_time; #[macro_use] extern crate serde_derive; -#[cfg_attr(test, macro_use)] -extern crate evm; - -#[cfg(all(test, feature = "price-info"))] -extern crate fetch; - -#[cfg(all(test, feature = "price-info"))] -extern crate parity_runtime; +#[cfg(test)] +use evm; pub mod block; pub mod client; @@ -123,6 +66,7 @@ pub mod error; pub mod ethereum; pub mod executed; pub mod executive; +pub mod exit; pub mod machine; pub mod miner; pub mod pod_account; @@ -147,6 +91,6 @@ pub mod test_helpers; #[cfg(test)] mod tests; +pub use crate::executive::contract_address; pub use evm::CreateContractAddress; -pub use executive::contract_address; pub use trie::TrieSpec; diff --git a/crates/ethcore/src/machine/impls.rs b/crates/ethcore/src/machine/impls.rs index a17d94cdc9..50329c87ac 100644 --- a/crates/ethcore/src/machine/impls.rs +++ b/crates/ethcore/src/machine/impls.rs @@ -22,30 +22,32 @@ use std::{ sync::Arc, }; -use 
ethereum_types::{Address, H256, U256}; -use types::{ +use crate::types::{ + BlockNumber, header::Header, transaction::{ - self, SignedTransaction, TypedTransaction, UnverifiedTransaction, SYSTEM_ADDRESS, - UNSIGNED_SENDER, + self, SYSTEM_ADDRESS, SignedTransaction, TypedTransaction, UNSIGNED_SENDER, + UnverifiedTransaction, }, - BlockNumber, }; +use ethereum_types::{Address, H256, U256}; use vm::{ AccessList, ActionParams, ActionValue, CallType, CreateContractAddress, EnvInfo, ParamsType, Schedule, }; -use block::ExecutedBlock; +use crate::{ + block::ExecutedBlock, + client::BlockInfo, + error::Error, + executive::Executive, + spec::CommonParams, + state::{CleanupMode, Substate}, + trace::{NoopTracer, NoopVMTracer}, + tx_filter::TransactionFilter, +}; use builtin::Builtin; use call_contract::CallContract; -use client::BlockInfo; -use error::Error; -use executive::Executive; -use spec::CommonParams; -use state::{CleanupMode, Substate}; -use trace::{NoopTracer, NoopVMTracer}; -use tx_filter::TransactionFilter; /// Ethash-specific extensions. 
#[derive(Debug, Clone)] @@ -207,7 +209,7 @@ impl EthereumMachine { let res = ex .call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) - .map_err(|e| ::engines::EngineError::FailedSystemCall(format!("{}", e)))?; + .map_err(|e| crate::engines::EngineError::FailedSystemCall(format!("{}", e)))?; let output = res.return_data.to_vec(); Ok(output) @@ -418,6 +420,7 @@ impl EthereumMachine { ) -> Result<(), transaction::Error> { if let Some(ref filter) = self.tx_filter.as_ref() { if !filter.transaction_allowed(&parent.hash(), parent.number() + 1, t, client) { + trace!(target: "txqueue", "transaction {} not allowed by filter", t.hash); return Err(transaction::Error::NotAllowed.into()); } } @@ -451,10 +454,10 @@ impl EthereumMachine { match tx.tx_type() { transaction::TypedTxId::AccessList if !schedule.eip2930 => { - return Err(transaction::Error::TransactionTypeNotEnabled) + return Err(transaction::Error::TransactionTypeNotEnabled); } transaction::TypedTxId::EIP1559Transaction if !schedule.eip1559 => { - return Err(transaction::Error::TransactionTypeNotEnabled) + return Err(transaction::Error::TransactionTypeNotEnabled); } _ => (), }; @@ -597,7 +600,7 @@ mod tests { let rlp = "ea80843b9aca0083015f90948921ebb5f79e9e3920abe571004d0b1d5119c154865af3107a400080038080"; let transaction: UnverifiedTransaction = TypedTransaction::decode(&::rustc_hex::FromHex::from_hex(rlp).unwrap()).unwrap(); - let spec = ::ethereum::new_ropsten_test(); + let spec = crate::ethereum::new_ropsten_test(); let ethparams = get_default_ethash_extensions(); let machine = EthereumMachine::with_ethash_extensions( diff --git a/crates/ethcore/src/machine/traits.rs b/crates/ethcore/src/machine/traits.rs index 3f5f22b2ee..9f08756a24 100644 --- a/crates/ethcore/src/machine/traits.rs +++ b/crates/ethcore/src/machine/traits.rs @@ -17,7 +17,7 @@ //! Generalization of a state machine for a consensus engine. //! This will define traits for the header, block, and state of a blockchain. 
-use block::ExecutedBlock; +use crate::block::ExecutedBlock; use ethereum_types::{Address, U256}; /// Generalization of types surrounding blockchain-suitable state machines. diff --git a/crates/ethcore/src/miner/miner.rs b/crates/ethcore/src/miner/miner.rs index 6d7a4d359f..ee2135764d 100644 --- a/crates/ethcore/src/miner/miner.rs +++ b/crates/ethcore/src/miner/miner.rs @@ -21,6 +21,21 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + io::IoChannel, + miner::{ + self, MinerService, + cache::Cache, + pool_client::{CachedNonceClient, PoolClient}, + }, + types::{ + BlockNumber, + block::Block, + header::Header, + receipt::RichReceipt, + transaction::{self, Action, PendingTransaction, SignedTransaction, UnverifiedTransaction}, + }, +}; use ansi_term::Colour; use bytes::Bytes; use call_contract::CallContract; @@ -30,43 +45,31 @@ use ethcore_miner::{ gas_pricer::GasPricer, local_accounts::LocalAccounts, pool::{ - self, - transaction_filter::{match_filter, TransactionFilter}, - PrioritizationStrategy, QueueStatus, TransactionQueue, VerifiedTransaction, + self, PrioritizationStrategy, QueueStatus, TransactionQueue, VerifiedTransaction, + transaction_filter::{TransactionFilter, match_filter}, }, service_transaction_checker::ServiceTransactionChecker, }; use ethereum_types::{Address, H256, U256}; -use io::IoChannel; -use miner::{ - self, - cache::Cache, - pool_client::{CachedNonceClient, PoolClient}, - MinerService, -}; +use itertools::Itertools; use parking_lot::{Mutex, RwLock}; use rayon::prelude::*; -use types::{ - block::Block, - header::Header, - receipt::RichReceipt, - transaction::{self, Action, PendingTransaction, SignedTransaction, UnverifiedTransaction}, - BlockNumber, -}; use using_queue::{GetAction, UsingQueue}; -use block::{ClosedBlock, OpenBlock, SealedBlock}; -use client::{ - traits::{EngineClient, ForceUpdateSealing}, - BlockChain, BlockId, BlockProducer, ChainInfo, ClientIoMessage, Nonce, SealedBlockImporter, - TransactionId, TransactionInfo, +use 
crate::{ + block::{ClosedBlock, OpenBlock, SealedBlock}, + client::{ + BlockChain, BlockId, BlockProducer, ChainInfo, ClientIoMessage, Nonce, SealedBlockImporter, + TransactionId, TransactionInfo, + traits::{EngineClient, ForceUpdateSealing}, + }, + engines::{EngineSigner, EthEngine, Seal, SealingState}, + error::{Error, ErrorKind}, + executed::ExecutionError, + executive::contract_address, + spec::Spec, + state::State, }; -use engines::{EngineSigner, EthEngine, Seal, SealingState}; -use error::{Error, ErrorKind}; -use executed::ExecutionError; -use executive::contract_address; -use spec::Spec; -use state::State; /// Different possible definitions for pending transaction set. #[derive(Debug, PartialEq)] @@ -460,10 +463,10 @@ impl Miner { let params = self.params.read().clone(); let block = match chain.prepare_open_block( - if self.engine.use_block_author() { - params.author - } else { - Address::zero() + match self.engine.use_block_author() { + crate::engines::BlockAuthorOption::ZeroBlockAuthor => Address::zero(), + crate::engines::BlockAuthorOption::ConfiguredBlockAuthor => params.author, + crate::engines::BlockAuthorOption::EngineBlockAuthor(address) => address, }, params.gas_range_target, params.extra_data, @@ -1106,7 +1109,7 @@ impl Miner { const SEALING_TIMEOUT_IN_BLOCKS: u64 = 5; impl miner::MinerService for Miner { - type State = State<::state_db::StateDB>; + type State = State; fn authoring_params(&self) -> AuthoringParams { self.params.read().clone() @@ -1205,7 +1208,12 @@ impl miner::MinerService for Miner { chain: &C, transactions: Vec, ) -> Vec> { - trace!(target: "external_tx", "Importing external transactions"); + debug!(target: "external_tx", "Importing external transactions"); + trace!(target: "external_tx", + "import_external_transactions {:?}", + transactions.iter().map(|f| f.hash).collect_vec() + ); + let client = self.pool_client(chain); let results = self.transaction_queue.import( client, @@ -1445,7 +1453,28 @@ impl miner::MinerService for 
Miner { } fn transaction(&self, hash: &H256) -> Option> { - self.transaction_queue.find(hash) + let result = self.transaction_queue.find(hash); + + if result.is_none() { + if let Some(Some(pending)) = self.map_existing_pending_block( + |b| b.transactions.iter().find(|t| t.hash == *hash).cloned(), + 0, + ) { + return Some(Arc::new( + pool::VerifiedTransaction::from_pending_block_transaction(pending), + )); + } + } + return result; + } + + fn transaction_if_readable( + &self, + hash: &H256, + max_lock_duration: &Duration, + ) -> Option> { + self.transaction_queue + .find_if_readable(hash, max_lock_duration) } fn remove_transaction(&self, hash: &H256) -> Option> { @@ -1455,6 +1484,10 @@ impl miner::MinerService for Miner { .expect("remove() returns one result per hash; one hash passed; qed") } + fn local_transaction_status(&self, hash: &H256) -> Option { + self.transaction_queue.local_transaction_status(hash) + } + fn queue_status(&self) -> QueueStatus { self.transaction_queue.status() } @@ -1548,8 +1581,11 @@ impl miner::MinerService for Miner { // which should be on by default. if block.header.number() == 1 { if let Some(name) = self.engine.params().nonzero_bugfix_hard_fork() { - warn!("Your chain specification contains one or more hard forks which are required to be \ - on by default. Please remove these forks and start your chain again: {}.", name); + warn!( + "Your chain specification contains one or more hard forks which are required to be \ + on by default. 
Please remove these forks and start your chain again: {}.", + name + ); return; } } @@ -1725,7 +1761,7 @@ impl miner::MinerService for Miner { let accounts = self.accounts.clone(); let service_transaction_checker = self.service_transaction_checker.clone(); - let cull = move |chain: &::client::Client| { + let cull = move |chain: &crate::client::Client| { let client = PoolClient::new( chain, &nonce_cache, @@ -1807,18 +1843,21 @@ mod tests { use std::iter::FromIterator; use super::*; + use crate::types::BlockNumber; use accounts::AccountProvider; use crypto::publickey::{Generator, Random}; use hash::keccak; use rustc_hex::FromHex; - use types::BlockNumber; - use client::{ChainInfo, EachBlockWith, ImportSealedBlock, TestBlockChainClient}; - use miner::{MinerService, PendingOrdering}; - use test_helpers::{ - dummy_engine_signer_with_address, generate_dummy_client, generate_dummy_client_with_spec, + use crate::{ + client::{ChainInfo, EachBlockWith, ImportSealedBlock, TestBlockChainClient}, + miner::{MinerService, PendingOrdering}, + test_helpers::{ + dummy_engine_signer_with_address, generate_dummy_client, + generate_dummy_client_with_spec, + }, + types::transaction::{Transaction, TypedTransaction}, }; - use types::transaction::{Transaction, TypedTransaction}; #[test] fn should_prepare_block_to_seal() { @@ -2249,13 +2288,18 @@ mod tests { assert!(miner.pending_block(0).is_none()); assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); - assert!(miner - .import_own_transaction( - &*client, - PendingTransaction::new(transaction_with_chain_id(spec.chain_id()).into(), None), - false - ) - .is_ok()); + assert!( + miner + .import_own_transaction( + &*client, + PendingTransaction::new( + transaction_with_chain_id(spec.chain_id()).into(), + None + ), + false + ) + .is_ok() + ); miner.update_sealing(&*client, ForceUpdateSealing::No); client.flush_queue(); diff --git a/crates/ethcore/src/miner/mod.rs b/crates/ethcore/src/miner/mod.rs index b6390e7dbe..c7e87a0b51 
100644 --- a/crates/ethcore/src/miner/mod.rs +++ b/crates/ethcore/src/miner/mod.rs @@ -29,33 +29,39 @@ pub mod stratum; pub use self::miner::{Author, AuthoringParams, Miner, MinerOptions, Penalization, PendingSet}; pub use ethcore_miner::{ local_accounts::LocalAccounts, - pool::{transaction_filter::TransactionFilter, PendingOrdering}, + pool::{PendingOrdering, transaction_filter::TransactionFilter}, }; use std::{ collections::{BTreeMap, BTreeSet}, sync::Arc, + time::Duration, }; -use bytes::Bytes; -use ethcore_miner::pool::{local_transactions, QueueStatus, VerifiedTransaction}; -use ethereum_types::{Address, H256, U256}; -use types::{ +use crate::types::{ + BlockNumber, block::Block, header::Header, receipt::RichReceipt, transaction::{self, PendingTransaction, SignedTransaction, UnverifiedTransaction}, - BlockNumber, }; +use bytes::Bytes; +use ethcore_miner::pool::{ + QueueStatus, VerifiedTransaction, + local_transactions::{self, Status}, +}; +use ethereum_types::{Address, H256, U256}; -use block::SealedBlock; -use call_contract::{CallContract, RegistryInfo}; -use client::{ - traits::ForceUpdateSealing, AccountData, BlockChain, BlockProducer, Nonce, ScheduleInfo, - SealedBlockImporter, +use crate::{ + block::SealedBlock, + client::{ + AccountData, BlockChain, BlockProducer, Nonce, ScheduleInfo, SealedBlockImporter, + traits::ForceUpdateSealing, + }, + error::Error, + state::StateInfo, }; -use error::Error; -use state::StateInfo; +use call_contract::{CallContract, RegistryInfo}; /// Provides methods to verify incoming external transactions pub trait TransactionVerifierClient: Send + Sync @@ -197,6 +203,14 @@ pub trait MinerService: Send + Sync { /// Query transaction from the pool given it's hash. fn transaction(&self, hash: &H256) -> Option>; + /// Query transaction from the pool given it's hash, without blocking. + /// Might return "None" in cases when the lock could not get acquired. 
+ fn transaction_if_readable( + &self, + hash: &H256, + max_lock_duration: &Duration, + ) -> Option>; + /// Returns next valid nonce for given address. /// /// This includes nonces of all transactions from this address in the pending queue @@ -276,4 +290,7 @@ pub trait MinerService: Send + Sync { /// Set a new minimum gas limit. /// Will not work if dynamic gas calibration is set. fn set_minimal_gas_price(&self, gas_price: U256) -> Result; + + /// Get the status of a local transaction by its hash. + fn local_transaction_status(&self, hash: &H256) -> Option; } diff --git a/crates/ethcore/src/miner/pool_client.rs b/crates/ethcore/src/miner/pool_client.rs index 1d549ef561..993efb1553 100644 --- a/crates/ethcore/src/miner/pool_client.rs +++ b/crates/ethcore/src/miner/pool_client.rs @@ -18,25 +18,27 @@ use std::fmt; +use crate::types::{ + header::Header, + transaction::{self, SignedTransaction, UnverifiedTransaction}, +}; use ethcore_miner::{ local_accounts::LocalAccounts, pool, pool::client::NonceClient, service_transaction_checker::ServiceTransactionChecker, }; use ethereum_types::{Address, H256, U256}; -use types::{ - header::Header, - transaction::{self, SignedTransaction, UnverifiedTransaction}, -}; +use crate::{ + client::{Balance, BlockId, BlockInfo, Nonce, TransactionId}, + engines::EthEngine, + miner::{ + self, + cache::{Cache, CachedClient}, + }, + transaction_ext::Transaction, +}; use call_contract::CallContract; -use client::{Balance, BlockId, BlockInfo, Nonce, TransactionId}; -use engines::EthEngine; use ethcore_miner::pool::client::BalanceClient; -use miner::{ - self, - cache::{Cache, CachedClient}, -}; -use transaction_ext::Transaction; pub(crate) struct CachedNonceClient<'a, C: 'a> { cached_client: CachedClient<'a, C, Address, U256>, @@ -134,7 +136,7 @@ impl<'a, C: 'a> Clone for PoolClient<'a, C> { cached_nonces: self.cached_nonces.clone(), cached_balances: self.cached_balances.clone(), engine: self.engine, - accounts: self.accounts.clone(), + accounts: 
self.accounts, best_block_header: self.best_block_header.clone(), service_transaction_checker: self.service_transaction_checker.clone(), } diff --git a/crates/ethcore/src/miner/stratum.rs b/crates/ethcore/src/miner/stratum.rs index 180be26ccd..f27e0754db 100644 --- a/crates/ethcore/src/miner/stratum.rs +++ b/crates/ethcore/src/miner/stratum.rs @@ -22,15 +22,17 @@ use std::{ sync::{Arc, Weak}, }; -use client::{Client, ImportSealedBlock}; +use crate::{ + client::{Client, ImportSealedBlock}, + miner::{Miner, MinerService}, +}; use ethash::{self, SeedHashCompute}; #[cfg(feature = "work-notify")] use ethcore_miner::work_notify::NotifyWork; #[cfg(feature = "work-notify")] use ethcore_stratum::PushWorkHandler; use ethcore_stratum::{Error as StratumServiceError, JobDispatcher, Stratum as StratumService}; -use ethereum_types::{H256, H64, U256}; -use miner::{Miner, MinerService}; +use ethereum_types::{H64, H256, U256}; use parking_lot::Mutex; use rlp::encode; @@ -48,11 +50,7 @@ pub struct Options { } fn clean_0x(s: &str) -> &str { - if s.starts_with("0x") { - &s[2..] - } else { - s - } + if s.starts_with("0x") { &s[2..] } else { s } } struct SubmitPayload { @@ -101,9 +99,13 @@ impl SubmitPayload { #[derive(Debug)] enum PayloadError { + #[allow(dead_code)] ArgumentsAmountUnexpected(usize), + #[allow(dead_code)] InvalidNonce(String), + #[allow(dead_code)] InvalidPowHash(String), + #[allow(dead_code)] InvalidMixHash(String), } diff --git a/crates/ethcore/src/pod_account.rs b/crates/ethcore/src/pod_account.rs index 6187072db4..a5f02ed72c 100644 --- a/crates/ethcore/src/pod_account.rs +++ b/crates/ethcore/src/pod_account.rs @@ -16,6 +16,7 @@ //! Account system expressed in Plain Old Data. 
+use crate::{state::Account, types::account_diff::*}; use bytes::Bytes; use ethereum_types::{BigEndianHash, H256, U256}; use ethjson; @@ -28,11 +29,9 @@ use kvdb::DBValue; use rlp::{self, RlpStream}; use rustc_hex::ToHex; use serde::Serializer; -use state::Account; use std::{collections::BTreeMap, fmt}; use trie::TrieFactory; use triehash::sec_trie_root; -use types::account_diff::*; #[derive(Debug, Clone, PartialEq, Eq, Serialize)] /// An account, expressed as Plain-Old-Data (hence the name). @@ -216,10 +215,10 @@ pub fn diff_pod(pre: Option<&PodAccount>, post: Option<&PodAccount>) -> Option StateDiff { #[cfg(test)] mod test { use super::PodState; + use crate::{ + pod_account::PodAccount, + types::{account_diff::*, state_diff::*}, + }; use ethereum_types::H160; - use pod_account::PodAccount; use std::collections::BTreeMap; - use types::{account_diff::*, state_diff::*}; #[test] fn create_delete() { diff --git a/crates/ethcore/src/snapshot/account.rs b/crates/ethcore/src/snapshot/account.rs index 7ce25e72c1..680afebbf8 100644 --- a/crates/ethcore/src/snapshot/account.rs +++ b/crates/ethcore/src/snapshot/account.rs @@ -16,17 +16,19 @@ //! Account state encoding and decoding -use account_db::{AccountDB, AccountDBMut}; +use crate::{ + account_db::{AccountDB, AccountDBMut}, + snapshot::{Error, Progress}, + types::basic_account::BasicAccount, +}; use bytes::Bytes; use ethereum_types::{H256, U256}; use ethtrie::{TrieDB, TrieDBMut}; use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; use hash_db::HashDB; use rlp::{Rlp, RlpStream}; -use snapshot::{Error, Progress}; use std::{collections::HashSet, sync::atomic::Ordering}; use trie::{Trie, TrieMut}; -use types::basic_account::BasicAccount; // An empty account -- these were replaced with RLP null data for a space optimization in v1. 
const ACC_EMPTY: BasicAccount = BasicAccount { @@ -221,20 +223,22 @@ pub fn from_fat_rlp( #[cfg(test)] mod tests { - use account_db::{AccountDB, AccountDBMut}; - use snapshot::{tests::helpers::fill_storage, Progress}; - use test_helpers::get_temp_state_db; - use types::basic_account::BasicAccount; + use crate::{ + account_db::{AccountDB, AccountDBMut}, + snapshot::{Progress, tests::helpers::fill_storage}, + test_helpers::get_temp_state_db, + types::basic_account::BasicAccount, + }; use ethereum_types::{Address, H256}; - use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP}; + use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; use hash_db::HashDB; use kvdb::DBValue; use rlp::Rlp; use std::collections::HashSet; - use super::{from_fat_rlp, to_fat_rlps, ACC_EMPTY}; + use super::{ACC_EMPTY, from_fat_rlp, to_fat_rlps}; #[test] fn encoding_basic() { diff --git a/crates/ethcore/src/snapshot/block.rs b/crates/ethcore/src/snapshot/block.rs index 57004a7568..907ce29111 100644 --- a/crates/ethcore/src/snapshot/block.rs +++ b/crates/ethcore/src/snapshot/block.rs @@ -16,14 +16,14 @@ //! Block RLP compression. 
+use crate::types::{ + BlockNumber, block::Block, header::Header, transaction::TypedTransaction, views::BlockView, +}; use bytes::Bytes; use ethereum_types::{H256, U256}; use hash::keccak; use rlp::{DecoderError, Rlp, RlpStream}; use triehash::ordered_trie_root; -use types::{ - block::Block, header::Header, transaction::TypedTransaction, views::BlockView, BlockNumber, -}; const HEADER_FIELDS: usize = 8; const BLOCK_FIELDS: usize = 2; @@ -156,15 +156,15 @@ impl AbridgedBlock { mod tests { use super::AbridgedBlock; - use bytes::Bytes; - use ethereum_types::{Address, H256, U256}; - use types::{ + use crate::types::{ + BlockNumber, block::Block, transaction::{Action, Transaction, TypedTransaction}, view, views::BlockView, - BlockNumber, }; + use bytes::Bytes; + use ethereum_types::{Address, H256, U256}; fn encode_block(b: &Block) -> Bytes { b.rlp_bytes() diff --git a/crates/ethcore/src/snapshot/consensus/authority.rs b/crates/ethcore/src/snapshot/consensus/authority.rs index 07a84ff337..06b913043d 100644 --- a/crates/ethcore/src/snapshot/consensus/authority.rs +++ b/crates/ethcore/src/snapshot/consensus/authority.rs @@ -22,24 +22,28 @@ use super::{ChunkSink, Rebuilder, SnapshotComponents}; use std::sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }; -use engines::{EpochTransition, EpochVerifier, EthEngine}; -use machine::EthereumMachine; -use snapshot::{Error, ManifestData, Progress}; +use crate::{ + engines::{EpochTransition, EpochVerifier, EthEngine}, + machine::EthereumMachine, + snapshot::{Error, ManifestData, Progress}, +}; -use blockchain::{BlockChain, BlockChainDB, BlockProvider}; +use crate::{ + blockchain::{BlockChain, BlockChainDB, BlockProvider}, + types::{ + BlockNumber, encoded, header::Header, ids::BlockId, receipt::TypedReceipt, + transaction::TypedTransaction, + }, +}; use bytes::Bytes; use db::KeyValueDB; use ethereum_types::{H256, U256}; use itertools::{Itertools, Position}; use rlp::{Rlp, RlpStream}; -use types::{ - 
encoded, header::Header, ids::BlockId, receipt::TypedReceipt, transaction::TypedTransaction, - BlockNumber, -}; /// Snapshot creation and restoration for PoA chains. /// Chunk format: @@ -137,7 +141,7 @@ impl SnapshotComponents for PoaSnapshot { chain: BlockChain, db: Arc, manifest: &ManifestData, - ) -> Result, ::error::Error> { + ) -> Result, crate::error::Error> { Ok(Box::new(ChunkRebuilder { manifest: manifest.clone(), warp_target: None, @@ -198,8 +202,8 @@ impl ChunkRebuilder { last_verifier: &mut Option>>, transition_rlp: Rlp, engine: &dyn EthEngine, - ) -> Result { - use engines::ConstructedVerifier; + ) -> Result { + use crate::engines::ConstructedVerifier; // decode. let header = @@ -260,7 +264,7 @@ impl Rebuilder for ChunkRebuilder { chunk: &[u8], engine: &dyn EthEngine, abort_flag: &AtomicBool, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { let rlp = Rlp::new(chunk); let is_last_chunk: bool = rlp.val_at(0)?; let num_items = rlp.item_count()?; @@ -349,7 +353,7 @@ impl Rebuilder for ChunkRebuilder { } if is_last_chunk { - use types::block::Block; + use crate::types::block::Block; let last_rlp = rlp.at(num_items - 1)?; let block = Block { @@ -392,7 +396,7 @@ impl Rebuilder for ChunkRebuilder { Ok(()) } - fn finalize(&mut self, _engine: &dyn EthEngine) -> Result<(), ::error::Error> { + fn finalize(&mut self, _engine: &dyn EthEngine) -> Result<(), crate::error::Error> { if !self.had_genesis { return Err(Error::WrongChunkFormat("No genesis transition included.".into()).into()); } @@ -402,7 +406,7 @@ impl Rebuilder for ChunkRebuilder { None => { return Err( Error::WrongChunkFormat("Warp target block not included.".into()).into(), - ) + ); } }; diff --git a/crates/ethcore/src/snapshot/consensus/mod.rs b/crates/ethcore/src/snapshot/consensus/mod.rs index 44db5f7249..481dbcc636 100644 --- a/crates/ethcore/src/snapshot/consensus/mod.rs +++ b/crates/ethcore/src/snapshot/consensus/mod.rs @@ -17,12 +17,14 @@ //! 
Secondary chunk creation and restoration, implementations for different consensus //! engines. -use std::sync::{atomic::AtomicBool, Arc}; +use std::sync::{Arc, atomic::AtomicBool}; -use blockchain::{BlockChain, BlockChainDB}; -use engines::EthEngine; -use snapshot::{Error, ManifestData, Progress}; -use types::BlockNumber; +use crate::{ + blockchain::{BlockChain, BlockChainDB}, + engines::EthEngine, + snapshot::{Error, ManifestData, Progress}, + types::BlockNumber, +}; use ethereum_types::H256; @@ -65,7 +67,7 @@ pub trait SnapshotComponents: Send { chain: BlockChain, db: Arc, manifest: &ManifestData, - ) -> Result, ::error::Error>; + ) -> Result, crate::error::Error>; /// Minimum supported snapshot version number. fn min_supported_version(&self) -> u64; @@ -85,12 +87,12 @@ pub trait Rebuilder: Send { chunk: &[u8], engine: &dyn EthEngine, abort_flag: &AtomicBool, - ) -> Result<(), ::error::Error>; + ) -> Result<(), crate::error::Error>; /// Finalize the restoration. Will be done after all chunks have been /// fed successfully. /// /// This should apply the necessary "glue" between chunks, /// and verify against the restored state. 
- fn finalize(&mut self, engine: &dyn EthEngine) -> Result<(), ::error::Error>; + fn finalize(&mut self, engine: &dyn EthEngine) -> Result<(), crate::error::Error>; } diff --git a/crates/ethcore/src/snapshot/consensus/work.rs b/crates/ethcore/src/snapshot/consensus/work.rs index 58a8525f36..cb737aea78 100644 --- a/crates/ethcore/src/snapshot/consensus/work.rs +++ b/crates/ethcore/src/snapshot/consensus/work.rs @@ -25,20 +25,22 @@ use super::{ChunkSink, Rebuilder, SnapshotComponents}; use std::{ collections::VecDeque, sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, }; -use blockchain::{BlockChain, BlockChainDB, BlockProvider}; +use crate::{ + blockchain::{BlockChain, BlockChainDB, BlockProvider}, + engines::EthEngine, + snapshot::{Error, ManifestData, Progress, block::AbridgedBlock}, + types::{BlockNumber, encoded}, +}; use bytes::Bytes; use db::KeyValueDB; -use engines::EthEngine; use ethereum_types::H256; use rand::rngs::OsRng; use rlp::{Rlp, RlpStream}; -use snapshot::{block::AbridgedBlock, Error, ManifestData, Progress}; -use types::{encoded, BlockNumber}; /// Snapshot creation and restoration for PoW chains. 
/// This includes blocks from the head of the chain as a @@ -88,7 +90,7 @@ impl SnapshotComponents for PowSnapshot { chain: BlockChain, db: Arc, manifest: &ManifestData, - ) -> Result, ::error::Error> { + ) -> Result, crate::error::Error> { PowRebuilder::new( chain, db.key_value().clone(), @@ -99,10 +101,10 @@ impl SnapshotComponents for PowSnapshot { } fn min_supported_version(&self) -> u64 { - ::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION + crate::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION } fn current_version(&self) -> u64 { - ::snapshot::STATE_CHUNK_VERSION + crate::snapshot::STATE_CHUNK_VERSION } } @@ -244,7 +246,7 @@ impl PowRebuilder { db: Arc, manifest: &ManifestData, snapshot_blocks: u64, - ) -> Result { + ) -> Result { Ok(PowRebuilder { chain: chain, db: db, @@ -267,9 +269,9 @@ impl Rebuilder for PowRebuilder { chunk: &[u8], engine: &dyn EthEngine, abort_flag: &AtomicBool, - ) -> Result<(), ::error::Error> { + ) -> Result<(), crate::error::Error> { + use crate::snapshot::verify_old_block; use ethereum_types::U256; - use snapshot::verify_old_block; use triehash::ordered_trie_root; let rlp = Rlp::new(chunk); @@ -372,7 +374,7 @@ impl Rebuilder for PowRebuilder { } /// Glue together any disconnected chunks and check that the chain is complete. - fn finalize(&mut self, _: &dyn EthEngine) -> Result<(), ::error::Error> { + fn finalize(&mut self, _: &dyn EthEngine) -> Result<(), crate::error::Error> { let mut batch = self.db.transaction(); for (first_num, first_hash) in self.disconnected.drain(..) 
{ @@ -391,7 +393,7 @@ impl Rebuilder for PowRebuilder { self.chain.insert_epoch_transition( &mut batch, 0, - ::engines::EpochTransition { + crate::engines::EpochTransition { block_number: 0, block_hash: genesis_hash, proof: vec![], diff --git a/crates/ethcore/src/snapshot/error.rs b/crates/ethcore/src/snapshot/error.rs index 914f01d32b..6670f5f95d 100644 --- a/crates/ethcore/src/snapshot/error.rs +++ b/crates/ethcore/src/snapshot/error.rs @@ -18,7 +18,7 @@ use std::fmt; -use types::ids::BlockId; +use crate::types::ids::BlockId; use ethereum_types::H256; use ethtrie::TrieError; diff --git a/crates/ethcore/src/snapshot/io.rs b/crates/ethcore/src/snapshot/io.rs index f1abb60be8..62b63d0a3d 100644 --- a/crates/ethcore/src/snapshot/io.rs +++ b/crates/ethcore/src/snapshot/io.rs @@ -207,7 +207,7 @@ impl PackedReader { /// Create a new `PackedReader` for the file at the given path. /// This will fail if any io errors are encountered or the file /// is not a valid packed snapshot. - pub fn new(path: &Path) -> Result, ::snapshot::error::Error> { + pub fn new(path: &Path) -> Result, crate::snapshot::error::Error> { let mut file = File::open(path)?; let file_len = file.metadata()?.len(); if file_len < 8 { @@ -246,7 +246,7 @@ impl PackedReader { }; if version > SNAPSHOT_VERSION { - return Err(::snapshot::error::Error::VersionNotSupported(version)); + return Err(crate::snapshot::error::Error::VersionNotSupported(version)); } let state: Vec = rlp.list_at(0 + start)?; @@ -302,7 +302,7 @@ pub struct LooseReader { impl LooseReader { /// Create a new `LooseReader` which will read the manifest and chunk data from /// the given directory. 
- pub fn new(mut dir: PathBuf) -> Result { + pub fn new(mut dir: PathBuf) -> Result { let mut manifest_buf = Vec::new(); dir.push("MANIFEST"); @@ -337,10 +337,10 @@ mod tests { use tempdir::TempDir; use super::{ - LooseReader, LooseWriter, PackedReader, PackedWriter, SnapshotReader, SnapshotWriter, - SNAPSHOT_VERSION, + LooseReader, LooseWriter, PackedReader, PackedWriter, SNAPSHOT_VERSION, SnapshotReader, + SnapshotWriter, }; - use snapshot::ManifestData; + use crate::snapshot::ManifestData; const STATE_CHUNKS: &'static [&'static [u8]] = &[b"dog", b"cat", b"hello world", b"hi", b"notarealchunk"]; diff --git a/crates/ethcore/src/snapshot/mod.rs b/crates/ethcore/src/snapshot/mod.rs index 1cf466d1af..bb373fe635 100644 --- a/crates/ethcore/src/snapshot/mod.rs +++ b/crates/ethcore/src/snapshot/mod.rs @@ -19,20 +19,22 @@ //! Documentation of the format can be found at //! https://openethereum.github.io/Warp-Sync-Snapshot-Format -use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP}; +use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; use std::{ cmp, collections::{HashMap, HashSet}, sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, }, }; -use account_db::{AccountDB, AccountDBMut}; -use blockchain::{BlockChain, BlockProvider}; -use engines::EthEngine; -use types::{header::Header, ids::BlockId}; +use crate::{ + account_db::{AccountDB, AccountDBMut}, + blockchain::{BlockChain, BlockProvider}, + engines::EthEngine, + types::{header::Header, ids::BlockId}, +}; use bytes::Bytes; use db::{DBValue, KeyValueDB}; @@ -50,7 +52,7 @@ use trie::{Trie, TrieMut}; use self::io::SnapshotWriter; use crossbeam_utils::thread; -use rand::{rngs::OsRng, Rng}; +use rand::{Rng, rngs::OsRng}; pub use self::error::Error; @@ -60,7 +62,7 @@ pub use self::{ traits::SnapshotService, watcher::Watcher, }; -pub use types::{ +pub use crate::types::{ basic_account::BasicAccount, creation_status::CreationStatus, 
restoration_status::RestorationStatus, snapshot_manifest::ManifestData, }; @@ -447,7 +449,7 @@ impl StateRebuilder { } /// Feed an uncompressed state chunk into the rebuilder. - pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), ::error::Error> { + pub fn feed(&mut self, chunk: &[u8], flag: &AtomicBool) -> Result<(), crate::error::Error> { let rlp = Rlp::new(chunk); let mut pairs = Vec::with_capacity(rlp.item_count()?); @@ -513,7 +515,11 @@ impl StateRebuilder { /// Finalize the restoration. Check for accounts missing code and make a dummy /// journal entry. /// Once all chunks have been fed, there should be nothing missing. - pub fn finalize(mut self, era: u64, id: H256) -> Result, ::error::Error> { + pub fn finalize( + mut self, + era: u64, + id: H256, + ) -> Result, crate::error::Error> { let missing = self.missing_code.keys().cloned().collect::>(); if !missing.is_empty() { return Err(Error::MissingCode(missing).into()); @@ -548,7 +554,7 @@ fn rebuild_accounts( known_code: &HashMap, known_storage_roots: &mut HashMap, abort_flag: &AtomicBool, -) -> Result { +) -> Result { let mut status = RebuiltStatus::default(); for (account_rlp, out) in account_fat_rlps.into_iter().zip(out_chunk.iter_mut()) { if !abort_flag.load(Ordering::SeqCst) { @@ -616,10 +622,10 @@ pub fn verify_old_block( engine: &dyn EthEngine, chain: &BlockChain, always: bool, -) -> Result<(), ::error::Error> { +) -> Result<(), crate::error::Error> { engine.verify_block_basic(header)?; - if always || rng.gen::() <= POW_VERIFY_RATE { + if always || rng.r#gen::() <= POW_VERIFY_RATE { engine.verify_block_unordered(header)?; match chain.block_header_data(header.parent_hash()) { Some(parent) => engine diff --git a/crates/ethcore/src/snapshot/service.rs b/crates/ethcore/src/snapshot/service.rs index 10519c06dc..095cb3aa27 100644 --- a/crates/ethcore/src/snapshot/service.rs +++ b/crates/ethcore/src/snapshot/service.rs @@ -23,26 +23,28 @@ use std::{ io::{self, ErrorKind, Read}, 
path::PathBuf, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, + atomic::{AtomicBool, AtomicUsize, Ordering}, }, }; use super::{ + CreationStatus, MAX_CHUNK_SIZE, ManifestData, Rebuilder, RestorationStatus, SnapshotService, + StateRebuilder, io::{LooseReader, LooseWriter, SnapshotReader, SnapshotWriter}, - CreationStatus, ManifestData, Rebuilder, RestorationStatus, SnapshotService, StateRebuilder, - MAX_CHUNK_SIZE, }; -use blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler}; -use client::{BlockChainClient, BlockInfo, ChainInfo, Client, ClientIoMessage}; -use engines::EthEngine; -use error::{Error, ErrorKind as SnapshotErrorKind}; +use crate::{ + blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler}, + client::{BlockChainClient, BlockInfo, ChainInfo, Client, ClientIoMessage}, + engines::EthEngine, + error::{Error, ErrorKind as SnapshotErrorKind}, + snapshot::Error as SnapshotError, + types::ids::BlockId, +}; use hash::keccak; -use snapshot::Error as SnapshotError; -use types::ids::BlockId; -use io::IoChannel; +use crate::io::IoChannel; use bytes::Bytes; use ethereum_types::H256; @@ -126,7 +128,7 @@ impl Restoration { let components = params .engine .snapshot_components() - .ok_or_else(|| ::snapshot::Error::SnapshotsUnsupported)?; + .ok_or_else(|| crate::snapshot::Error::SnapshotsUnsupported)?; let secondary = components.rebuilder(chain, raw_db.clone(), &manifest)?; @@ -152,7 +154,7 @@ impl Restoration { let expected_len = snappy::decompressed_len(chunk)?; if expected_len > MAX_CHUNK_SIZE { trace!(target: "snapshot", "Discarding large chunk: {} vs {}", expected_len, MAX_CHUNK_SIZE); - return Err(::snapshot::Error::ChunkTooLarge.into()); + return Err(crate::snapshot::Error::ChunkTooLarge.into()); } let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; @@ -180,7 +182,7 @@ impl Restoration { let expected_len = snappy::decompressed_len(chunk)?; if expected_len > MAX_CHUNK_SIZE { trace!(target: "snapshot", "Discarding large chunk: 
{} vs {}", expected_len, MAX_CHUNK_SIZE); - return Err(::snapshot::Error::ChunkTooLarge.into()); + return Err(crate::snapshot::Error::ChunkTooLarge.into()); } let len = snappy::decompress_into(chunk, &mut self.snappy_buffer)?; @@ -444,7 +446,7 @@ impl Service { let block = self .client .block(BlockId::Hash(parent_hash)) - .ok_or(::snapshot::error::Error::UnlinkedAncientBlockChain)?; + .ok_or(crate::snapshot::error::Error::UnlinkedAncientBlockChain)?; parent_hash = block.parent_hash(); let block_number = block.number(); @@ -490,7 +492,7 @@ impl Service { // We couldn't reach the targeted hash if parent_hash != target_hash { - return Err(::snapshot::error::Error::UnlinkedAncientBlockChain.into()); + return Err(crate::snapshot::error::Error::UnlinkedAncientBlockChain.into()); } // Update best ancient block in the Next Chain @@ -558,7 +560,9 @@ impl Service { if let Err(e) = res { if client.chain_info().best_block_number >= num + client.pruning_history() { // The state we were snapshotting was pruned before we could finish. - info!("Periodic snapshot failed: block state pruned. Run with a longer `--pruning-history` or with `--no-periodic-snapshot`"); + info!( + "Periodic snapshot failed: block state pruned. 
Run with a longer `--pruning-history` or with `--no-periodic-snapshot`" + ); return Err(e); } else { return Err(e); @@ -1011,20 +1015,22 @@ impl Drop for Service { #[cfg(test)] mod tests { use super::*; - use client::ClientIoMessage; - use io::IoService; + use crate::{ + client::ClientIoMessage, + io::IoService, + snapshot::{ManifestData, RestorationStatus, SnapshotService}, + spec::Spec, + test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler}, + }; use journaldb::Algorithm; - use snapshot::{ManifestData, RestorationStatus, SnapshotService}; - use spec::Spec; use tempdir::TempDir; - use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler}; #[test] fn sends_async_messages() { let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; let client = generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices, false); - let service = IoService::::start("Test").unwrap(); + let service = IoService::::start("Test", 4).unwrap(); let spec = Spec::new_test(); let tempdir = TempDir::new("").unwrap(); @@ -1098,16 +1104,20 @@ mod tests { let definitely_bad_chunk = [1, 2, 3, 4, 5]; for hash in state_hashes { - assert!(restoration - .feed_state(hash, &definitely_bad_chunk, &flag) - .is_err()); + assert!( + restoration + .feed_state(hash, &definitely_bad_chunk, &flag) + .is_err() + ); assert!(!restoration.is_done()); } for hash in block_hashes { - assert!(restoration - .feed_blocks(hash, &definitely_bad_chunk, &*spec.engine, &flag) - .is_err()); + assert!( + restoration + .feed_blocks(hash, &definitely_bad_chunk, &*spec.engine, &flag) + .is_err() + ); assert!(!restoration.is_done()); } } diff --git a/crates/ethcore/src/snapshot/tests/helpers.rs b/crates/ethcore/src/snapshot/tests/helpers.rs index 1bbc609615..fb504d8a65 100644 --- a/crates/ethcore/src/snapshot/tests/helpers.rs +++ b/crates/ethcore/src/snapshot/tests/helpers.rs @@ -17,20 +17,22 @@ //! Snapshot test helpers. 
These are used to build blockchains and state tries //! which can be queried before and after a full snapshot/restore cycle. -extern crate trie_standardmap; +use trie_standardmap; use hash::KECCAK_NULL_RLP; use std::sync::Arc; -use account_db::AccountDBMut; -use blockchain::{BlockChain, BlockChainDB}; -use client::{ChainInfo, Client}; -use engines::EthEngine; -use snapshot::{ - io::{PackedReader, PackedWriter, SnapshotReader}, - StateRebuilder, +use crate::{ + account_db::AccountDBMut, + blockchain::{BlockChain, BlockChainDB}, + client::{ChainInfo, Client}, + engines::EthEngine, + snapshot::{ + StateRebuilder, + io::{PackedReader, PackedWriter, SnapshotReader}, + }, + types::basic_account::BasicAccount, }; -use types::basic_account::BasicAccount; use rand::Rng; use tempdir::TempDir; @@ -71,7 +73,7 @@ impl StateProducer { let temp = trie .iter() .unwrap() // binding required due to complicated lifetime stuff - .filter(|_| rng.gen::() < ACCOUNT_CHURN) + .filter(|_| rng.r#gen::() < ACCOUNT_CHURN) .map(Result::unwrap) .map(|(k, v)| (H256::from_slice(&k), v.to_owned())) .collect(); @@ -96,13 +98,13 @@ impl StateProducer { } // add between 0 and 5 new accounts each tick. - let new_accs = rng.gen::() % 5; + let new_accs = rng.r#gen::() % 5; for _ in 0..new_accs { - let address_hash = H256(rng.gen()); - let balance: usize = rng.gen(); - let nonce: usize = rng.gen(); - let acc = ::state::Account::new_basic(balance.into(), nonce.into()).rlp(); + let address_hash = H256(rng.r#gen()); + let balance: usize = rng.r#gen(); + let nonce: usize = rng.r#gen(); + let acc = crate::state::Account::new_basic(balance.into(), nonce.into()).rlp(); trie.insert(&address_hash[..], &acc).unwrap(); } } @@ -138,7 +140,7 @@ pub fn fill_storage(mut db: AccountDBMut, root: &mut H256, seed: &mut H256) { /// Take a snapshot from the given client into a temporary file. /// Return a snapshot reader for it. 
pub fn snap(client: &Client) -> (Box, TempDir) { - use types::ids::BlockId; + use crate::types::ids::BlockId; let tempdir = TempDir::new("").unwrap(); let path = tempdir.path().join("file"); @@ -162,7 +164,7 @@ pub fn restore( engine: &dyn EthEngine, reader: &dyn SnapshotReader, genesis: &[u8], -) -> Result<(), ::error::Error> { +) -> Result<(), crate::error::Error> { use std::sync::atomic::AtomicBool; let flag = AtomicBool::new(true); diff --git a/crates/ethcore/src/snapshot/tests/proof_of_authority.rs b/crates/ethcore/src/snapshot/tests/proof_of_authority.rs index dc111a025b..68061e10c5 100644 --- a/crates/ethcore/src/snapshot/tests/proof_of_authority.rs +++ b/crates/ethcore/src/snapshot/tests/proof_of_authority.rs @@ -18,17 +18,19 @@ use std::{cell::RefCell, str::FromStr, sync::Arc}; +use crate::{ + client::{BlockChainClient, ChainInfo, Client}, + snapshot::tests::helpers as snapshot_helpers, + spec::Spec, + test_helpers::generate_dummy_client_with_spec, + types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}, +}; use accounts::AccountProvider; -use client::{BlockChainClient, ChainInfo, Client}; use crypto::publickey::Secret; -use snapshot::tests::helpers as snapshot_helpers; -use spec::Spec; use tempdir::TempDir; -use test_helpers::generate_dummy_client_with_spec; -use types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}; +use crate::test_helpers; use ethereum_types::Address; -use test_helpers; use_contract!(test_validator_set, "res/contracts/test_validator_set.json"); @@ -104,7 +106,7 @@ fn make_chain( { // push a block with given number, signed by one of the signers, with given transactions. 
let push_block = |signers: &[Address], n, txs: Vec| { - use miner::{self, MinerService}; + use crate::miner::{self, MinerService}; let idx = n as usize % signers.len(); trace!(target: "snapshot", "Pushing block #{}, {} txs, author={}", diff --git a/crates/ethcore/src/snapshot/tests/proof_of_work.rs b/crates/ethcore/src/snapshot/tests/proof_of_work.rs index 6a2dfac587..532b63262e 100644 --- a/crates/ethcore/src/snapshot/tests/proof_of_work.rs +++ b/crates/ethcore/src/snapshot/tests/proof_of_work.rs @@ -16,26 +16,27 @@ //! PoW block chunker and rebuilder tests. -use error::{Error, ErrorKind}; +use crate::error::{Error, ErrorKind}; use std::sync::atomic::AtomicBool; use tempdir::TempDir; -use blockchain::{ - generator::{BlockBuilder, BlockGenerator}, - BlockChain, ExtrasInsert, -}; -use snapshot::{ - chunk_secondary, - io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, - Error as SnapshotError, Progress, SnapshotComponents, +use crate::{ + blockchain::{ + BlockChain, ExtrasInsert, + generator::{BlockBuilder, BlockGenerator}, + }, + snapshot::{ + Error as SnapshotError, Progress, SnapshotComponents, chunk_secondary, + io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, + }, }; +use crate::test_helpers; use kvdb::DBTransaction; use parking_lot::Mutex; use snappy; -use test_helpers; -const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { +const SNAPSHOT_MODE: crate::snapshot::PowSnapshot = crate::snapshot::PowSnapshot { blocks: 30000, max_restore_blocks: 30000, }; @@ -46,7 +47,7 @@ fn chunk_and_restore(amount: u64) { let generator = BlockGenerator::new(vec![rest]); let genesis = genesis.last(); - let engine = ::spec::Spec::new_test().engine; + let engine = crate::spec::Spec::new_test().engine; let tempdir = TempDir::new("").unwrap(); let snapshot_path = tempdir.path().join("SNAP"); @@ -66,7 +67,7 @@ fn chunk_and_restore(amount: u64) { block.encoded(), vec![], ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, + 
fork_choice: crate::engines::ForkChoice::New, is_finalized: false, }, ); @@ -88,7 +89,7 @@ fn chunk_and_restore(amount: u64) { ) .unwrap(); - let manifest = ::snapshot::ManifestData { + let manifest = crate::snapshot::ManifestData { version: 2, state_hashes: Vec::new(), block_hashes: block_hashes, @@ -160,7 +161,7 @@ fn checks_flag() { let chunk = stream.out(); let db = test_helpers::new_db(); - let engine = ::spec::Spec::new_test().engine; + let engine = crate::spec::Spec::new_test().engine; let chain = BlockChain::new( Default::default(), genesis.last().encoded().raw(), @@ -168,7 +169,7 @@ fn checks_flag() { engine.params().eip1559_transition, ); - let manifest = ::snapshot::ManifestData { + let manifest = crate::snapshot::ManifestData { version: 2, state_hashes: Vec::new(), block_hashes: Vec::new(), diff --git a/crates/ethcore/src/snapshot/tests/service.rs b/crates/ethcore/src/snapshot/tests/service.rs index 13e1d77568..fb277a5fe5 100644 --- a/crates/ethcore/src/snapshot/tests/service.rs +++ b/crates/ethcore/src/snapshot/tests/service.rs @@ -18,25 +18,27 @@ use std::{fs, sync::Arc}; -use blockchain::BlockProvider; -use client::{BlockInfo, Client, ClientConfig, ImportBlock}; -use snapshot::{ - chunk_secondary, chunk_state, - io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, - service::{Service, ServiceParams}, - ManifestData, Progress, RestorationStatus, SnapshotService, +use crate::{ + blockchain::BlockProvider, + client::{BlockInfo, Client, ClientConfig, ImportBlock}, + snapshot::{ + ManifestData, Progress, RestorationStatus, SnapshotService, chunk_secondary, chunk_state, + io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, + service::{Service, ServiceParams}, + }, + spec::Spec, + test_helpers::{ + generate_dummy_client_with_spec_and_data, new_db, new_temp_db, restoration_db_handler, + }, + types::ids::BlockId, }; -use spec::Spec; use tempdir::TempDir; -use test_helpers::{ - generate_dummy_client_with_spec_and_data, new_db, 
new_temp_db, restoration_db_handler, -}; -use types::ids::BlockId; -use io::IoChannel; +use crate::{io::IoChannel, verification::queue::kind::blocks::Unverified}; use kvdb_rocksdb::DatabaseConfig; use parking_lot::Mutex; -use verification::queue::kind::blocks::Unverified; + +use crate::exit::ShutdownManager; #[test] fn restored_is_equivalent() { @@ -67,8 +69,9 @@ fn restored_is_equivalent() { Default::default(), &spec, blockchain_db, - Arc::new(::miner::Miner::new_for_tests(&spec, None)), + Arc::new(crate::miner::Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); @@ -170,7 +173,7 @@ fn keep_ancient_blocks() { // Test variables const NUM_BLOCKS: u64 = 500; const NUM_SNAPSHOT_BLOCKS: u64 = 300; - const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { + const SNAPSHOT_MODE: crate::snapshot::PowSnapshot = crate::snapshot::PowSnapshot { blocks: NUM_SNAPSHOT_BLOCKS, max_restore_blocks: NUM_SNAPSHOT_BLOCKS, }; @@ -230,8 +233,9 @@ fn keep_ancient_blocks() { ClientConfig::default(), &spec, client_db, - Arc::new(::miner::Miner::new_for_tests(&spec, None)), + Arc::new(crate::miner::Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); @@ -288,9 +292,11 @@ fn keep_ancient_blocks() { // Check that we have blocks in [NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1 ; NUM_BLOCKS] // but none before - assert!(client2 - .block(BlockId::Number(NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1)) - .is_some()); + assert!( + client2 + .block(BlockId::Number(NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1)) + .is_some() + ); assert!(client2.block(BlockId::Number(100)).is_none()); // Check that the first 50 blocks have been migrated @@ -316,8 +322,9 @@ fn recover_aborted_recovery() { Default::default(), &spec, client_db, - Arc::new(::miner::Miner::new_for_tests(&spec, None)), + Arc::new(crate::miner::Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + 
Arc::new(ShutdownManager::null()), ) .unwrap(); let service_params = ServiceParams { diff --git a/crates/ethcore/src/snapshot/tests/state.rs b/crates/ethcore/src/snapshot/tests/state.rs index 39eb380c8c..9fec345e98 100644 --- a/crates/ethcore/src/snapshot/tests/state.rs +++ b/crates/ethcore/src/snapshot/tests/state.rs @@ -16,20 +16,21 @@ //! State snapshotting tests. -extern crate rand_xorshift; +use rand_xorshift; -use hash::{keccak, KECCAK_NULL_RLP}; -use std::sync::{atomic::AtomicBool, Arc}; +use hash::{KECCAK_NULL_RLP, keccak}; +use std::sync::{Arc, atomic::AtomicBool}; use super::helpers::StateProducer; -use snapshot::{ - account, chunk_state, - io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, - Error as SnapshotError, Progress, StateRebuilder, SNAPSHOT_SUBPARTS, +use crate::{ + snapshot::{ + Error as SnapshotError, Progress, SNAPSHOT_SUBPARTS, StateRebuilder, account, chunk_state, + io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter}, + }, + types::basic_account::BasicAccount, }; -use types::basic_account::BasicAccount; -use error::{Error, ErrorKind}; +use crate::error::{Error, ErrorKind}; use self::rand_xorshift::XorShiftRng; use ethereum_types::H256; @@ -75,7 +76,7 @@ fn snap_and_restore() { writer .into_inner() - .finish(::snapshot::ManifestData { + .finish(crate::snapshot::ManifestData { version: 2, state_hashes: state_hashes, block_hashes: Vec::new(), @@ -128,7 +129,7 @@ fn get_code_from_prev_chunk() { use rlp::RlpStream; use std::collections::HashSet; - use account_db::{AccountDB, AccountDBMut}; + use crate::account_db::{AccountDB, AccountDBMut}; let code = b"this is definitely code"; let mut used_code = HashSet::new(); @@ -214,7 +215,7 @@ fn checks_flag() { writer .into_inner() - .finish(::snapshot::ManifestData { + .finish(crate::snapshot::ManifestData { version: 2, state_hashes, block_hashes: Vec::new(), diff --git a/crates/ethcore/src/snapshot/watcher.rs b/crates/ethcore/src/snapshot/watcher.rs index 
968d1e08cb..e53d4ed5ea 100644 --- a/crates/ethcore/src/snapshot/watcher.rs +++ b/crates/ethcore/src/snapshot/watcher.rs @@ -16,12 +16,14 @@ //! Watcher for snapshot-related chain events. -use client::{BlockInfo, ChainNotify, Client, ClientIoMessage, NewBlocks}; +use crate::{ + client::{BlockInfo, ChainNotify, Client, ClientIoMessage, NewBlocks}, + types::ids::BlockId, +}; use parking_lot::Mutex; -use types::ids::BlockId; +use crate::io::IoChannel; use ethereum_types::H256; -use io::IoChannel; use std::sync::Arc; @@ -139,7 +141,7 @@ impl ChainNotify for Watcher { mod tests { use super::{Broadcast, Oracle, Watcher}; - use client::{ChainNotify, ChainRoute, NewBlocks}; + use crate::client::{ChainNotify, ChainRoute, NewBlocks}; use ethereum_types::{BigEndianHash, H256, U256}; diff --git a/crates/ethcore/src/spec/genesis.rs b/crates/ethcore/src/spec/genesis.rs index 127ffd531e..8cc583b92d 100644 --- a/crates/ethcore/src/spec/genesis.rs +++ b/crates/ethcore/src/spec/genesis.rs @@ -14,10 +14,10 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::spec::seal::Seal; use ethereum_types::{Address, H256, U256}; use ethjson; use hash::KECCAK_NULL_RLP; -use spec::seal::Seal; /// Genesis components. pub struct Genesis { diff --git a/crates/ethcore/src/spec/seal.rs b/crates/ethcore/src/spec/seal.rs index 33d69b1212..e45e97953d 100644 --- a/crates/ethcore/src/spec/seal.rs +++ b/crates/ethcore/src/spec/seal.rs @@ -16,7 +16,7 @@ //! Spec seal. 
-use ethereum_types::{H256, H520, H64}; +use ethereum_types::{H64, H256, H520}; use ethjson; use rlp::RlpStream; diff --git a/crates/ethcore/src/spec/spec.rs b/crates/ethcore/src/spec/spec.rs index bf2df2a222..a8b7cdb5dd 100644 --- a/crates/ethcore/src/spec/spec.rs +++ b/crates/ethcore/src/spec/spec.rs @@ -24,30 +24,32 @@ use std::{ sync::Arc, }; +use crate::types::{BlockNumber, header::Header}; use bytes::Bytes; use ethereum_types::{Address, Bloom, H160, H256, U256}; use ethjson; -use hash::{keccak, KECCAK_NULL_RLP}; +use hash::{KECCAK_NULL_RLP, keccak}; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use rustc_hex::FromHex; -use types::{header::Header, BlockNumber}; use vm::{AccessList, ActionParams, ActionValue, CallType, EnvInfo, ParamsType}; -use builtin::Builtin; -use engines::{ - AuthorityRound, BasicAuthority, Clique, EthEngine, HoneyBadgerBFT, InstantSeal, - InstantSealParams, NullEngine, DEFAULT_BLOCKHASH_CONTRACT, +use crate::{ + engines::{ + AuthorityRound, BasicAuthority, Clique, DEFAULT_BLOCKHASH_CONTRACT, EthEngine, + HoneyBadgerBFT, InstantSeal, InstantSealParams, NullEngine, + }, + error::Error, + executive::Executive, + factory::Factories, + machine::EthereumMachine, + pod_state::PodState, + spec::{Genesis, seal::Generic as GenericSeal}, + state::{Backend, State, Substate, backend::Basic as BasicBackend}, + trace::{NoopTracer, NoopVMTracer}, }; -use error::Error; -use executive::Executive; -use factory::Factories; -use machine::EthereumMachine; +use builtin::Builtin; use maplit::btreeset; -use pod_state::PodState; -use spec::{seal::Generic as GenericSeal, Genesis}; -use state::{backend::Basic as BasicBackend, Backend, State, Substate}; -use trace::{NoopTracer, NoopVMTracer}; pub use ethash::OptimizeFor; @@ -791,7 +793,7 @@ impl Spec { } } - Arc::new(::ethereum::Ethash::new( + Arc::new(crate::ethereum::Ethash::new( spec_params.cache_dir, ethash.params.into(), machine, @@ -1061,7 +1063,7 @@ impl Spec { /// initialize genesis epoch data, using 
in-memory database for /// constructor. pub fn genesis_epoch_data(&self) -> Result, String> { - use types::transaction::{Action, Transaction, TypedTransaction}; + use crate::types::transaction::{Action, Transaction, TypedTransaction}; let genesis = self.genesis_header(); @@ -1099,7 +1101,7 @@ impl Spec { }) .fake_sign(from); - let res = ::state::prove_transaction_virtual( + let res = crate::state::prove_transaction_virtual( db.as_hash_db_mut(), *genesis.state_root(), &tx, @@ -1196,6 +1198,7 @@ impl Spec { load_bundled!("test/authority_round_block_reward_contract") } + /// test: authority_round_rewrite_bytecode_transitions #[cfg(any(test, feature = "test-helpers"))] pub fn new_test_round_rewrite_bytecode_transitions() -> Self { load_bundled!("test/authority_round_rewrite_bytecode_transitions") @@ -1235,12 +1238,14 @@ impl Spec { #[cfg(test)] mod tests { use super::*; + use crate::{ + state::State, + test_helpers::get_temp_state_db, + types::{view, views::BlockView}, + }; use ethereum_types::{H160, H256}; - use state::State; use std::str::FromStr; use tempdir::TempDir; - use test_helpers::get_temp_state_db; - use types::{view, views::BlockView}; #[test] fn test_load_empty() { diff --git a/crates/ethcore/src/state/account.rs b/crates/ethcore/src/state/account.rs index e524285c5a..b59f025bd0 100644 --- a/crates/ethcore/src/state/account.rs +++ b/crates/ethcore/src/state/account.rs @@ -16,24 +16,22 @@ //! Single account in the system. 
+use crate::{error::Error, pod_account::*, types::basic_account::BasicAccount}; use bytes::{Bytes, ToPretty}; -use error::Error; use ethereum_types::{Address, BigEndianHash, H256, U256}; use ethtrie::{Result as TrieResult, SecTrieDB, TrieDB, TrieFactory}; -use hash::{keccak, KECCAK_EMPTY, KECCAK_NULL_RLP}; +use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; use hash_db::HashDB; use keccak_hasher::KeccakHasher; use kvdb::DBValue; use lru_cache::LruCache; -use pod_account::*; -use rlp::{encode, RlpStream}; +use rlp::{RlpStream, encode}; use std::{ collections::{BTreeMap, HashMap}, fmt, sync::Arc, }; use trie::{Recorder, Trie}; -use types::basic_account::BasicAccount; use std::cell::{Cell, RefCell}; @@ -252,14 +250,12 @@ impl Account { return Ok(value); } match &self.original_storage_cache { - Some((ref original_storage_root, ref original_storage_cache)) => { - Self::get_and_cache_storage( - original_storage_root, - &mut original_storage_cache.borrow_mut(), - db, - key, - ) - } + Some((original_storage_root, original_storage_cache)) => Self::get_and_cache_storage( + original_storage_root, + &mut original_storage_cache.borrow_mut(), + db, + key, + ), None => Self::get_and_cache_storage( &self.storage_root, &mut self.storage_cache.borrow_mut(), @@ -298,7 +294,7 @@ impl Account { /// Get cached original storage value after last state commitment. Returns `None` if the key is not in the cache. 
pub fn cached_original_storage_at(&self, key: &H256) -> Option { match &self.original_storage_cache { - Some((_, ref original_storage_cache)) => original_storage_cache + Some((_, original_storage_cache)) => original_storage_cache .borrow_mut() .get_mut(key) .map(|value| value.clone()), @@ -689,7 +685,7 @@ impl fmt::Debug for Account { #[cfg(test)] mod tests { use super::*; - use account_db::*; + use crate::account_db::*; use bytes::Bytes; use ethereum_types::{Address, H256}; use journaldb::new_memory_db; @@ -844,7 +840,10 @@ mod tests { #[test] fn new_account() { let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new()); - assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); + assert_eq!( + a.rlp().to_hex(), + "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + ); assert_eq!(*a.balance(), 69u8.into()); assert_eq!(*a.nonce(), 0u8.into()); assert_eq!(a.code_hash(), KECCAK_EMPTY); diff --git a/crates/ethcore/src/state/backend.rs b/crates/ethcore/src/state/backend.rs index 53e8ee4a48..ca790ddc82 100644 --- a/crates/ethcore/src/state/backend.rs +++ b/crates/ethcore/src/state/backend.rs @@ -26,6 +26,7 @@ use std::{ sync::Arc, }; +use crate::state::Account; use ethereum_types::{Address, H256}; use hash_db::{AsHashDB, HashDB}; use journaldb::AsKeyedHashDB; @@ -33,7 +34,6 @@ use keccak_hasher::KeccakHasher; use kvdb::DBValue; use memory_db::MemoryDB; use parking_lot::Mutex; -use state::Account; /// State backend. See module docs for more details. 
pub trait Backend: Send { diff --git a/crates/ethcore/src/state/mod.rs b/crates/ethcore/src/state/mod.rs index 2617412c2f..9ed08bcd28 100644 --- a/crates/ethcore/src/state/mod.rs +++ b/crates/ethcore/src/state/mod.rs @@ -22,25 +22,27 @@ use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP}; use std::{ cell::{RefCell, RefMut}, - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet, hash_map::Entry}, fmt, sync::Arc, }; -use error::Error; -use executed::{Executed, ExecutionError}; -use executive::{Executive, TransactOptions}; -use factory::{Factories, VmFactory}; -use machine::EthereumMachine as Machine; -use pod_account::*; -use pod_state::{self, PodState}; -use state_db::StateDB; -use trace::{self, FlatTrace, VMTrace}; -use types::{ - basic_account::BasicAccount, - receipt::{LegacyReceipt, TransactionOutcome, TypedReceipt}, - state_diff::StateDiff, - transaction::SignedTransaction, +use crate::{ + error::Error, + executed::{Executed, ExecutionError}, + executive::{Executive, TransactOptions}, + factory::{Factories, VmFactory}, + machine::EthereumMachine as Machine, + pod_account::*, + pod_state::{self, PodState}, + state_db::StateDB, + trace::{self, FlatTrace, VMTrace}, + types::{ + basic_account::BasicAccount, + receipt::{LegacyReceipt, TransactionOutcome, TypedReceipt}, + state_diff::StateDiff, + transaction::SignedTransaction, + }, }; use vm::EnvInfo; @@ -203,7 +205,7 @@ pub fn check_proof( ) -> ProvedExecution { let backend = self::backend::ProofCheck::new(proof); let mut factories = Factories::default(); - factories.accountdb = ::account_db::Factory::Plain; + factories.accountdb = crate::account_db::Factory::Plain; let res = State::from_existing( backend, @@ -640,7 +642,7 @@ impl State { match checkpoint.get(address) { // The account exists at this checkpoint. Some(Some(AccountEntry { - account: Some(ref account), + account: Some(account), .. 
})) => { if let Some(value) = account.cached_storage_at(key) { @@ -664,7 +666,7 @@ impl State { } // The account didn't exist at that point. Return empty value. Some(Some(AccountEntry { account: None, .. })) => { - return Ok(Some(H256::default())) + return Ok(Some(H256::default())); } // The value was not cached at that checkpoint, meaning it was not modified at all. Some(None) => { @@ -1574,17 +1576,19 @@ impl Clone for State { #[cfg(test)] mod tests { use super::*; + use crate::{ + machine::EthereumMachine, + spec::*, + test_helpers::{get_temp_state, get_temp_state_db}, + trace::{FlatTrace, TraceError, trace}, + types::transaction::*, + }; use crypto::publickey::Secret; use ethereum_types::{Address, BigEndianHash, H256, U256}; use evm::CallType; - use hash::{keccak, KECCAK_NULL_RLP}; - use machine::EthereumMachine; + use hash::{KECCAK_NULL_RLP, keccak}; use rustc_hex::FromHex; - use spec::*; use std::{str::FromStr, sync::Arc}; - use test_helpers::{get_temp_state, get_temp_state_db}; - use trace::{trace, FlatTrace, TraceError}; - use types::transaction::*; use vm::EnvInfo; fn secret() -> Secret { @@ -1592,7 +1596,7 @@ mod tests { } fn make_frontier_machine(max_depth: usize) -> EthereumMachine { - let mut machine = ::ethereum::new_frontier_test_machine(); + let mut machine = crate::ethereum::new_frontier_test_machine(); machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); machine } @@ -3357,7 +3361,7 @@ mod tests { #[test] fn should_trace_diff_suicided_accounts() { - use pod_account; + use crate::pod_account; let a = Address::from_low_u64_be(10); let db = get_temp_state_db(); @@ -3396,7 +3400,7 @@ mod tests { #[test] fn should_trace_diff_unmodified_storage() { - use pod_account; + use crate::pod_account; let a = Address::from_low_u64_be(10); let db = get_temp_state_db(); diff --git a/crates/ethcore/src/state/substate.rs b/crates/ethcore/src/state/substate.rs index a91dabbbea..441bbe76ee 100644 --- 
a/crates/ethcore/src/state/substate.rs +++ b/crates/ethcore/src/state/substate.rs @@ -16,10 +16,10 @@ //! Execution environment substate. use super::CleanupMode; +use crate::types::log_entry::LogEntry; use ethereum_types::Address; use evm::{CleanDustMode, Schedule}; use std::collections::HashSet; -use types::log_entry::LogEntry; use vm::access_list::AccessList; /// State changes which should be applied in finalize, @@ -88,8 +88,8 @@ impl Substate { #[cfg(test)] mod tests { use super::Substate; + use crate::types::log_entry::LogEntry; use ethereum_types::Address; - use types::log_entry::LogEntry; #[test] fn created() { diff --git a/crates/ethcore/src/state_db.rs b/crates/ethcore/src/state_db.rs index 78942399d4..8365fc2c75 100644 --- a/crates/ethcore/src/state_db.rs +++ b/crates/ethcore/src/state_db.rs @@ -22,6 +22,7 @@ use std::{ sync::Arc, }; +use crate::types::BlockNumber; use ethereum_types::{Address, H256}; use hash_db::HashDB; use journaldb::JournalDB; @@ -30,9 +31,8 @@ use kvdb::{DBTransaction, DBValue}; use lru_cache::LruCache; use memory_cache::MemoryLruCache; use parking_lot::Mutex; -use types::BlockNumber; -use state::{self, Account}; +use crate::state::{self, Account}; const STATE_CACHE_BLOCKS: usize = 12; @@ -168,10 +168,7 @@ impl StateDB { pub fn sync_cache(&mut self, enacted: &[H256], retracted: &[H256], is_best: bool) { trace!( "sync_cache id = (#{:?}, {:?}), parent={:?}, best={}", - self.commit_number, - self.commit_hash, - self.parent_hash, - is_best + self.commit_number, self.commit_hash, self.parent_hash, is_best ); let mut cache = self.account_cache.lock(); let cache = &mut *cache; @@ -445,10 +442,12 @@ unsafe impl Sync for SyncAccount {} #[cfg(test)] mod tests { + use crate::{ + state::{Account, Backend}, + test_helpers::get_temp_state_db, + }; use ethereum_types::{Address, H256, U256}; use kvdb::DBTransaction; - use state::{Account, Backend}; - use test_helpers::get_temp_state_db; #[test] fn state_db_smoke() { diff --git 
a/crates/ethcore/src/test_helpers.rs b/crates/ethcore/src/test_helpers.rs index ff75b09797..982acc3597 100644 --- a/crates/ethcore/src/test_helpers.rs +++ b/crates/ethcore/src/test_helpers.rs @@ -18,8 +18,18 @@ use std::{fs, io, path::Path, sync::Arc}; -use blockchain::{ - BlockChain, BlockChainDB, BlockChainDBHandler, Config as BlockChainConfig, ExtrasInsert, +use crate::{ + blockchain::{ + BlockChain, BlockChainDB, BlockChainDBHandler, Config as BlockChainConfig, ExtrasInsert, + }, + io::IoChannel, + types::{ + BlockNumber, encoded, + header::Header, + transaction::{Action, SignedTransaction, Transaction, TypedTransaction}, + view, + views::BlockView, + }, }; use blooms_db; use bytes::Bytes; @@ -28,33 +38,28 @@ use db::KeyValueDB; use ethereum_types::{Address, H256, H512, U256}; use evm::Factory as EvmFactory; use hash::keccak; -use io::IoChannel; use kvdb_rocksdb::{self, Database, DatabaseConfig}; use parking_lot::RwLock; use rlp::{self, RlpStream}; use tempdir::TempDir; -use types::{ - encoded, - header::Header, - transaction::{Action, SignedTransaction, Transaction, TypedTransaction}, - view, - views::BlockView, - BlockNumber, -}; -use block::{Drain, OpenBlock}; -use client::{ - BlockInfo, ChainInfo, ChainMessageType, ChainNotify, Client, ClientConfig, ImportBlock, - PrepareOpenBlock, +use crate::{ + block::{Drain, OpenBlock}, + client::{ + BlockInfo, ChainInfo, ChainMessageType, ChainNotify, Client, ClientConfig, ImportBlock, + PrepareOpenBlock, + }, + engines::{EngineSigner, Seal}, + factory::Factories, + miner::Miner, + spec::Spec, + state::*, + state_db::StateDB, + verification::queue::kind::blocks::Unverified, }; -use engines::{EngineSigner, Seal}; use ethjson::crypto::publickey::{Public, Signature}; -use factory::Factories; -use miner::Miner; -use spec::Spec; -use state::*; -use state_db::StateDB; -use verification::queue::kind::blocks::Unverified; + +use crate::exit::ShutdownManager; /// Creates test block with corresponding header pub fn 
create_test_block(header: &Header) -> Bytes { @@ -164,6 +169,7 @@ where client_db, Arc::new(miner), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let test_engine = &*test_spec.engine; @@ -321,6 +327,7 @@ pub fn push_block_with_transactions(client: &Arc, transactions: &[Signed client.import_verified_blocks(); } +/// integrates blocks into the client pub fn push_block_with_transactions_and_author( client: &Arc, transactions: &[SignedTransaction], @@ -370,6 +377,7 @@ pub fn get_test_client_with_blocks(blocks: Vec) -> Arc { client_db, Arc::new(Miner::new_for_tests(&test_spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); @@ -520,7 +528,7 @@ pub fn generate_dummy_blockchain(block_number: u32) -> BlockChain { encoded::Block::new(create_unverifiable_block(block_order, bc.best_block_hash())), vec![], ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, + fork_choice: crate::engines::ForkChoice::New, is_finalized: false, }, ); @@ -552,7 +560,7 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> BlockChain { )), vec![], ExtrasInsert { - fork_choice: ::engines::ForkChoice::New, + fork_choice: crate::engines::ForkChoice::New, is_finalized: false, }, ); @@ -575,13 +583,13 @@ pub fn generate_dummy_empty_blockchain() -> BlockChain { } /// Returns temp state -pub fn get_temp_state() -> State<::state_db::StateDB> { +pub fn get_temp_state() -> State { let journal_db = get_temp_state_db(); State::new(journal_db, U256::from(0), Default::default()) } /// Returns temp state using coresponding factory -pub fn get_temp_state_with_factory(factory: EvmFactory) -> State<::state_db::StateDB> { +pub fn get_temp_state_with_factory(factory: EvmFactory) -> State { let journal_db = get_temp_state_db(); let mut factories = Factories::default(); factories.vm = factory.into(); @@ -682,16 +690,18 @@ pub struct TestNotify { impl ChainNotify for TestNotify { fn broadcast(&self, message: ChainMessageType) { let 
data = match message { - ChainMessageType::Consensus(data) => data, + ChainMessageType::Consensus(_message, data) => data, }; self.messages.write().push(data); } - fn send(&self, message: ChainMessageType, node_id: Option) { + fn send(&self, message: ChainMessageType, node_id: &H512) { let data = match message { - ChainMessageType::Consensus(data) => data, + ChainMessageType::Consensus(_message, data) => data, }; - self.targeted_messages.write().push((data, node_id)); + self.targeted_messages + .write() + .push((data, Some(node_id.clone()))); } } diff --git a/crates/ethcore/src/tests/blockchain.rs b/crates/ethcore/src/tests/blockchain.rs index 86a1475b15..98a605d7cb 100644 --- a/crates/ethcore/src/tests/blockchain.rs +++ b/crates/ethcore/src/tests/blockchain.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use blockchain::BlockProvider; +use crate::blockchain::BlockProvider; -use test_helpers::{ +use crate::test_helpers::{ generate_dummy_blockchain, generate_dummy_blockchain_with_extra, generate_dummy_empty_blockchain, }; diff --git a/crates/ethcore/src/tests/client.rs b/crates/ethcore/src/tests/client.rs index 5509a483bd..2e15c2cd27 100644 --- a/crates/ethcore/src/tests/client.rs +++ b/crates/ethcore/src/tests/client.rs @@ -15,41 +15,46 @@ // along with OpenEthereum. If not, see . 
use std::{ - str::{from_utf8, FromStr}, + str::{FromStr, from_utf8}, sync::Arc, }; -use client::{ - traits::{ - BlockChainClient, BlockChainReset, BlockInfo, ChainInfo, ImportBlock, ImportExportBlocks, +use crate::{ + client::{ + Client, ClientConfig, ImportSealedBlock, PrepareOpenBlock, + traits::{ + BlockChainClient, BlockChainReset, BlockInfo, ChainInfo, ImportBlock, + ImportExportBlocks, + }, }, - Client, ClientConfig, ImportSealedBlock, PrepareOpenBlock, + ethereum, + executive::{Executive, TransactOptions}, + io::IoChannel, + miner::{Miner, MinerService, PendingOrdering}, + spec::Spec, + state::{self, CleanupMode, State, StateInfo}, + test_helpers::{ + self, generate_dummy_client, generate_dummy_client_with_data, get_bad_state_dummy_block, + get_good_dummy_block, get_good_dummy_block_seq, get_test_client_with_blocks, + push_blocks_to_client, + }, + types::{ + data_format::DataFormat, + filter::Filter, + ids::BlockId, + transaction::{Action, Condition, PendingTransaction, Transaction, TypedTransaction}, + view, + views::BlockView, + }, + verification::queue::kind::blocks::Unverified, }; use crypto::publickey::KeyPair; -use ethereum; use ethereum_types::{Address, U256}; -use executive::{Executive, TransactOptions}; use hash::keccak; -use io::IoChannel; -use miner::{Miner, MinerService, PendingOrdering}; use rustc_hex::ToHex; -use spec::Spec; -use state::{self, CleanupMode, State, StateInfo}; use tempdir::TempDir; -use test_helpers::{ - self, generate_dummy_client, generate_dummy_client_with_data, get_bad_state_dummy_block, - get_good_dummy_block, get_good_dummy_block_seq, get_test_client_with_blocks, - push_blocks_to_client, -}; -use types::{ - data_format::DataFormat, - filter::Filter, - ids::BlockId, - transaction::{Action, Condition, PendingTransaction, Transaction, TypedTransaction}, - view, - views::BlockView, -}; -use verification::queue::kind::blocks::Unverified; + +use crate::exit::ShutdownManager; #[test] fn imports_from_empty() { @@ -62,6 +67,7 @@ fn 
imports_from_empty() { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); client.import_verified_blocks(); @@ -80,6 +86,7 @@ fn should_return_registrar() { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let params = client.additional_params(); @@ -100,6 +107,7 @@ fn imports_good_block() { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let good_block = get_good_dummy_block(); @@ -127,6 +135,7 @@ fn query_none_block() { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let non_existant = client.block_header(BlockId::Number(188)); @@ -329,6 +338,7 @@ fn change_history_size() { db.clone(), Arc::new(Miner::new_for_tests(&test_spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); @@ -361,6 +371,7 @@ fn change_history_size() { db, Arc::new(Miner::new_for_tests(&test_spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); assert_eq!(client.state().balance(&address).unwrap(), 100.into()); @@ -426,7 +437,7 @@ fn does_not_propagate_delayed_transactions() { #[test] fn transaction_proof() { - use client::ProvingBlockChainClient; + use crate::client::ProvingBlockChainClient; let client = generate_dummy_client(0); let address = Address::random(); @@ -468,8 +479,8 @@ fn transaction_proof() { .1; let backend = state::backend::ProofCheck::new(&proof); - let mut factories = ::factory::Factories::default(); - factories.accountdb = ::account_db::Factory::Plain; // raw state values, no mangled keys. + let mut factories = crate::factory::Factories::default(); + factories.accountdb = crate::account_db::Factory::Plain; // raw state values, no mangled keys. 
let root = *client.best_block_header().state_root(); let machine = test_spec.engine.machine(); diff --git a/crates/ethcore/src/tests/evm.rs b/crates/ethcore/src/tests/evm.rs index b8e1f5c214..5498888c8d 100644 --- a/crates/ethcore/src/tests/evm.rs +++ b/crates/ethcore/src/tests/evm.rs @@ -16,14 +16,16 @@ //! Tests of EVM integration with transaction execution. -use evm::{Factory, VMType}; -use executive::Executive; +use crate::{ + executive::Executive, + state::Substate, + test_helpers::get_temp_state_with_factory, + trace::{NoopTracer, NoopVMTracer}, + types::transaction::SYSTEM_ADDRESS, +}; +use evm::{Factory, VMType, evm_test}; use hash::keccak; -use state::Substate; use std::sync::Arc; -use test_helpers::get_temp_state_with_factory; -use trace::{NoopTracer, NoopVMTracer}; -use types::transaction::SYSTEM_ADDRESS; use vm::{AccessList, ActionParams, ActionValue, CallType, EnvInfo, ParamsType}; use rustc_hex::FromHex; @@ -38,7 +40,7 @@ fn test_blockhash_eip210(factory: Factory) { let test_blockhash_contract = "73fffffffffffffffffffffffffffffffffffffffe33141561007a57600143036020526000356101006020510755600061010060205107141561005057600035610100610100602051050761010001555b6000620100006020510714156100755760003561010062010000602051050761020001555b61014a565b4360003512151561009057600060405260206040f35b610100600035430312156100b357610100600035075460605260206060f3610149565b62010000600035430312156100d157600061010060003507146100d4565b60005b156100f6576101006101006000350507610100015460805260206080f3610148565b630100000060003543031215610116576000620100006000350714610119565b60005b1561013c57610100620100006000350507610200015460a052602060a0f3610147565b600060c052602060c0f35b5b5b5b5b"; let blockhash_contract_code = Arc::new(test_blockhash_contract.from_hex().unwrap()); let blockhash_contract_code_hash = keccak(blockhash_contract_code.as_ref()); - let machine = ::ethereum::new_eip210_test_machine(); + let machine = crate::ethereum::new_eip210_test_machine(); let mut env_info = 
EnvInfo::default(); // populate state with 256 last hashes diff --git a/crates/ethcore/src/tests/trace.rs b/crates/ethcore/src/tests/trace.rs index af1664f57b..d185262527 100644 --- a/crates/ethcore/src/tests/trace.rs +++ b/crates/ethcore/src/tests/trace.rs @@ -16,24 +16,28 @@ //! Client tests of tracing -use block::*; -use client::{BlockChainClient, Client, ClientConfig, *}; +use crate::{ + block::*, + client::{BlockChainClient, Client, ClientConfig, *}, + io::*, + miner::Miner, + spec::*, + test_helpers::{self, get_temp_state_db}, + trace::{LocalizedTrace, RewardType, trace::Action::Reward}, + types::{ + header::Header, + transaction::{Action, Transaction, TypedTransaction}, + view, + views::BlockView, + }, + verification::queue::kind::blocks::Unverified, +}; use crypto::publickey::KeyPair; use ethereum_types::{Address, U256}; use hash::keccak; -use io::*; -use miner::Miner; -use spec::*; use std::{str::FromStr, sync::Arc}; -use test_helpers::{self, get_temp_state_db}; -use trace::{trace::Action::Reward, LocalizedTrace, RewardType}; -use types::{ - header::Header, - transaction::{Action, Transaction, TypedTransaction}, - view, - views::BlockView, -}; -use verification::queue::kind::blocks::Unverified; + +use crate::exit::ShutdownManager; #[test] fn can_trace_block_and_uncle_reward() { @@ -50,6 +54,7 @@ fn can_trace_block_and_uncle_reward() { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); diff --git a/crates/ethcore/src/trace/db.rs b/crates/ethcore/src/trace/db.rs index 33ca541f2a..af14692c33 100644 --- a/crates/ethcore/src/trace/db.rs +++ b/crates/ethcore/src/trace/db.rs @@ -17,17 +17,16 @@ //! Trace database. 
use std::{collections::HashMap, sync::Arc}; -use blockchain::BlockChainDB; -use db::{self, cache_manager::CacheManager, CacheUpdatePolicy, Key, Readable, Writable}; +use crate::{blockchain::BlockChainDB, types::BlockNumber}; +use db::{self, CacheUpdatePolicy, Key, Readable, Writable, cache_manager::CacheManager}; use ethereum_types::{H256, H264}; use kvdb::DBTransaction; use parity_util_mem::MallocSizeOfExt; use parking_lot::RwLock; -use types::BlockNumber; -use trace::{ - flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, +use crate::trace::{ Config, Database as TraceDatabase, DatabaseExtras, Filter, ImportRequest, LocalizedTrace, + flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, }; const TRACE_DB_VER: &'static [u8] = b"1.0"; @@ -406,18 +405,20 @@ where #[cfg(test)] mod tests { + use crate::{ + test_helpers::new_db, + trace::{ + AddressesFilter, Config, Database as TraceDatabase, DatabaseExtras, Filter, + ImportRequest, LocalizedTrace, TraceDB, TraceError, + flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, + trace::{Action, Call, Res}, + }, + types::BlockNumber, + }; use ethereum_types::{Address, H256, U256}; use evm::CallType; use kvdb::DBTransaction; use std::{collections::HashMap, sync::Arc}; - use test_helpers::new_db; - use trace::{ - flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, - trace::{Action, Call, Res}, - AddressesFilter, Config, Database as TraceDatabase, DatabaseExtras, Filter, ImportRequest, - LocalizedTrace, TraceDB, TraceError, - }; - use types::BlockNumber; struct NoopExtras; diff --git a/crates/ethcore/src/trace/executive_tracer.rs b/crates/ethcore/src/trace/executive_tracer.rs index 3cb7f2cd4f..b8f791016c 100644 --- a/crates/ethcore/src/trace/executive_tracer.rs +++ b/crates/ethcore/src/trace/executive_tracer.rs @@ -16,16 +16,16 @@ //! Simple executive tracer. 
-use ethereum_types::{Address, U256}; -use log::{debug, warn}; -use std::cmp::min; -use trace::{ +use crate::trace::{ + FlatTrace, Tracer, VMTracer, trace::{ Action, Call, CallResult, Create, CreateResult, MemoryDiff, Res, Reward, RewardType, StorageDiff, Suicide, VMExecutedOperation, VMOperation, VMTrace, }, - FlatTrace, Tracer, VMTracer, }; +use ethereum_types::{Address, U256}; +use log::{debug, warn}; +use std::cmp::min; use vm::{ActionParams, Error as VmError}; /// Simple executive tracer. Traces all calls and creates. Ignores delegatecalls. @@ -42,7 +42,10 @@ impl Tracer for ExecutiveTracer { type Output = FlatTrace; fn prepare_trace_call(&mut self, params: &ActionParams, depth: usize, is_builtin: bool) { - assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_call it cannot be true; qed"); + assert!( + !self.skip_one, + "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_call it cannot be true; qed" + ); if depth != 0 && is_builtin && params.value.value() == U256::zero() { self.skip_one = true; @@ -69,7 +72,10 @@ impl Tracer for ExecutiveTracer { } fn prepare_trace_create(&mut self, params: &ActionParams) { - assert!(!self.skip_one, "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_create it cannot be true; qed"); + assert!( + !self.skip_one, + "skip_one is used only for builtin contracts that do not have subsequent calls; in prepare_trace_create it cannot be true; qed" + ); if let Some(parentlen) = self.sublen_stack.last_mut() { *parentlen += 1; @@ -113,7 +119,10 @@ impl Tracer for ExecutiveTracer { } fn done_trace_create(&mut self, gas_used: U256, code: &[u8], address: Address) { - assert!(!self.skip_one, "skip_one is only set with prepare_trace_call for builtin contracts with no subsequent calls; skip_one cannot be true after the same level prepare_trace_create; qed"); + assert!( + !self.skip_one, + 
"skip_one is only set with prepare_trace_call for builtin contracts with no subsequent calls; skip_one cannot be true after the same level prepare_trace_create; qed" + ); let vecindex = self.vecindex_stack.pop().expect("Executive invoked prepare_trace_create before this function; vecindex_stack is never empty; qed"); let sublen = self.sublen_stack.pop().expect("Executive invoked prepare_trace_create before this function; sublen_stack is never empty; qed"); diff --git a/crates/ethcore/src/trace/import.rs b/crates/ethcore/src/trace/import.rs index aba9b2b091..1c782f1aea 100644 --- a/crates/ethcore/src/trace/import.rs +++ b/crates/ethcore/src/trace/import.rs @@ -15,10 +15,10 @@ // along with OpenEthereum. If not, see . //! Traces import request. +use crate::types::BlockNumber; use ethereum_types::H256; -use types::BlockNumber; -use trace::FlatBlockTraces; +use crate::trace::FlatBlockTraces; /// Traces import request. pub struct ImportRequest { diff --git a/crates/ethcore/src/trace/mod.rs b/crates/ethcore/src/trace/mod.rs index 036a5245f1..4ca7c2f548 100644 --- a/crates/ethcore/src/trace/mod.rs +++ b/crates/ethcore/src/trace/mod.rs @@ -33,6 +33,7 @@ pub use self::{ }; pub use self::types::{ + Tracing, error::Error as TraceError, filter, filter::{AddressesFilter, Filter}, @@ -40,12 +41,11 @@ pub use self::types::{ flat::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}, localized, trace, trace::{MemoryDiff, RewardType, StorageDiff, VMExecutedOperation, VMOperation, VMTrace}, - Tracing, }; +use crate::types::BlockNumber; use ethereum_types::{Address, H256, U256}; use kvdb::DBTransaction; -use types::BlockNumber; use vm::{ActionParams, Error as VmError}; /// This trait is used by executive to build traces. diff --git a/crates/ethcore/src/trace/noop_tracer.rs b/crates/ethcore/src/trace/noop_tracer.rs index 3944ecef82..c967f61d7a 100644 --- a/crates/ethcore/src/trace/noop_tracer.rs +++ b/crates/ethcore/src/trace/noop_tracer.rs @@ -16,11 +16,11 @@ //! Nonoperative tracer. 
-use ethereum_types::{Address, U256}; -use trace::{ - trace::{RewardType, VMTrace}, +use crate::trace::{ FlatTrace, Tracer, VMTracer, + trace::{RewardType, VMTrace}, }; +use ethereum_types::{Address, U256}; use vm::{ActionParams, Error as VmError}; /// Nonoperative tracer. Does not trace anything. diff --git a/crates/ethcore/src/trace/types/filter.rs b/crates/ethcore/src/trace/types/filter.rs index fe1bbf2914..d8fe439287 100644 --- a/crates/ethcore/src/trace/types/filter.rs +++ b/crates/ethcore/src/trace/types/filter.rs @@ -17,9 +17,9 @@ //! Trace filters type definitions use super::trace::{Action, Res}; +use crate::trace::flat::FlatTrace; use ethereum_types::{Address, Bloom, BloomInput}; use std::ops::Range; -use trace::flat::FlatTrace; /// Addresses filter. /// @@ -132,13 +132,13 @@ impl Filter { #[cfg(test)] mod tests { - use ethereum_types::{Address, Bloom, BloomInput}; - use evm::CallType; - use trace::{ + use crate::trace::{ + AddressesFilter, Filter, RewardType, TraceError, flat::FlatTrace, trace::{Action, Call, Create, CreateResult, Res, Reward, Suicide}, - AddressesFilter, Filter, RewardType, TraceError, }; + use ethereum_types::{Address, Bloom, BloomInput}; + use evm::CallType; #[test] fn empty_trace_filter_bloom_possibilities() { diff --git a/crates/ethcore/src/trace/types/flat.rs b/crates/ethcore/src/trace/types/flat.rs index a165e2e91b..0a6094052c 100644 --- a/crates/ethcore/src/trace/types/flat.rs +++ b/crates/ethcore/src/trace/types/flat.rs @@ -126,12 +126,12 @@ impl Into> for FlatBlockTraces { #[cfg(test)] mod tests { use super::{FlatBlockTraces, FlatTrace, FlatTransactionTraces}; - use evm::CallType; - use rlp::*; - use trace::{ - trace::{Action, Call, CallResult, Res, Reward, Suicide}, + use crate::trace::{ RewardType, + trace::{Action, Call, CallResult, Res, Reward, Suicide}, }; + use evm::CallType; + use rlp::*; #[test] fn encode_flat_transaction_traces() { diff --git a/crates/ethcore/src/trace/types/localized.rs 
b/crates/ethcore/src/trace/types/localized.rs index 8392a33d7c..405e55819e 100644 --- a/crates/ethcore/src/trace/types/localized.rs +++ b/crates/ethcore/src/trace/types/localized.rs @@ -17,8 +17,8 @@ //! Localized traces type definitions use super::trace::{Action, Res}; +use crate::types::BlockNumber; use ethereum_types::H256; -use types::BlockNumber; /// Localized trace. #[derive(Debug, PartialEq, Clone)] diff --git a/crates/ethcore/src/transaction_ext.rs b/crates/ethcore/src/transaction_ext.rs index d86f095823..a9ac179186 100644 --- a/crates/ethcore/src/transaction_ext.rs +++ b/crates/ethcore/src/transaction_ext.rs @@ -16,8 +16,8 @@ //! Ethereum transaction +use crate::types::transaction::{self, Action}; use evm::Schedule; -use types::transaction::{self, Action}; /// Extends transaction with gas verification method. pub trait Transaction { diff --git a/crates/ethcore/src/tx_filter.rs b/crates/ethcore/src/tx_filter.rs index 963dcb06a0..69a116b273 100644 --- a/crates/ethcore/src/tx_filter.rs +++ b/crates/ethcore/src/tx_filter.rs @@ -16,19 +16,24 @@ //! Smart contract based transaction filter. 
+use std::num::NonZeroUsize; + use ethabi::FunctionOutputDecoder; use ethereum_types::{Address, H256, U256}; +use fastmap::{H256FastLruMap, new_h256_fast_lru_map}; use lru_cache::LruCache; +use crate::{ + client::{BlockId, BlockInfo}, + spec::CommonParams, + types::{ + BlockNumber, + transaction::{Action, SignedTransaction}, + }, +}; use call_contract::CallContract; -use client::{BlockId, BlockInfo}; use hash::KECCAK_EMPTY; use parking_lot::Mutex; -use spec::CommonParams; -use types::{ - transaction::{Action, SignedTransaction}, - BlockNumber, -}; use_contract!( transact_acl_deprecated, @@ -41,7 +46,7 @@ use_contract!( ); use_contract!(transact_acl_1559, "res/contracts/tx_acl_1559.json"); -const MAX_CACHE_SIZE: usize = 4096; +const MAX_CACHE_SIZE: usize = 40960; mod tx_permissions { pub const _ALL: u32 = 0xffffffff; @@ -52,11 +57,54 @@ mod tx_permissions { pub const _PRIVATE: u32 = 0b00001000; } +pub struct PermissionCache { + // permission cache is only valid for one block. + valid_block: H256, + cache: H256FastLruMap, +} + +impl PermissionCache { + pub fn new(size: usize) -> Self { + PermissionCache { + cache: new_h256_fast_lru_map(NonZeroUsize::new(size).unwrap()), + valid_block: H256::zero(), + } + } + + pub fn refresh(&mut self, current_block: &H256) { + if self.valid_block != *current_block { + self.cache.clear(); + self.valid_block = current_block.clone(); + } + } + + pub fn insert(&mut self, tx_hash: H256, value: u32) { + self.cache.push(tx_hash, value); + } + + pub fn get(&mut self, tx_hash: &H256) -> Option { + self.cache.get_mut(&tx_hash).cloned() + } + + /// returns the block number for which the cache is valid. + pub fn get_valid_block(&self) -> &H256 { + return &self.valid_block; + } + + // // we need a cheap method to get the key for the cache. 
+ // fn calc_key(address: &Address, tx_hash: &H256) -> H256 { + + // // since both, address and tx_hash are already cryptographical products, + // // we can calculate the X-Or out of them to get a H256 cryptographicaly unique identifier. + // return H256::from_slice(address.as_bytes()) ^ *tx_hash; + // } +} + /// Connection filter that uses a contract to manage permissions. pub struct TransactionFilter { contract_address: Address, transition_block: BlockNumber, - permission_cache: Mutex>, + permission_cache: Mutex, contract_version_cache: Mutex>>, } @@ -68,7 +116,7 @@ impl TransactionFilter { .map(|address| TransactionFilter { contract_address: address, transition_block: params.transaction_permission_contract_transition, - permission_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)), + permission_cache: Mutex::new(PermissionCache::new(MAX_CACHE_SIZE)), contract_version_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)), }) } @@ -85,8 +133,7 @@ impl TransactionFilter { return true; } - let mut permission_cache = self.permission_cache.lock(); - let mut contract_version_cache = self.contract_version_cache.lock(); + debug!(target: "tx_filter", "Checking transaction permission for tx: {}", transaction.hash); let (tx_type, to) = match transaction.tx().action { Action::Create => (tx_permissions::CREATE, Address::default()), @@ -107,30 +154,40 @@ impl TransactionFilter { let gas_price = transaction.tx().gas_price; let max_priority_fee_per_gas = transaction.max_priority_fee_per_gas(); let gas_limit = transaction.tx().gas; - let key = (*parent_hash, sender); - if let Some(permissions) = permission_cache.get_mut(&key) { - return *permissions & tx_type != 0; - } + // this scope is for holding the lock for the permission caches only for the time we need it. 
+ { + let mut permission_cache = self.permission_cache.lock(); + permission_cache.refresh(parent_hash); + if let Some(permissions) = permission_cache.get(&transaction.hash) { + return permissions & tx_type != 0; + } + } let contract_address = self.contract_address; - let contract_version = contract_version_cache - .get_mut(parent_hash) - .and_then(|v| *v) - .or_else(|| { - let (data, decoder) = transact_acl::functions::contract_version::call(); - decoder - .decode( - &client - .call_contract(BlockId::Hash(*parent_hash), contract_address, data) - .ok()?, - ) - .ok() - }); - contract_version_cache.insert(*parent_hash, contract_version); + + let contract_version = { + let mut contract_version_cache = self.contract_version_cache.lock(); + + let v = contract_version_cache + .get_mut(parent_hash) + .and_then(|v| *v) + .or_else(|| { + let (data, decoder) = transact_acl::functions::contract_version::call(); + decoder + .decode( + &client + .call_contract(BlockId::Hash(*parent_hash), contract_address, data) + .ok()?, + ) + .ok() + }); + contract_version_cache.insert(*parent_hash, v); + v + }; // Check permissions in smart contract based on its version - let (permissions, filter_only_sender) = match contract_version { + let (permissions, _filter_only_sender) = match contract_version { Some(version) => { let version_u64 = version.low_u64(); trace!(target: "tx_filter", "Version of tx permission contract: {}", version); @@ -203,29 +260,42 @@ impl TransactionFilter { } }; - if filter_only_sender { - permission_cache.insert((*parent_hash, sender), permissions); + { + let mut permission_cache = self.permission_cache.lock(); + + // it could be that cache got refreshed in the meantime by another thread. + if parent_hash == permission_cache.get_valid_block() { + // we can cache every transaciton. 
+ permission_cache.insert(transaction.hash.clone(), permissions); + } else { + trace!(target: "tx_filter", "did not add tx [{}] to permission cache, because crate::block changed in the meantime.", transaction.hash); + } } + trace!(target: "tx_filter", "Given transaction data: sender: {:?} to: {:?} value: {}, gas_price: {}. Permissions required: {:X}, got: {:X}", sender, to, value, gas_price, tx_type, permissions); - permissions & tx_type != 0 + return permissions & tx_type != 0; } } #[cfg(test)] mod test { + use crate::exit::ShutdownManager; + use super::TransactionFilter; - use client::{BlockChainClient, BlockId, Client, ClientConfig}; + use crate::{ + client::{BlockChainClient, BlockId, Client, ClientConfig}, + io::IoChannel, + miner::Miner, + spec::Spec, + test_helpers, + types::transaction::{ + AccessListTx, Action, EIP1559TransactionTx, Transaction, TypedTransaction, + }, + }; use crypto::publickey::{KeyPair, Secret}; use ethereum_types::{Address, U256}; - use io::IoChannel; - use miner::Miner; - use spec::Spec; use std::{str::FromStr, sync::Arc}; use tempdir::TempDir; - use test_helpers; - use types::transaction::{ - AccessListTx, Action, EIP1559TransactionTx, Transaction, TypedTransaction, - }; /// Contract code: https://gist.github.com/VladLupashevskyi/84f18eabb1e4afadf572cf92af3e7e7f #[test] @@ -242,6 +312,7 @@ mod test { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let key1 = KeyPair::from_secret( @@ -418,14 +489,16 @@ mod test { &*client )); - assert!(!filter.transaction_allowed( - &genesis, - block_number, - &basic_tx_with_ether_and_to_key7 - .clone() - .sign(key5.secret(), None), - &*client - )); + assert!( + !filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_with_ether_and_to_key7 + .clone() + .sign(key5.secret(), None), + &*client + ) + ); assert!(!filter.transaction_allowed( &genesis, block_number, @@ -438,28 +511,32 @@ mod test { 
&basic_tx.clone().sign(key6.secret(), None), &*client )); - assert!(filter.transaction_allowed( - &genesis, - block_number, - &basic_tx_with_ether_and_to_key7 - .clone() - .sign(key6.secret(), None), - &*client - )); + assert!( + filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_with_ether_and_to_key7 + .clone() + .sign(key6.secret(), None), + &*client + ) + ); assert!(filter.transaction_allowed( &genesis, block_number, &basic_tx_to_key6.clone().sign(key7.secret(), None), &*client )); - assert!(!filter.transaction_allowed( - &genesis, - block_number, - &basic_tx_with_ether_and_to_key6 - .clone() - .sign(key7.secret(), None), - &*client - )); + assert!( + !filter.transaction_allowed( + &genesis, + block_number, + &basic_tx_with_ether_and_to_key6 + .clone() + .sign(key7.secret(), None), + &*client + ) + ); } /// Contract code: res/chainspec/test/contract_ver_3.sol @@ -477,6 +554,7 @@ mod test { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let key1 = KeyPair::from_secret( @@ -545,6 +623,7 @@ mod test { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let key1 = KeyPair::from_secret( @@ -613,6 +692,7 @@ mod test { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let key1 = KeyPair::from_secret( @@ -684,6 +764,7 @@ mod test { db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let key1 = KeyPair::from_secret( diff --git a/crates/ethcore/src/verification/canon_verifier.rs b/crates/ethcore/src/verification/canon_verifier.rs index a5089356ee..7ac220d3f2 100644 --- a/crates/ethcore/src/verification/canon_verifier.rs +++ b/crates/ethcore/src/verification/canon_verifier.rs @@ -16,12 +16,9 @@ //! Canonical verifier. 
-use super::{verification, Verifier}; +use super::{Verifier, verification}; +use crate::{client::BlockInfo, engines::EthEngine, error::Error, types::header::Header}; use call_contract::CallContract; -use client::BlockInfo; -use engines::EthEngine; -use error::Error; -use types::header::Header; /// A canonial verifier -- this does full verification. pub struct CanonVerifier; diff --git a/crates/ethcore/src/verification/mod.rs b/crates/ethcore/src/verification/mod.rs index 3ee8f8503a..9272067ed3 100644 --- a/crates/ethcore/src/verification/mod.rs +++ b/crates/ethcore/src/verification/mod.rs @@ -30,8 +30,8 @@ pub use self::{ verifier::Verifier, }; +use crate::client::BlockInfo; use call_contract::CallContract; -use client::BlockInfo; /// Verifier type. #[derive(Debug, PartialEq, Clone)] diff --git a/crates/ethcore/src/verification/noop_verifier.rs b/crates/ethcore/src/verification/noop_verifier.rs index b4e1562cad..631fb6125a 100644 --- a/crates/ethcore/src/verification/noop_verifier.rs +++ b/crates/ethcore/src/verification/noop_verifier.rs @@ -16,12 +16,9 @@ //! No-op verifier. -use super::{verification, Verifier}; +use super::{Verifier, verification}; +use crate::{client::BlockInfo, engines::EthEngine, error::Error, types::header::Header}; use call_contract::CallContract; -use client::BlockInfo; -use engines::EthEngine; -use error::Error; -use types::header::Header; /// A no-op verifier -- this will verify everything it's given immediately. #[allow(dead_code)] diff --git a/crates/ethcore/src/verification/queue/kind.rs b/crates/ethcore/src/verification/queue/kind.rs index a5b5754526..2edac39b13 100644 --- a/crates/ethcore/src/verification/queue/kind.rs +++ b/crates/ethcore/src/verification/queue/kind.rs @@ -16,8 +16,7 @@ //! Definition of valid items for the verification queue. 
-use engines::EthEngine; -use error::Error; +use crate::{engines::EthEngine, error::Error}; use ethereum_types::{H256, U256}; use parity_util_mem::MallocSizeOf; @@ -78,14 +77,16 @@ pub trait Kind: 'static + Sized + Send + Sync { pub mod blocks { use super::{BlockLike, Kind}; - use engines::EthEngine; - use error::{BlockError, Error, ErrorKind}; - use types::{ - header::Header, - transaction::{TypedTransaction, UnverifiedTransaction}, - BlockNumber, + use crate::{ + engines::EthEngine, + error::{BlockError, Error, ErrorKind}, + types::{ + BlockNumber, + header::Header, + transaction::{TypedTransaction, UnverifiedTransaction}, + }, + verification::{PreverifiedBlock, verify_block_basic, verify_block_unordered}, }; - use verification::{verify_block_basic, verify_block_unordered, PreverifiedBlock}; use bytes::Bytes; use ethereum_types::{H256, U256}; @@ -213,10 +214,9 @@ pub mod blocks { pub mod headers { use super::{BlockLike, Kind}; - use engines::EthEngine; - use error::Error; - use types::header::Header; - use verification::verify_header_params; + use crate::{ + engines::EthEngine, error::Error, types::header::Header, verification::verify_header_params, + }; use ethereum_types::{H256, U256}; diff --git a/crates/ethcore/src/verification/queue/mod.rs b/crates/ethcore/src/verification/queue/mod.rs index 61f128fcf2..a3271aa4fa 100644 --- a/crates/ethcore/src/verification/queue/mod.rs +++ b/crates/ethcore/src/verification/queue/mod.rs @@ -17,12 +17,14 @@ //! A queue of blocks. Sits between network or other I/O and the `BlockChain`. //! Sorts them ready for blockchain insertion. 
-use blockchain::BlockChain; -use client::ClientIoMessage; -use engines::EthEngine; -use error::{BlockError, Error, ErrorKind, ImportErrorKind}; +use crate::{ + blockchain::BlockChain, + client::ClientIoMessage, + engines::EthEngine, + error::{BlockError, Error, ErrorKind, ImportErrorKind}, + io::*, +}; use ethereum_types::{H256, U256}; -use io::*; use len_caching_lock::LenCachingMutex; use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; use parking_lot::{Condvar, Mutex, RwLock}; @@ -31,15 +33,15 @@ use std::{ collections::{HashMap, HashSet, VecDeque}, iter::FromIterator, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}, }, thread::{self, JoinHandle}, }; use self::kind::{BlockLike, Kind}; -pub use types::verification_queue_info::VerificationQueueInfo as QueueInfo; +pub use crate::types::verification_queue_info::VerificationQueueInfo as QueueInfo; pub mod kind; @@ -126,7 +128,7 @@ pub enum Status { impl Into<::types::block_status::BlockStatus> for Status { fn into(self) -> ::types::block_status::BlockStatus { - use types::block_status::BlockStatus; + use crate::types::block_status::BlockStatus; match self { Status::Queued => BlockStatus::Queued, Status::Bad => BlockStatus::Bad, @@ -870,13 +872,15 @@ impl Drop for VerificationQueue { #[cfg(test)] mod tests { - use super::{kind::blocks::Unverified, BlockQueue, Config, State}; + use super::{BlockQueue, Config, State, kind::blocks::Unverified}; + use crate::{ + error::*, + io::*, + spec::Spec, + test_helpers::{get_good_dummy_block, get_good_dummy_block_seq}, + types::{BlockNumber, view, views::BlockView}, + }; use bytes::Bytes; - use error::*; - use io::*; - use spec::Spec; - use test_helpers::{get_good_dummy_block, get_good_dummy_block_seq}; - use types::{view, views::BlockView, BlockNumber}; // create a test block queue. // auto_scaling enables verifier adjustment. 
diff --git a/crates/ethcore/src/verification/verification.rs b/crates/ethcore/src/verification/verification.rs index b1e7244bbd..f1fa299577 100644 --- a/crates/ethcore/src/verification/verification.rs +++ b/crates/ethcore/src/verification/verification.rs @@ -33,13 +33,15 @@ use rlp::Rlp; use triehash::ordered_trie_root; use unexpected::{Mismatch, OutOfBounds}; -use blockchain::*; +use crate::{ + blockchain::*, + client::BlockInfo, + engines::{EthEngine, MAX_UNCLE_AGE}, + error::{BlockError, Error}, + types::{BlockNumber, header::Header, transaction::SignedTransaction}, + verification::queue::kind::blocks::Unverified, +}; use call_contract::CallContract; -use client::BlockInfo; -use engines::{EthEngine, MAX_UNCLE_AGE}; -use error::{BlockError, Error}; -use types::{header::Header, transaction::SignedTransaction, BlockNumber}; -use verification::queue::kind::blocks::Unverified; use time_utils::CheckedSystemTime; @@ -540,25 +542,27 @@ fn verify_block_integrity(block: &Unverified) -> Result<(), Error> { mod tests { use super::*; - use blockchain::{BlockDetails, BlockReceipts, TransactionAddress}; + use crate::{ + blockchain::{BlockDetails, BlockReceipts, TransactionAddress}, + engines::EthEngine, + error::{BlockError::*, ErrorKind}, + spec::{CommonParams, Spec}, + test_helpers::{create_test_block, create_test_block_with_data}, + types::{ + encoded, + log_entry::{LocalizedLogEntry, LogEntry}, + transaction::{Action, SignedTransaction, Transaction, TypedTransaction}, + }, + }; use crypto::publickey::{Generator, Random}; - use engines::EthEngine; - use error::{BlockError::*, ErrorKind}; use ethereum_types::{Address, BloomRef, H256, U256}; use hash::keccak; use rlp; - use spec::{CommonParams, Spec}; use std::{ collections::{BTreeMap, HashMap}, time::{SystemTime, UNIX_EPOCH}, }; - use test_helpers::{create_test_block, create_test_block_with_data}; use triehash::ordered_trie_root; - use types::{ - encoded, - log_entry::{LocalizedLogEntry, LogEntry}, - transaction::{Action, 
SignedTransaction, Transaction, TypedTransaction}, - }; fn check_ok(result: Result<(), Error>) { result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e)); @@ -731,7 +735,7 @@ mod tests { // additions that need access to state (tx filter in specific) // no existing tests need access to test, so having this not function // is fine. - let client = ::client::TestBlockChainClient::default(); + let client = crate::client::TestBlockChainClient::default(); let parent = bc .block_header_data(header.parent_hash()) .ok_or(BlockError::UnknownParent(*header.parent_hash()))? @@ -892,10 +896,20 @@ mod tests { let mut bad_header = good.clone(); bad_header.set_transactions_root(eip86_transactions_root.clone()); bad_header.set_uncles_hash(good_uncles_hash.clone()); - match basic_test(&create_test_block_with_data(&bad_header, &eip86_transactions, &good_uncles), engine) { - Err(Error(ErrorKind::Transaction(ref e), _)) if e == &crypto::publickey::Error::InvalidSignature.into() => (), - e => panic!("Block verification failed.\nExpected: Transaction Error (Invalid Signature)\nGot: {:?}", e), - } + match basic_test( + &create_test_block_with_data(&bad_header, &eip86_transactions, &good_uncles), + engine, + ) { + Err(Error(ErrorKind::Transaction(ref e), _)) + if e == &crypto::publickey::Error::InvalidSignature.into() => + { + () + } + e => panic!( + "Block verification failed.\nExpected: Transaction Error (Invalid Signature)\nGot: {:?}", + e + ), + } let mut header = good.clone(); header.set_transactions_root(good_transactions_root.clone()); @@ -1124,10 +1138,12 @@ mod tests { #[test] fn dust_protection() { + use crate::{ + engines::NullEngine, + machine::EthereumMachine, + types::transaction::{Action, Transaction}, + }; use crypto::publickey::{Generator, Random}; - use engines::NullEngine; - use machine::EthereumMachine; - use types::transaction::{Action, Transaction}; let mut params = CommonParams::default(); params.dust_protection_transition = 0; diff --git 
a/crates/ethcore/src/verification/verifier.rs b/crates/ethcore/src/verification/verifier.rs index c74af43e7c..7924598cc7 100644 --- a/crates/ethcore/src/verification/verifier.rs +++ b/crates/ethcore/src/verification/verifier.rs @@ -17,11 +17,8 @@ //! A generic verifier trait. use super::verification; +use crate::{client::BlockInfo, engines::EthEngine, error::Error, types::header::Header}; use call_contract::CallContract; -use client::BlockInfo; -use engines::EthEngine; -use error::Error; -use types::header::Header; /// Should be used to verify blocks. pub trait Verifier: Send + Sync diff --git a/crates/ethcore/sync/Cargo.toml b/crates/ethcore/sync/Cargo.toml index cf181c95bf..a9b5585382 100644 --- a/crates/ethcore/sync/Cargo.toml +++ b/crates/ethcore/sync/Cargo.toml @@ -4,6 +4,7 @@ name = "ethcore-sync" version = "1.12.0" license = "GPL-3.0" authors = ["Parity Technologies "] +edition = "2024" [lib] @@ -31,12 +32,13 @@ macros = { path = "../../util/macros" } parity-bytes = "0.1" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" rand = "0.7.3" rand_xorshift = "0.2.0" rlp = { version = "0.4.6" } trace-time = "0.1" triehash-ethereum = {version = "0.2", path = "../../util/triehash-ethereum" } +time-utils = { path = "../../util/time-utils" } stats = { path = "../../util/stats" } crossbeam-channel = "0.5.2" @@ -46,3 +48,6 @@ ethcore = { path = "..", features = ["test-helpers"] } ethcore-io = { path = "../../runtime/io", features = ["mio"] } kvdb-memorydb = "0.1" rustc-hex = "1.0" + +[features] +devP2PTests = [] \ No newline at end of file diff --git a/crates/ethcore/sync/src/api.rs b/crates/ethcore/sync/src/api.rs index 1720750d75..5c8d61647b 100644 --- a/crates/ethcore/sync/src/api.rs +++ b/crates/ethcore/sync/src/api.rs @@ -18,42 +18,46 @@ use bytes::Bytes; use crypto::publickey::Secret; use devp2p::NetworkService; use network::{ - client_version::ClientVersion, ConnectionFilter, 
Error, ErrorKind, - NetworkConfiguration as BasicNetworkConfiguration, NetworkContext, NetworkProtocolHandler, - NodeId, NonReservedPeerMode, PeerId, ProtocolId, + ConnectionFilter, Error, ErrorKind, NetworkConfiguration as BasicNetworkConfiguration, + NetworkContext, NetworkProtocolHandler, NodeId, NonReservedPeerMode, PeerId, ProtocolId, + client_version::ClientVersion, }; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, io, ops::RangeInclusive, - sync::{atomic, mpsc, Arc}, + sync::{Arc, atomic, mpsc}, time::Duration, }; -use chain::{ - fork_filter::ForkFilterApi, ChainSyncApi, SyncState, SyncStatus as EthSyncStatus, - ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_64, ETH_PROTOCOL_VERSION_65, - ETH_PROTOCOL_VERSION_66, PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, -}; -use ethcore::{ - client::{BlockChainClient, ChainMessageType, ChainNotify, NewBlocks}, - snapshot::SnapshotService, +use crate::{ + chain::{ + ChainSyncApi, ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_64, ETH_PROTOCOL_VERSION_65, + ETH_PROTOCOL_VERSION_66, PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, SyncState, + SyncStatus as EthSyncStatus, fork_filter::ForkFilterApi, + }, + ethcore::{ + client::{BlockChainClient, ChainMessageType, ChainNotify, NewBlocks}, + snapshot::SnapshotService, + }, + io::TimerToken, + network::IpFilter, + stats::{PrometheusMetrics, PrometheusRegistry}, }; -use ethereum_types::{H256, H512, U256, U64}; -use io::TimerToken; -use network::IpFilter; +use ethereum_types::{H256, H512, U64, U256}; use parking_lot::{Mutex, RwLock}; -use stats::{PrometheusMetrics, PrometheusRegistry}; +use crate::{ + sync_io::{NetSyncIo, SyncIo}, + types::{ + BlockNumber, creation_status::CreationStatus, restoration_status::RestorationStatus, + transaction::UnverifiedTransaction, + }, +}; use std::{ net::{AddrParseError, SocketAddr}, str::FromStr, }; -use sync_io::{NetSyncIo, SyncIo}; -use types::{ - creation_status::CreationStatus, restoration_status::RestorationStatus, - 
transaction::UnverifiedTransaction, BlockNumber, -}; /// OpenEthereum sync protocol pub const PAR_PROTOCOL: ProtocolId = U64([0x706172]); // hexadecimal number of "par"; @@ -318,12 +322,12 @@ impl SyncProvider for EthSync { Some(PeerInfo { id: session_info.id.map(|id| format!("{:x}", id)), - client_version: session_info.client_version, capabilities: session_info - .peer_capabilities - .into_iter() + .peer_capabilities() + .iter() .map(|c| c.to_string()) .collect(), + client_version: session_info.client_version, remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: peer_info, @@ -447,6 +451,10 @@ impl PrometheusMetrics for EthSync { "First block number of the present snapshot", manifest_block_num as i64, ); + + self.eth_handler.prometheus_metrics(r); + + self.network.prometheus_metrics(r); } } @@ -486,40 +494,85 @@ impl SyncProtocolHandler { let mut sync_io = NetSyncIo::new(nc, &*self.chain, &*self.snapshot_service, &self.overlay); for node_id in pub_keys.iter() { - if let Some(peer_id) = nc.node_id_to_peer_id(*node_id) { + if let Some(peer_id) = nc.node_id_to_peer_id(node_id) { let found_peers = self.sync.peer_info(&[peer_id]); if let Some(peer_info) = found_peers.first() { if let Some(_) = peer_info { - self.send_cached_consensus_messages_for(&mut sync_io, node_id, peer_id); + self.send_cached_consensus_messages_for(&mut sync_io, node_id); } } } } } - fn send_cached_consensus_messages_for( - &self, - sync_io: &mut dyn SyncIo, - node_id: &NodeId, - peer_id: PeerId, - ) { + fn send_cached_consensus_messages_for(&self, sync_io: &mut dyn SyncIo, node_id: &NodeId) { + let last_interesting_block = self + .chain + .block_number(types::ids::BlockId::Latest) + .unwrap_or(0); + // now since we are connected, lets send any cached messages if let Some(vec_msg) = self.message_cache.write().remove(&Some(*node_id)) { trace!(target: "consensus", "Cached Messages: Trying to send cached messages to {:?}", node_id); + + let mut 
failed_messages: Vec = Vec::new(); + for msg in vec_msg { match msg { - ChainMessageType::Consensus(message) => self - .sync - .write() - .send_consensus_packet(sync_io, message, peer_id), + ChainMessageType::Consensus(block, message) => { + if block < last_interesting_block { + // https://github.com/DMDcoin/diamond-node/issues/261 + continue; + } + let send_consensus_result = self.sync.write().send_consensus_packet( + sync_io, + message.clone(), + node_id, + ); + + match send_consensus_result { + Ok(_) => {} + Err(e) => { + info!(target: "consensus", "Error sending cached consensus message to peer (re-adding) {:?}: {:?}", node_id, e); + failed_messages.push(ChainMessageType::Consensus(block, message)); + } + } + } } } + + if !failed_messages.is_empty() { + // If we failed to send some messages, cache them for later + self.message_cache + .write() + .entry(Some(*node_id)) + .or_default() + .extend(failed_messages); + } else { + trace!(target: "consensus", "Cached Messages: Successfully sent all cached messages to {:?}", node_id); + } } } } +impl PrometheusMetrics for SyncProtocolHandler { + fn prometheus_metrics(&self, r: &mut PrometheusRegistry) { + if let Some(cache) = self.message_cache.try_read_for(Duration::from_millis(50)) { + let sum = cache.iter().map(|(_, v)| v.len()).sum::(); + r.register_gauge( + "consensus_message_cache", + "Number of cached consensus messages", + sum as i64, + ); + } + + self.sync.prometheus_metrics(r); + } +} + impl NetworkProtocolHandler for SyncProtocolHandler { fn initialize(&self, io: &dyn NetworkContext) { + trace!(target: "sync", "Initializing sync protocol handler for subprotocol: {}", io.subprotocol_name()); if io.subprotocol_name() != PAR_PROTOCOL { io.register_timer(PEERS_TIMER, Duration::from_millis(700)) .expect("Error registering peers timer"); @@ -539,6 +592,11 @@ impl NetworkProtocolHandler for SyncProtocolHandler { } fn read(&self, io: &dyn NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + let session_info 
= io.session_info(*peer); + if session_info.is_none() { + debug!(target: "sync", "Received packet from peer, where no Session info is available anymore (was just disconnected ?): {peer}"); + return; + } let node_id = io.session_info(*peer).unwrap().id; self.sync.dispatch_packet( &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), @@ -551,9 +609,12 @@ impl NetworkProtocolHandler for SyncProtocolHandler { fn connected(&self, io: &dyn NetworkContext, peer: &PeerId) { trace_time!("sync::connected"); - let node_id = io.session_info(*peer).unwrap().id; + let node_id = io + .session_info(*peer) + .unwrap_or_else(|| panic!("peer not found: {peer}")) + .id; if io.is_reserved_peer(*peer) { - trace!(target: "sync", "Connected to reserved peer {:?}", node_id); + debug!(target: "sync", "Connected to reserved peer {node_id:?} {peer}" ); } // If warp protocol is supported only allow warp handshake let warp_protocol = io.protocol_version(PAR_PROTOCOL, *peer).unwrap_or(0) != 0; @@ -569,7 +630,7 @@ impl NetworkProtocolHandler for SyncProtocolHandler { fn disconnected(&self, io: &dyn NetworkContext, peer: &PeerId) { trace_time!("sync::disconnected"); if io.is_reserved_peer(*peer) { - trace!(target: "sync", "Disconnected from reserved peer {:?}", io.session_info(*peer).expect("").id); + debug!(target: "sync", "Disconnected from reserved peer peerID: {} protocol: {} peer: {}",peer , io.subprotocol_name(), io.session_info(*peer).expect("").id.map_or("".to_string(), |f| format!("{:?}", f))); } if io.subprotocol_name() != PAR_PROTOCOL { self.sync.write().on_peer_aborting( @@ -581,12 +642,13 @@ impl NetworkProtocolHandler for SyncProtocolHandler { fn timeout(&self, nc: &dyn NetworkContext, timer: TimerToken) { trace_time!("sync::timeout"); + let mut io = NetSyncIo::new(nc, &*self.chain, &*self.snapshot_service, &self.overlay); match timer { PEERS_TIMER => self.sync.write().maintain_peers(&mut io), MAINTAIN_SYNC_TIMER => self.sync.write().maintain_sync(&mut io), 
CONTINUE_SYNC_TIMER => self.sync.write().continue_sync(&mut io), - TX_TIMER => self.sync.write().propagate_new_transactions(&mut io), + TX_TIMER => self.sync.write().propagate_new_ready_transactions(&mut io), PRIORITY_TIMER => self.sync.process_priority_queue(&mut io), DELAYED_PROCESSING_TIMER => self.sync.process_delayed_requests(&mut io), CONSENSUS_SEND_RETRY_TIMER => self.try_resend_consensus_messages(nc), @@ -636,7 +698,10 @@ impl ChainNotify for EthSync { match self.network.start() { Err((err, listen_address)) => match err.into() { ErrorKind::Io(ref e) if e.kind() == io::ErrorKind::AddrInUse => { - warn!("Network port {:?} is already in use, make sure that another instance of an Ethereum client is not running or change the port using the --port option.", listen_address.expect("Listen address is not set.")) + warn!( + "Network port {:?} is already in use, make sure that another instance of an Ethereum client is not running or change the port using the --port option.", + listen_address.expect("Listen address is not set.") + ) } err => warn!("Error starting network: {}", err), }, @@ -679,7 +744,7 @@ impl ChainNotify for EthSync { &self.eth_handler.overlay, ); match message_type { - ChainMessageType::Consensus(message) => self + ChainMessageType::Consensus(_block, message) => self .eth_handler .sync .write() @@ -688,35 +753,23 @@ impl ChainNotify for EthSync { }); } - fn send(&self, message_type: ChainMessageType, node_id: Option) { + fn send(&self, message_type: ChainMessageType, node_id: &H512) { self.network.with_context(PAR_PROTOCOL, |context| { - let peer_ids = self.network.connected_peers(); - let target_peer_id = peer_ids.into_iter().find(|p| { - match context.session_info(*p){ - Some(session_info) => { - session_info.id == node_id - }, - None => { warn!(target:"sync", "No session exists for peerId {:?}", p); false}, - } - }); - - let my_peer_id = match target_peer_id { - None => { - trace!(target: "consensus", "Cached Messages: peer {:?} not connected, 
caching message...", node_id); - let mut lock = self.eth_handler.message_cache.write(); - lock.entry(node_id.clone()).or_default().push(message_type); - return; - } - Some(n) => n, - }; - let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); match message_type { - ChainMessageType::Consensus(message) => self.eth_handler.sync.write().send_consensus_packet(&mut sync_io, message, my_peer_id), + ChainMessageType::Consensus(block, message) => { + let send_result = self.eth_handler.sync.write().send_consensus_packet(&mut sync_io, message.clone(), node_id); + if let Err(e) = send_result { + info!(target: "consensus", "Error sending consensus message to peer - caching message {:?}: {:?}", node_id, e); + // If we failed to send the message, cache it for later + let mut lock = self.eth_handler.message_cache.write(); + lock.entry(Some(node_id.clone())).or_default().push(ChainMessageType::Consensus(block, message)); + } + }, } }); } diff --git a/crates/ethcore/sync/src/block_sync.rs b/crates/ethcore/sync/src/block_sync.rs index 7e1a3ccd1b..732a1f7025 100644 --- a/crates/ethcore/sync/src/block_sync.rs +++ b/crates/ethcore/sync/src/block_sync.rs @@ -15,8 +15,12 @@ // along with OpenEthereum. If not, see . 
// use std::backtrace::Backtrace; -use blocks::{BlockCollection, SyncBody, SyncHeader}; -use chain::BlockSet; +use crate::{ + blocks::{BlockCollection, SyncBody, SyncHeader}, + chain::BlockSet, + sync_io::SyncIo, + types::BlockNumber, +}; use ethcore::{ client::{BlockId, BlockStatus}, error::{ @@ -25,19 +29,15 @@ use ethcore::{ }, }; use ethereum_types::H256; -use network::{client_version::ClientCapabilities, PeerId}; +use network::PeerId; use rlp::{self, Rlp}; use std::cmp; /// /// Blockchain downloader /// use std::collections::{BTreeMap, HashSet, VecDeque}; -use sync_io::SyncIo; -use types::BlockNumber; const MAX_HEADERS_TO_REQUEST: usize = 128; -const MAX_BODIES_TO_REQUEST_LARGE: usize = 128; -const MAX_BODIES_TO_REQUEST_SMALL: usize = 32; // Size request for parity clients prior to 2.4.0 const MAX_RECEPITS_TO_REQUEST: usize = 256; const SUBCHAIN_SIZE: u64 = 256; const MAX_ROUND_PARENTS: usize = 16; @@ -377,7 +377,10 @@ impl BlockDownloader { return Err(BlockDownloaderImportError::Invalid); } BlockSet::OldBlocks => { - trace_sync!(self, "Expected some useful headers for downloading OldBlocks. Try a different peer"); + trace_sync!( + self, + "Expected some useful headers for downloading OldBlocks. 
Try a different peer" + ); return Err(BlockDownloaderImportError::Useless); } _ => (), @@ -567,7 +570,10 @@ impl BlockDownloader { } else { let n = start - cmp::min(self.retract_step, start); if n == 0 { - info!("Header not found, bottom line reached, resetting, last imported: {}", self.last_imported_hash); + info!( + "Header not found, bottom line reached, resetting, last imported: {}", + self.last_imported_hash + ); info!(target: "sync", "Header not found: start: {} best: {} retract_step: {}, last_imported_hash: {}, oldest_reorg: {}", start, best, self.retract_step, self.last_imported_hash, oldest_reorg); self.reset_to_block(&best_hash, best); } else { @@ -602,6 +608,7 @@ impl BlockDownloader { self.retract_step = 1; } } + self.last_round_start = self.last_imported_block; self.last_round_start_hash = self.last_imported_hash; self.imported_this_round = None; @@ -636,21 +643,21 @@ impl BlockDownloader { } State::Blocks => { // check to see if we need to download any block bodies first - let client_version = io.peer_version(peer_id); - let number_of_bodies_to_request = if client_version.can_handle_large_requests() { - MAX_BODIES_TO_REQUEST_LARGE - } else { - MAX_BODIES_TO_REQUEST_SMALL - }; + // let number_of_bodies_to_request = if client_version.can_handle_large_requests() { + // MAX_BODIES_TO_REQUEST_LARGE + // } else { + // MAX_BODIES_TO_REQUEST_SMALL + // }; + + let number_of_bodies_to_request = 1; let needed_bodies = self .blocks .needed_bodies(number_of_bodies_to_request, false); - trace!(target: "sync", "Downloading blocks from Peer {} sync with better chain. needed Bodies: {}, download receipts: {}", peer_id, needed_bodies.len(),self.download_receipts); - if !needed_bodies.is_empty() { + trace!(target: "sync", "Downloading blocks from PeerID {} : {}. 
needed Bodies: {}, download receipts: {}, first body: {}", peer_id, io.peer_session_info(peer_id).map_or("unknown".to_string(), |p| p.remote_address.clone()), needed_bodies.len(),self.download_receipts, needed_bodies[0]); return Some(BlockRequest::Bodies { hashes: needed_bodies, }); @@ -660,6 +667,7 @@ impl BlockDownloader { let needed_receipts = self.blocks.needed_receipts(MAX_RECEPITS_TO_REQUEST, false); if !needed_receipts.is_empty() { + trace!(target: "sync", "Downloading receipts from Peer {}. needed receipts: {}, first receipt: {}", peer_id, needed_receipts.len(), needed_receipts[0]); return Some(BlockRequest::Receipts { hashes: needed_receipts, }); @@ -797,17 +805,19 @@ where #[cfg(test)] mod tests { use super::*; + use crate::{ + tests::{helpers::TestIo, snapshot::TestSnapshotService}, + types::{ + header::Header as BlockHeader, + transaction::{SignedTransaction, Transaction, TypedTransaction}, + }, + }; use crypto::publickey::{Generator, Random}; use ethcore::{client::TestBlockChainClient, spec::Spec}; use hash::keccak; use parking_lot::RwLock; - use rlp::{encode_list, RlpStream}; - use tests::{helpers::TestIo, snapshot::TestSnapshotService}; + use rlp::{RlpStream, encode_list}; use triehash_ethereum::ordered_trie_root; - use types::{ - header::Header as BlockHeader, - transaction::{SignedTransaction, Transaction, TypedTransaction}, - }; fn dummy_header(number: u64, parent_hash: H256) -> BlockHeader { let mut header = BlockHeader::new(); @@ -1063,33 +1073,39 @@ mod tests { // Only import the first three block headers. let rlp_data = encode_list(&headers[0..3]); let headers_rlp = Rlp::new(&rlp_data); - assert!(downloader - .import_headers(&mut io, &headers_rlp, headers[0].hash(), eip1559_transition) - .is_ok()); + assert!( + downloader + .import_headers(&mut io, &headers_rlp, headers[0].hash(), eip1559_transition) + .is_ok() + ); // Import first body successfully. 
let mut rlp_data = RlpStream::new_list(1); rlp_data.append_raw(&bodies[0], 1); let bodies_rlp = Rlp::new(rlp_data.as_raw()); - assert!(downloader - .import_bodies( - &bodies_rlp, - &[headers[0].hash(), headers[1].hash()], - eip1559_transition - ) - .is_ok()); + assert!( + downloader + .import_bodies( + &bodies_rlp, + &[headers[0].hash(), headers[1].hash()], + eip1559_transition + ) + .is_ok() + ); // Import second body successfully. let mut rlp_data = RlpStream::new_list(1); rlp_data.append_raw(&bodies[1], 1); let bodies_rlp = Rlp::new(rlp_data.as_raw()); - assert!(downloader - .import_bodies( - &bodies_rlp, - &[headers[0].hash(), headers[1].hash()], - eip1559_transition - ) - .is_ok()); + assert!( + downloader + .import_bodies( + &bodies_rlp, + &[headers[0].hash(), headers[1].hash()], + eip1559_transition + ) + .is_ok() + ); // Import unexpected third body. let mut rlp_data = RlpStream::new_list(1); @@ -1153,18 +1169,22 @@ mod tests { // Only import the first three block headers. let rlp_data = encode_list(&headers[0..3]); let headers_rlp = Rlp::new(&rlp_data); - assert!(downloader - .import_headers(&mut io, &headers_rlp, headers[0].hash(), eip1559_transition) - .is_ok()); + assert!( + downloader + .import_headers(&mut io, &headers_rlp, headers[0].hash(), eip1559_transition) + .is_ok() + ); // Import second and third receipts successfully. let mut rlp_data = RlpStream::new_list(2); rlp_data.append_raw(&receipts[1], 1); rlp_data.append_raw(&receipts[2], 1); let receipts_rlp = Rlp::new(rlp_data.as_raw()); - assert!(downloader - .import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()]) - .is_ok()); + assert!( + downloader + .import_receipts(&receipts_rlp, &[headers[1].hash(), headers[2].hash()]) + .is_ok() + ); // Import unexpected fourth receipt. 
let mut rlp_data = RlpStream::new_list(1); diff --git a/crates/ethcore/sync/src/blocks.rs b/crates/ethcore/sync/src/blocks.rs index 9c5d28f95d..847e4bad45 100644 --- a/crates/ethcore/sync/src/blocks.rs +++ b/crates/ethcore/sync/src/blocks.rs @@ -14,20 +14,20 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::types::{ + BlockNumber, + header::Header as BlockHeader, + transaction::{TypedTransaction, UnverifiedTransaction}, +}; use bytes::Bytes; use ethcore::verification::queue::kind::blocks::Unverified; use ethereum_types::H256; -use hash::{keccak, KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP}; +use hash::{KECCAK_EMPTY_LIST_RLP, KECCAK_NULL_RLP, keccak}; use network; use parity_util_mem::MallocSizeOf; use rlp::{DecoderError, Rlp, RlpStream}; -use std::collections::{hash_map, BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet, hash_map}; use triehash_ethereum::ordered_trie_root; -use types::{ - header::Header as BlockHeader, - transaction::{TypedTransaction, UnverifiedTransaction}, - BlockNumber, -}; malloc_size_of_is_0!(HeaderId); @@ -621,12 +621,12 @@ impl BlockCollection { #[cfg(test)] mod test { use super::{BlockCollection, SyncHeader}; + use crate::types::BlockNumber; use ethcore::{ client::{BlockChainClient, BlockId, EachBlockWith, TestBlockChainClient}, verification::queue::kind::blocks::Unverified, }; use rlp::*; - use types::BlockNumber; fn is_empty(bc: &BlockCollection) -> bool { bc.heads.is_empty() diff --git a/crates/ethcore/sync/src/chain/fork_filter.rs b/crates/ethcore/sync/src/chain/fork_filter.rs index c72c06efdc..4f5de69f65 100644 --- a/crates/ethcore/sync/src/chain/fork_filter.rs +++ b/crates/ethcore/sync/src/chain/fork_filter.rs @@ -2,10 +2,10 @@ //! to support Ethereum network protocol, version 64 and above. // Re-export ethereum-forkid crate contents here. 
-pub use ethereum_forkid::{BlockNumber, ForkId, RejectReason}; +pub use crate::ethereum_forkid::{BlockNumber, ForkId, RejectReason}; +use crate::ethereum_forkid::ForkFilter; use ethcore::client::ChainInfo; -use ethereum_forkid::ForkFilter; /// Wrapper around fork filter that provides integration with `ForkFilter`. pub struct ForkFilterApi { diff --git a/crates/ethcore/sync/src/chain/handler.rs b/crates/ethcore/sync/src/chain/handler.rs index b9c2a10729..a4a74f0baa 100644 --- a/crates/ethcore/sync/src/chain/handler.rs +++ b/crates/ethcore/sync/src/chain/handler.rs @@ -14,8 +14,13 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use api::{ETH_PROTOCOL, PAR_PROTOCOL}; -use block_sync::{BlockDownloaderImportError as DownloaderImportError, DownloadAction}; +use crate::{ + api::{ETH_PROTOCOL, PAR_PROTOCOL}, + block_sync::{BlockDownloaderImportError as DownloaderImportError, DownloadAction}, + snapshot::ChunkType, + sync_io::SyncIo, + types::{BlockNumber, block_status::BlockStatus, ids::BlockId}, +}; use bytes::Bytes; use enum_primitive::FromPrimitive; use ethcore::{ @@ -25,12 +30,13 @@ use ethcore::{ }; use ethereum_types::{H256, H512, U256}; use hash::keccak; -use network::{client_version::ClientVersion, PeerId}; +use network::{PeerId, client_version::ClientVersion}; use rlp::Rlp; -use snapshot::ChunkType; -use std::{cmp, mem, time::Instant}; -use sync_io::SyncIo; -use types::{block_status::BlockStatus, ids::BlockId, BlockNumber}; +use std::{ + cmp, mem, + time::{Duration, Instant}, +}; +use time_utils::DeadlineStopwatch; use super::{ request_id::strip_request_id, @@ -41,9 +47,9 @@ use super::{ }; use super::{ - BlockSet, ChainSync, ForkConfirmation, PacketProcessError, PeerAsking, PeerInfo, SyncRequester, - SyncState, ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_64, ETH_PROTOCOL_VERSION_66, - MAX_NEW_BLOCK_AGE, MAX_NEW_HASHES, PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, + BlockSet, ChainSync, 
ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_64, ETH_PROTOCOL_VERSION_66, + ForkConfirmation, MAX_NEW_BLOCK_AGE, MAX_NEW_HASHES, PAR_PROTOCOL_VERSION_1, + PAR_PROTOCOL_VERSION_2, PacketProcessError, PeerAsking, PeerInfo, SyncRequester, SyncState, }; use network::client_version::ClientCapabilities; @@ -81,7 +87,7 @@ impl SyncHandler { } SnapshotDataPacket => SyncHandler::on_snapshot_data(sync, io, peer, &rlp), _ => { - debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id.id()); + warn!(target: "sync", "{}: Unknown packet {}", peer, packet_id.id()); Ok(()) } }, @@ -155,11 +161,10 @@ impl SyncHandler { /// Called when a new peer is connected pub fn on_peer_connected(sync: &mut ChainSync, io: &mut dyn SyncIo, peer: PeerId) { let peer_version = io.peer_version(peer); - trace!(target: "sync", "== Connected {}: {}", peer, peer_version); + + trace!(target: "sync", "== Connected {}: {} protocol: {}", peer, peer_version, io.peer_session_info(peer).map_or(String::new(), |f| f.peer_capabilities().iter().map(|c| format!("{}-{}", c.protocol, c.version)).collect::>().join(" | "))); let whitelisted = peer_version.is_hbbft(); - // peer_version_string.contains("hbbft") - // && peer_version_string.contains("OpenEthereum"); if !whitelisted { let mut ip_addr = String::new(); @@ -167,10 +172,10 @@ impl SyncHandler { Some(session) => ip_addr = session.remote_address.to_string(), None => {} } - trace!(target:"sync", "Disabling Peer (this Software Version not whitelisted) {} ip:{} ", peer_version, ip_addr); + debug!(target:"sync", "Disabling Peer (this Software Version not whitelisted) {} ip:{} ", peer_version, ip_addr); io.disable_peer(peer); } else if let Err(e) = sync.send_status(io, peer) { - debug!(target:"sync", "Error sending status request: {:?}", e); + debug!(target:"sync", "Error sending status request: {peer} {:?} {e:?}", io.peer_session_info(peer).as_ref().map_or(" (no Session)", |f| f.remote_address.as_str())); io.disconnect_peer(peer); } else { 
sync.handshaking_peers.insert(peer, Instant::now()); @@ -857,12 +862,33 @@ impl SyncHandler { peer_id: PeerId, tx_rlp: &Rlp, ) -> Result<(), DownloaderImportError> { + // those P2P operations must not take forever, a better , configurable but balanced timeout managment would be nice to have. + let max_duration = Duration::from_millis(500); + + let deadline = DeadlineStopwatch::new(max_duration); + for item in tx_rlp { let hash = item .as_val::() .map_err(|_| DownloaderImportError::Invalid)?; - if io.chain().queued_transaction(hash).is_none() { + // todo: what if the Transaction is not new, and already in the chain? + // see: https://github.com/DMDcoin/diamond-node/issues/196 + + // if we cant read the pool here, we are asuming we dont know the transaction yet. + // in the worst case we are refetching a transaction that we already have. + + if deadline.is_expired() { + debug!(target: "sync", "{}: deadline reached while processing pooled transactions", peer_id); + // we did run out of time to finish this opation, but thats Ok. 
+ return Ok(()); + } + + if io + .chain() + .transaction_if_readable(&hash, &deadline.time_left()) + .is_none() + { sync.peers .get_mut(&peer_id) .map(|peer| peer.unfetched_pooled_transactions.insert(hash)); @@ -874,7 +900,7 @@ impl SyncHandler { /// Called when peer sends us a list of pooled transactions pub fn on_peer_pooled_transactions( - sync: &ChainSync, + sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, tx_rlp: &Rlp, @@ -892,7 +918,7 @@ impl SyncHandler { trace!(target: "sync", "{} Peer sent us more transactions than was supposed to", peer_id); return Err(DownloaderImportError::Invalid); } - trace!(target: "sync", "{:02} -> PooledTransactions ({} entries)", peer_id, item_count); + debug!(target: "sync", "{:02} -> PooledTransactions ({} entries)", peer_id, item_count); let mut transactions = Vec::with_capacity(item_count); for i in 0..item_count { let rlp = tx_rlp.at(i)?; @@ -905,6 +931,9 @@ impl SyncHandler { transactions.push(tx); } io.chain().queue_transactions(transactions, peer_id); + + sync.reset_peer_asking(peer_id, PeerAsking::PooledTransactions); + Ok(()) } @@ -945,11 +974,11 @@ impl SyncHandler { #[cfg(test)] mod tests { + use crate::tests::{helpers::TestIo, snapshot::TestSnapshotService}; use ethcore::client::{ChainInfo, EachBlockWith, TestBlockChainClient}; use parking_lot::RwLock; use rlp::Rlp; use std::collections::VecDeque; - use tests::{helpers::TestIo, snapshot::TestSnapshotService}; use super::{ super::tests::{dummy_sync_with_peer, get_dummy_block, get_dummy_blocks, get_dummy_hashes}, diff --git a/crates/ethcore/sync/src/chain/mod.rs b/crates/ethcore/sync/src/chain/mod.rs index 3ceb3fc30a..3a204b3c14 100644 --- a/crates/ethcore/sync/src/chain/mod.rs +++ b/crates/ethcore/sync/src/chain/mod.rs @@ -89,16 +89,28 @@ pub mod fork_filter; mod handler; +mod pooled_transactions_overview; mod propagator; +mod propagator_statistics; pub mod request_id; mod requester; mod supplier; pub mod sync_packet; pub use 
self::fork_filter::ForkFilterApi; +use self::{ + pooled_transactions_overview::PooledTransactionOverview, + propagator_statistics::SyncPropagatorStatistics, +}; use super::{SyncConfig, WarpSync}; -use api::{EthProtocolInfo as PeerInfoDigest, PriorityTask, ETH_PROTOCOL, PAR_PROTOCOL}; -use block_sync::{BlockDownloader, DownloadAction}; +use crate::{ + api::{ETH_PROTOCOL, EthProtocolInfo as PeerInfoDigest, PAR_PROTOCOL, PriorityTask}, + block_sync::{BlockDownloader, DownloadAction}, + snapshot::Snapshot, + sync_io::SyncIo, + transactions_stats::{Stats as TransactionStats, TransactionsStats}, + types::{BlockNumber, transaction::UnverifiedTransaction}, +}; use bytes::Bytes; use derive_more::Display; use ethcore::{ @@ -108,20 +120,17 @@ use ethcore::{ use ethereum_types::{H256, H512, U256}; use fastmap::{H256FastMap, H256FastSet}; use hash::keccak; -use network::{self, client_version::ClientVersion, PeerId}; +use network::{self, PeerId, client_version::ClientVersion}; use parking_lot::{Mutex, RwLock, RwLockWriteGuard}; -use rand::{seq::SliceRandom, Rng}; +use rand::{Rng, seq::SliceRandom}; use rlp::{DecoderError, RlpStream}; -use snapshot::Snapshot; +use stats::PrometheusMetrics; use std::{ cmp, collections::{BTreeMap, HashMap, HashSet}, sync::mpsc, time::{Duration, Instant}, }; -use sync_io::SyncIo; -use transactions_stats::{Stats as TransactionStats, TransactionsStats}; -use types::{transaction::UnverifiedTransaction, BlockNumber}; use self::{ handler::SyncHandler, @@ -131,8 +140,8 @@ use self::{ }, }; +use self::requester::SyncRequester; pub(crate) use self::supplier::SyncSupplier; -use self::{propagator::SyncPropagator, requester::SyncRequester}; malloc_size_of_is_0!(PeerInfo); @@ -349,7 +358,7 @@ pub struct PeerInfo { /// Hashes of transactions to be requested. unfetched_pooled_transactions: H256FastSet, /// Hashes of the transactions we're requesting. 
- asking_pooled_transactions: Vec, + asking_pooled_transactions: H256FastSet, /// Holds requested snapshot chunk hash if any. asking_snapshot_data: Option, /// Request timestamp @@ -419,6 +428,8 @@ pub struct ChainSyncApi { priority_tasks: Mutex>, /// The rest of sync data sync: RwLock, + /// last known sync state. + last_known_sync_status: Mutex, } impl ChainSyncApi { @@ -430,14 +441,14 @@ impl ChainSyncApi { priority_tasks: mpsc::Receiver, new_transaction_hashes: crossbeam_channel::Receiver, ) -> Self { + let sync = ChainSync::new(config, chain, fork_filter, new_transaction_hashes); + + let last_known_sync_status = sync.status(); + ChainSyncApi { - sync: RwLock::new(ChainSync::new( - config, - chain, - fork_filter, - new_transaction_hashes, - )), + sync: RwLock::new(sync), priority_tasks: Mutex::new(priority_tasks), + last_known_sync_status: Mutex::new(last_known_sync_status), } } @@ -452,13 +463,21 @@ impl ChainSyncApi { ids.iter().map(|id| sync.peer_info(id)).collect() } - /// Returns synchonization status + /// Returns best known synchonization status pub fn status(&self) -> SyncStatus { - self.sync.read().status() + if let Some(sync) = self.sync.try_read_for(Duration::from_millis(50)) { + let status = sync.status(); + *self.last_known_sync_status.lock() = status.clone(); + return status; + } + + // we return that last known sync status here, in cases we could not get the most recent information. 
+ // see also: https://github.com/DMDcoin/diamond-node/issues/223 + return self.last_known_sync_status.lock().clone(); } /// Returns pending transactions propagation statistics - pub fn pending_transactions_stats(&self) -> BTreeMap { + pub fn pending_transactions_stats(&self) -> BTreeMap { self.sync .read() .pending_transactions_stats() @@ -468,7 +487,7 @@ impl ChainSyncApi { } /// Returns new transactions propagation statistics - pub fn new_transactions_stats(&self) -> BTreeMap { + pub fn new_transactions_stats(&self) -> BTreeMap { self.sync .read() .new_transactions_stats() @@ -523,7 +542,7 @@ impl ChainSyncApi { } // deadline to get the task from the queue - let deadline = Instant::now() + ::api::PRIORITY_TIMER_INTERVAL; + let deadline = Instant::now() + crate::api::PRIORITY_TIMER_INTERVAL; let mut work = || { let task = { let tasks = self.priority_tasks.try_lock_until(deadline)?; @@ -537,9 +556,9 @@ impl ChainSyncApi { // since we already have everything let's use a different deadline // to do the rest of the job now, so that previous work is not wasted. 
let deadline = Instant::now() + PRIORITY_TASK_DEADLINE; - let as_ms = move |prev| { + let as_us = move |prev| { let dur: Duration = Instant::now() - prev; - dur.as_secs() * 1_000 + dur.subsec_millis() as u64 + dur.as_micros() }; match task { // NOTE We can't simply use existing methods, @@ -558,20 +577,23 @@ impl ChainSyncApi { for peers in sync.get_peers(&chain_info, PeerState::SameBlock).chunks(10) { check_deadline(deadline)?; for peer in peers { - SyncPropagator::send_packet(io, *peer, NewBlockPacket, rlp.clone()); - if let Some(ref mut peer) = sync.peers.get_mut(peer) { - peer.latest_hash = hash; + let send_result = + ChainSync::send_packet(io, *peer, NewBlockPacket, rlp.clone()); + if send_result.is_ok() { + if let Some(ref mut peer) = sync.peers.get_mut(peer) { + peer.latest_hash = hash; + } } } } - debug!(target: "sync", "Finished block propagation, took {}ms", as_ms(started)); + debug!(target: "sync", "Finished block propagation, took {} us", as_us(started)); } PriorityTask::PropagateTransactions(time, _) => { let hashes = sync.new_transaction_hashes(None); - SyncPropagator::propagate_new_transactions(&mut sync, io, hashes, || { + sync.propagate_new_transactions(io, hashes, || { check_deadline(deadline).is_some() }); - debug!(target: "sync", "Finished transaction propagation, took {}ms", as_ms(time)); + debug!(target: "sync", "Finished transaction propagation, took {} us", as_us(time)); } } @@ -732,6 +754,10 @@ pub struct ChainSync { eip1559_transition: BlockNumber, /// Number of blocks for which new transactions will be returned in a result of `parity_newTransactionsStats` RPC call new_transactions_stats_period: BlockNumber, + /// Statistics of sync propagation + statistics: SyncPropagatorStatistics, + /// memorizing currently pooled transaction to reduce the number of pooled transaction requests. 
+ asking_pooled_transaction_overview: PooledTransactionOverview, } #[derive(Debug, Default)] @@ -743,13 +769,13 @@ struct GetPooledTransactionsReport { impl GetPooledTransactionsReport { fn generate( - mut asked: Vec, + mut asked: H256FastSet, received: impl IntoIterator, ) -> Result { let mut out = GetPooledTransactionsReport::default(); let asked_set = asked.iter().copied().collect::(); - let mut asked_iter = asked.drain(std::ops::RangeFull); + let mut asked_iter = asked.drain(); let mut txs = received.into_iter(); let mut next_received: Option = None; loop { @@ -821,6 +847,8 @@ impl ChainSync { warp_sync: config.warp_sync, eip1559_transition: config.eip1559_transition, new_transactions_stats_period: config.new_transactions_stats_period, + statistics: SyncPropagatorStatistics::new(), + asking_pooled_transaction_overview: PooledTransactionOverview::new(), }; sync.update_targets(chain); sync @@ -830,7 +858,8 @@ impl ChainSync { pub fn status(&self) -> SyncStatus { let last_imported_number = self.new_blocks.last_imported_block_number(); let mut item_sizes = BTreeMap::::new(); - self.old_blocks + let _ = self + .old_blocks .as_ref() .map_or((), |d| d.get_sizes(&mut item_sizes)); self.new_blocks.get_sizes(&mut item_sizes); @@ -918,6 +947,11 @@ impl ChainSync { /// Updates transactions were received by a peer pub fn transactions_received(&mut self, txs: &[UnverifiedTransaction], peer_id: PeerId) { + debug!(target: "sync", "Received {} transactions from peer {}", txs.len(), peer_id); + if !txs.is_empty() { + trace!(target: "sync", "Received {:?}", txs.iter().map(|t| t.hash).map(|t| t.0).collect::>()); + } + // Remove imported txs from all request queues let imported = txs.iter().map(|tx| tx.hash()).collect::(); for (pid, peer_info) in &mut self.peers { @@ -928,7 +962,10 @@ impl ChainSync { .collect(); if *pid == peer_id { match GetPooledTransactionsReport::generate( - std::mem::replace(&mut peer_info.asking_pooled_transactions, Vec::new()), + std::mem::replace( + &mut 
peer_info.asking_pooled_transactions, + H256FastSet::default(), + ), txs.iter().map(UnverifiedTransaction::hash), ) { Ok(report) => { @@ -940,8 +977,9 @@ impl ChainSync { .copied() .collect(); } - Err(_unknown_tx) => { + Err(unknown_tx) => { // punish peer? + debug!(target: "sync", "Peer {} sent unknown transaction {}", peer_id, unknown_tx); } } @@ -986,6 +1024,7 @@ impl ChainSync { // Reactivate peers only if some progress has been made // since the last sync round of if starting fresh. self.active_peers = self.peers.keys().cloned().collect(); + debug!(target: "sync", "resetting sync state to {:?}", self.state); } /// Add a request for later processing @@ -1190,7 +1229,7 @@ impl ChainSync { ); peers.shuffle(&mut random::new()); // TODO (#646): sort by rating - // prefer peers with higher protocol version + // prefer peers with higher protocol version peers.sort_by(|&(_, ref v1), &(_, ref v2)| v1.cmp(v2)); @@ -1225,6 +1264,9 @@ impl ChainSync { /// Find something to do for a peer. Called for a new peer or when a peer is done with its task. 
fn sync_peer(&mut self, io: &mut dyn SyncIo, peer_id: PeerId, force: bool) { + debug!(target: "sync", "sync_peer: {} force {} state: {:?}", + peer_id, force, self.state + ); if !self.active_peers.contains(&peer_id) { trace!(target: "sync", "Skipping deactivated peer {}", peer_id); return; @@ -1232,7 +1274,7 @@ impl ChainSync { let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { if let Some(peer) = self.peers.get_mut(&peer_id) { if peer.asking != PeerAsking::Nothing || !peer.can_sync() { - trace!(target: "sync", "Skipping busy peer {}", peer_id); + debug!(target: "sync", "Skipping busy peer {} asking: {:?}", peer_id, peer.asking); return; } ( @@ -1242,11 +1284,13 @@ impl ChainSync { peer.snapshot_hash.as_ref().cloned(), ) } else { + info!(target: "sync", "peer info not found for {}", peer_id); return; } }; let chain_info = io.chain().chain_info(); let syncing_difficulty = chain_info.pending_total_difficulty; + let num_active_peers = self .peers .values() @@ -1260,37 +1304,13 @@ impl ChainSync { // the system get's stuck. let is_other_block = peer_latest != chain_info.best_block_hash; - if higher_difficulty && !is_other_block { - if peer_difficulty.is_some() { - // NetworkContext session_info - let session_info = io.peer_session_info(peer_id); - - match session_info { - Some(s) => { - //only warn if the other peer has provided a difficulty level. - warn!(target: "sync", "protected from hang. peer {}, did send wrong information ( td={:?}, our td={}) for blockhash latest={} {} originated by us: {}. client_version: {}, protocol version: {}", - peer_id, peer_difficulty, syncing_difficulty, peer_latest, s.remote_address, s.originated, s.client_version, s.protocol_version); - - // todo: temporary disabled peer deactivation. - // we are just returning now. - // will we see this problem in sequences now, but less disconnects ? 
- // io.disable_peer(peer_id); - // self.deactivate_peer(io, peer_id); - - return; - } - _ => {} - } - } - } - if self.old_blocks.is_some() { info!(target: "sync", "syncing old blocks from peer: {} ", peer_id); let session_info = io.peer_session_info(peer_id); match session_info { Some(s) => { - warn!(target: "sync", "old blocks peer: {} {} originated by us: {}", peer_id, s.remote_address, s.originated); + debug!(target: "sync", "old blocks peer: {} {} originated by us: {}", peer_id, s.remote_address, s.originated); } _ => {} } @@ -1309,6 +1329,7 @@ impl ChainSync { self.maybe_start_snapshot_sync(io); }, SyncState::Idle | SyncState::Blocks | SyncState::NewBlocks => { + if io.chain().queue_info().is_full() { self.pause_sync(); return; @@ -1335,27 +1356,13 @@ impl ChainSync { if force || equal_or_higher_difficulty { if ancient_block_fullness < 0.8 { if let Some(request) = self.old_blocks.as_mut().and_then(|d| d.request_blocks(peer_id, io, num_active_peers)) { + debug!(target:"sync", "requesting old blocks from: {}", peer_id); SyncRequester::request_blocks(self, io, peer_id, request, BlockSet::OldBlocks); return; } } - - // and if we have nothing else to do, get the peer to give us at least some of announced but unfetched transactions - let mut to_send = Default::default(); - if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.asking_pooled_transactions.is_empty() { - to_send = peer.unfetched_pooled_transactions.drain().take(MAX_TRANSACTIONS_TO_REQUEST).collect::>(); - peer.asking_pooled_transactions = to_send.clone(); - } - } - - if !to_send.is_empty() { - SyncRequester::request_pooled_transactions(self, io, peer_id, &to_send); - - return; - } } else { - trace!( + debug!( target: "sync", "peer {:?} is not suitable for requesting old blocks, syncing_difficulty={:?}, peer_difficulty={:?}", peer_id, @@ -1363,7 +1370,10 @@ impl ChainSync { peer_difficulty ); self.deactivate_peer(io, peer_id); + return; } + + }, SyncState::SnapshotData => { match 
io.snapshot_service().restoration_status() { @@ -1395,7 +1405,97 @@ impl ChainSync { SyncState::SnapshotWaiting => () } } else { - trace!(target: "sync", "Skipping peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); + // if we got nothing to do, and the other peer is also at the same block, or is known to be just 1 behind, we are fetching unfetched pooled transactions. + // there is some delay of the information what block they are on. + + // communicate with this peer in any case if we are on the same block. + // more about: https://github.com/DMDcoin/diamond-node/issues/173 + + //let communicate_with_peer = chain_info.best_block_hash == peer_latest; + + let communicate_with_peer = true; + + // on a distributed real network, 3 seconds is about they physical minimum. + // therefore we "accept" other nodes to be 1 block behind - usually they are not! + // The other way around: if they are a validator, and we are at the tip, we might be still 1 block behind, because there is already a pending block. + // our best_block information is always accurate, so we are not notifiying them obout our transactions, that might be already included in the block. + + // todo: Further investigation if we should or should not accept a gap in block height. + + // if !communicate_with_peer { + + // // if we are not on the same block, find out if we do have a block number for their block. 
+ // io.chain().block_number(BlockId::Hash(peer_latest)).map(|block_number| { + // // let other_best_block = peer_difficulty.unwrap_or_default().low_u64() as i64; + // // let best_block = chain_info.best_block_number as i64; + + // if block_number == chain_info.best_block_number { + // communicate_with_peer = true; + // } + // }); + // } + + if self.state == SyncState::Idle && communicate_with_peer { + // and if we have nothing else to do, get the peer to give us at least some of announced but unfetched transactions + let mut to_send = H256FastSet::default(); + if let Some(peer) = self.peers.get_mut(&peer_id) { + // info: this check should do nothing, if everything is tracked correctly, + + if peer.asking_pooled_transactions.is_empty() { + // todo: we might just request the same transactions from multiple peers here, at the same time. + // we should keep track of how many replicas of a transaction we had requested. + + for hash in peer.unfetched_pooled_transactions.iter() { + if to_send.len() >= MAX_TRANSACTIONS_TO_REQUEST { + break; + } + + if self + .asking_pooled_transaction_overview + .get_last_fetched(hash) + .map_or(true, |t| t.elapsed().as_millis() > 300) + { + to_send.insert(hash.clone()); + self.asking_pooled_transaction_overview + .report_transaction_pooling(hash); + } + } + + if !to_send.is_empty() { + peer.unfetched_pooled_transactions + .retain(|u| !to_send.contains(u)); + + trace!( + target: "sync", + "Asking {} pooled transactions from peer {}: {:?}", + to_send.len(), + peer_id, + to_send + ); + peer.asking_pooled_transactions = to_send.clone(); + } + } else { + debug!( + target: "sync", + "we are already asking from peer {}: {} transactions", + peer_id, + peer.asking_pooled_transactions.len() + ); + } + } + + if !to_send.is_empty() { + debug!(target: "sync", "requesting {} pooled transactions from {}", to_send.len(), peer_id); + let bytes_sent = + SyncRequester::request_pooled_transactions(self, io, peer_id, &to_send); + self.statistics + 
.log_requested_transactions_response(to_send.len(), bytes_sent); + + return; + } + } else { + debug!(target: "sync", "Skipping peer {}, force={}, td={:?}, our td={}, blochhash={:?} our_blockhash={:?} state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, peer_latest, chain_info.best_block_hash, self.state); + } } } @@ -1724,9 +1824,10 @@ impl ChainSync { if !is_syncing || !sealed.is_empty() || !proposed.is_empty() { trace!(target: "sync", "Propagating blocks, state={:?}", self.state); // t_nb 11.4.1 propagate latest blocks - SyncPropagator::propagate_latest_blocks(self, io, sealed); + self.propagate_latest_blocks(io, sealed); + // t_nb 11.4.4 propagate proposed blocks - SyncPropagator::propagate_proposed_blocks(self, io, proposed); + self.propagate_proposed_blocks(io, proposed); } if !invalid.is_empty() { info!(target: "sync", "Bad blocks in the queue, restarting sync"); @@ -1756,34 +1857,34 @@ impl ChainSync { pub fn on_peer_connected(&mut self, io: &mut dyn SyncIo, peer: PeerId) { SyncHandler::on_peer_connected(self, io, peer); } +} - /// propagates new transactions to all peers - pub fn propagate_new_transactions(&mut self, io: &mut dyn SyncIo) { - let deadline = Instant::now() + Duration::from_millis(500); - SyncPropagator::propagate_ready_transactions(self, io, || { - if deadline > Instant::now() { - true - } else { - debug!(target: "sync", "Wasn't able to finish transaction propagation within a deadline."); - false - } - }); - } - - /// Broadcast consensus message to peers. - pub fn propagate_consensus_packet(&mut self, io: &mut dyn SyncIo, packet: Bytes) { - SyncPropagator::propagate_consensus_packet(self, io, packet); +impl PrometheusMetrics for ChainSyncApi { + fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) { + // unfortunatly, Sync is holding the lock for quite some time, + // due its poor degree of parallism. 
+ // since most of the metrics are counter, it should not involve a huge problem + // we are still trying to get the lock only for 50ms here... + if let Some(sync) = self.sync.try_read_for(Duration::from_millis(50)) { + sync.prometheus_metrics(registry); + } } +} - /// Send consensus message to a specific peer. - pub fn send_consensus_packet(&mut self, io: &mut dyn SyncIo, packet: Bytes, peer_id: usize) { - SyncPropagator::send_consensus_packet(self, io, packet, peer_id); +impl PrometheusMetrics for ChainSync { + fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) { + self.statistics.prometheus_metrics(registry); } } #[cfg(test)] pub mod tests { use super::{PeerAsking, PeerInfo, *}; + use crate::{ + SyncConfig, + tests::{helpers::TestIo, snapshot::TestSnapshotService}, + types::header::Header, + }; use bytes::Bytes; use ethcore::{ client::{BlockChainClient, BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient}, @@ -1794,9 +1895,6 @@ pub mod tests { use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use std::collections::VecDeque; - use tests::{helpers::TestIo, snapshot::TestSnapshotService}; - use types::header::Header; - use SyncConfig; pub fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); @@ -1982,7 +2080,7 @@ pub mod tests { let mut io = TestIo::new(&mut client, &ss, &queue, None); let peers = sync.get_lagging_peers(&chain_info); - SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers); + sync.propagate_new_hashes(&chain_info, &mut io, &peers); let data = &io.packets[0].data.clone(); let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &Rlp::new(data)); @@ -2002,7 +2100,7 @@ pub mod tests { let mut io = TestIo::new(&mut client, &ss, &queue, None); let peers = sync.get_lagging_peers(&chain_info); - SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers); + sync.propagate_blocks(&chain_info, &mut io, &[], &peers); let data = 
&io.packets[0].data.clone(); let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &Rlp::new(data)); diff --git a/crates/ethcore/sync/src/chain/pooled_transactions_overview.rs b/crates/ethcore/sync/src/chain/pooled_transactions_overview.rs new file mode 100644 index 0000000000..2e95852c3b --- /dev/null +++ b/crates/ethcore/sync/src/chain/pooled_transactions_overview.rs @@ -0,0 +1,41 @@ +use std::{num::NonZeroUsize, time::Instant}; + +use fastmap::{H256FastLruMap, new_h256_fast_lru_map}; +use hash::H256; + +/// memorizs currently pooled transactions, so they are not pooled to often from different hosts. +pub(crate) struct PooledTransactionOverview { + // number_of_requests: H256FastMap, + /// The cache of pooled transactions. + last_fetched: H256FastLruMap, +} + +impl PooledTransactionOverview { + /// Create a new `PooledTransactionOverview` with a given maximum cache size. + pub fn new() -> Self { + // we are defaulting here to a memory usage of maximum 1 MB netto data. + // 40 byte per entry (32 byte hash + 8 byte usize) + // so we can store about 26214 cached entries per megabyte of date. + + Self { + last_fetched: new_h256_fast_lru_map::( + NonZeroUsize::new(26214).unwrap(), /* about 1 MB + some overhead */ + ), + } + } + + /// Check if a transaction is already pooled. + pub fn get_last_fetched(&mut self, hash: &H256) -> Option<&Instant> { + self.last_fetched.get(hash) + } + + /// Add a transaction to the cache. + pub fn report_transaction_pooling(&mut self, hash: &H256) { + self.last_fetched.push(hash.clone(), Instant::now()); + } + + // pub fn report_transaction_pooling_finished(&mut self, hash: &H256) { + // self.last_fetched.pop(hash); + // // if we tried to access an entry that is not in the map, we ignore it. 
+ // } +} diff --git a/crates/ethcore/sync/src/chain/propagator.rs b/crates/ethcore/sync/src/chain/propagator.rs index e85d1d5de2..c0b0fafde5 100644 --- a/crates/ethcore/sync/src/chain/propagator.rs +++ b/crates/ethcore/sync/src/chain/propagator.rs @@ -14,48 +14,77 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use std::{cmp, collections::HashSet}; +use std::{ + cmp, + collections::HashSet, + time::{Duration, Instant}, +}; +use crate::{ + sync_io::SyncIo, + types::{BlockNumber, blockchain_info::BlockChainInfo, transaction::SignedTransaction}, +}; use bytes::Bytes; -use ethereum_types::H256; +use ethereum_types::{H256, H512}; use fastmap::H256FastSet; -use network::{client_version::ClientCapabilities, PeerId}; -use rand::RngCore; +use network::{Error, ErrorKind, PeerId, client_version::ClientCapabilities}; +use rand::{RngCore, seq::IteratorRandom}; use rlp::RlpStream; -use sync_io::SyncIo; -use types::{blockchain_info::BlockChainInfo, transaction::SignedTransaction, BlockNumber}; + +use crate::chain::propagator_statistics::SyncPropagatorStatistics; use super::sync_packet::SyncPacket::{self, *}; use super::{ - random, ChainSync, ETH_PROTOCOL_VERSION_65, MAX_PEERS_PROPAGATION, MAX_PEER_LAG_PROPAGATION, - MAX_TRANSACTION_PACKET_SIZE, MIN_PEERS_PROPAGATION, + ChainSync, MAX_PEER_LAG_PROPAGATION, MAX_PEERS_PROPAGATION, MAX_TRANSACTION_PACKET_SIZE, + MIN_PEERS_PROPAGATION, random, }; use ethcore_miner::pool::VerifiedTransaction; use std::sync::Arc; const NEW_POOLED_HASHES_LIMIT: usize = 4096; +const MAX_TRACE_PROPAGATED_TXS: usize = 20; + /// The Chain Sync Propagator: propagates data to peers -pub struct SyncPropagator; +// pub struct SyncPropagator<'a> { + +// //sync: ChainSync +// } + +// SyncPropagator for +impl ChainSync { + // fn new( ) -> Self { + // SyncPropagator { + // statistics: SyncPropagatorStatistics::default(), + // sync + // } + // } -impl SyncPropagator { // t_nb 11.4.3 propagates latest 
block to a set of peers - pub fn propagate_blocks( - sync: &mut ChainSync, + pub(crate) fn propagate_blocks( + &mut self, chain_info: &BlockChainInfo, io: &mut dyn SyncIo, blocks: &[H256], peers: &[PeerId], ) -> usize { + if peers.len() == 0 { + return 0; + } trace!(target: "sync", "Sending NewBlocks to {:?}", peers); let sent = peers.len(); let mut send_packet = |io: &mut dyn SyncIo, rlp: Bytes| { for peer_id in peers { - SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); + self.statistics + .log_propagated_block(io, *peer_id, blocks.len(), rlp.len()); + + let send_result = ChainSync::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); - if let Some(ref mut peer) = sync.peers.get_mut(peer_id) { - peer.latest_hash = chain_info.best_block_hash.clone(); + if send_result.is_ok() { + if let Some(ref mut peer) = self.peers.get_mut(peer_id) { + peer.latest_hash = chain_info.best_block_hash.clone(); + } } } }; @@ -74,12 +103,15 @@ impl SyncPropagator { } // t_nb 11.4.2 propagates new known hashes to all peers - pub fn propagate_new_hashes( - sync: &mut ChainSync, + pub(crate) fn propagate_new_hashes( + &mut self, chain_info: &BlockChainInfo, io: &mut dyn SyncIo, peers: &[PeerId], ) -> usize { + if peers.len() == 0 { + return 0; + } trace!(target: "sync", "Sending NewHashes to {:?}", peers); let last_parent = *io.chain().best_block_header().parent_hash(); let best_block_hash = chain_info.best_block_hash; @@ -91,17 +123,33 @@ impl SyncPropagator { let sent = peers.len(); for peer_id in peers { - if let Some(ref mut peer) = sync.peers.get_mut(peer_id) { + if let Some(ref mut peer) = self.peers.get_mut(peer_id) { peer.latest_hash = best_block_hash; } - SyncPropagator::send_packet(io, *peer_id, NewBlockHashesPacket, rlp.clone()); + let _ = ChainSync::send_packet(io, *peer_id, NewBlockHashesPacket, rlp.clone()); } sent } /// propagates new transactions to all peers - pub fn propagate_new_transactions bool>( - sync: &mut ChainSync, + pub(crate) fn 
propagate_new_ready_transactions(&mut self, io: &mut dyn SyncIo) { + debug!(target: "sync", "propagate_new_ready_transactions"); + + let deadline = Instant::now() + Duration::from_millis(500); + + self.propagate_ready_transactions(io, || { + if deadline > Instant::now() { + true + } else { + debug!(target: "sync", "Wasn't able to finish transaction propagation within a deadline."); + false + } + }); + } + + /// propagates new transactions to all peers + pub(crate) fn propagate_new_transactions bool>( + &mut self, io: &mut dyn SyncIo, tx_hashes: Vec, should_continue: F, @@ -112,20 +160,20 @@ impl SyncPropagator { .filter_map(|hash| io.chain().transaction(hash)) .collect() }; - SyncPropagator::propagate_transactions(sync, io, transactions, true, should_continue) + self.propagate_transactions(io, transactions, true, should_continue) } - pub fn propagate_ready_transactions bool>( - sync: &mut ChainSync, + fn propagate_ready_transactions bool>( + &mut self, io: &mut dyn SyncIo, should_continue: F, ) -> usize { let transactions = |io: &dyn SyncIo| io.chain().transactions_to_propagate(); - SyncPropagator::propagate_transactions(sync, io, transactions, false, should_continue) + self.propagate_transactions(io, transactions, false, should_continue) } fn propagate_transactions_to_peers bool>( - sync: &mut ChainSync, + &mut self, io: &mut dyn SyncIo, peers: Vec, transactions: Vec<&SignedTransaction>, @@ -136,33 +184,33 @@ impl SyncPropagator { .iter() .map(|tx| tx.hash()) .collect::(); - let all_transactions_rlp = { - let mut packet = RlpStream::new_list(transactions.len()); - for tx in &transactions { - tx.rlp_append(&mut packet); - } - packet.out() - }; - let all_transactions_hashes_rlp = - rlp::encode_list(&all_transactions_hashes.iter().copied().collect::>()); let block_number = io.chain().chain_info().best_block_number; if are_new { - sync.transactions_stats - .retain_new(block_number, sync.new_transactions_stats_period); + self.transactions_stats + 
.retain_new(block_number, self.new_transactions_stats_period); } else { - sync.transactions_stats + self.transactions_stats .retain_pending(&all_transactions_hashes); } + debug!(target: "sync", "Propagating {} transactions to {} peers", transactions.len(), peers.len()); + if all_transactions_hashes.len() > MAX_TRACE_PROPAGATED_TXS { + trace!(target: "sync", "Propagating {:?}", all_transactions_hashes .iter().choose_multiple(&mut rand::thread_rng(), MAX_TRACE_PROPAGATED_TXS)); + } else { + trace!(target: "sync", "Propagating {:?}", all_transactions_hashes); + }; + let send_packet = |io: &mut dyn SyncIo, + stats: &mut SyncPropagatorStatistics, peer_id: PeerId, is_hashes: bool, sent: usize, rlp: Bytes| { let size = rlp.len(); - SyncPropagator::send_packet( + + let send_result = ChainSync::send_packet( io, peer_id, if is_hashes { @@ -172,7 +220,15 @@ impl SyncPropagator { }, rlp, ); - trace!(target: "sync", "{:02} <- {} ({} entries; {} bytes)", peer_id, if is_hashes { "NewPooledTransactionHashes" } else { "Transactions" }, sent, size); + + if send_result.is_ok() { + if is_hashes { + stats.log_propagated_hashes(sent, size); + } else { + stats.log_propagated_transactions(sent, size); + } + trace!(target: "sync", "{:02} <- {} ({} entries; {} bytes)", peer_id, if is_hashes { "NewPooledTransactionHashes" } else { "Transactions" }, sent, size); + } }; let mut sent_to_peers = HashSet::new(); @@ -185,29 +241,49 @@ impl SyncPropagator { return sent_to_peers; } - let stats = &mut sync.transactions_stats; - let peer_info = sync.peers.get_mut(&peer_id) - .expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed"); + let stats = &mut self.transactions_stats; + let peer_info = self.peers.get_mut(&peer_id) + .expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed"); - let is_hashes = 
peer_info.protocol_version >= ETH_PROTOCOL_VERSION_65.0; + let (id, is_hashes) = if let Some(session_info) = io.peer_session_info(peer_id) { + ( + session_info.id, + session_info.is_pooled_transactions_capable(), + ) + } else { + warn!(target: "sync", "no peer session info available: could not detect if peer is capable of eip-2464 transaction gossiping"); + continue; + }; // Send all transactions, if the peer doesn't know about anything if peer_info.last_sent_transactions.is_empty() { // update stats for hash in &all_transactions_hashes { - let id = io.peer_session_info(peer_id).and_then(|info| info.id); stats.propagated(hash, are_new, id, block_number); } peer_info.last_sent_transactions = all_transactions_hashes.clone(); let rlp = { if is_hashes { - all_transactions_hashes_rlp.clone() + rlp::encode_list( + &all_transactions_hashes.iter().copied().collect::>(), + ) } else { - all_transactions_rlp.clone() + let mut packet = RlpStream::new_list(transactions.len()); + for tx in &transactions { + tx.rlp_append(&mut packet); + } + packet.out() } }; - send_packet(io, peer_id, is_hashes, all_transactions_hashes.len(), rlp); + send_packet( + io, + &mut self.statistics, + peer_id, + is_hashes, + all_transactions_hashes.len(), + rlp, + ); sent_to_peers.insert(peer_id); max_sent = cmp::max(max_sent, all_transactions_hashes.len()); continue; @@ -254,7 +330,6 @@ impl SyncPropagator { }; // Update stats. 
- let id = io.peer_session_info(peer_id).and_then(|info| info.id); for hash in &to_send { stats.propagated(hash, are_new, id, block_number); } @@ -264,7 +339,14 @@ impl SyncPropagator { .chain(&to_send) .cloned() .collect(); - send_packet(io, peer_id, is_hashes, to_send.len(), packet.out()); + send_packet( + io, + &mut self.statistics, + peer_id, + is_hashes, + to_send.len(), + packet.out(), + ); sent_to_peers.insert(peer_id); max_sent = cmp::max(max_sent, to_send.len()); } @@ -274,69 +356,101 @@ impl SyncPropagator { } // t_nb 11.4.1 propagate latest blocks to peers - pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut dyn SyncIo, sealed: &[H256]) { + pub(crate) fn propagate_latest_blocks<'a>(&mut self, io: &mut dyn SyncIo, sealed: &[H256]) { let chain_info = io.chain().chain_info(); - if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() + if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { - let peers = sync.get_lagging_peers(&chain_info); + let peers = self.get_lagging_peers(&chain_info); if sealed.is_empty() { // t_nb 11.4.2 - let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); + let hashes = self.propagate_new_hashes(&chain_info, io, &peers); let peers = ChainSync::select_random_peers(&peers); // t_nb 11.4.3 - let blocks = - SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); + let blocks = self.propagate_blocks(&chain_info, io, sealed, &peers); if blocks != 0 || hashes != 0 { trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); } } else { + // todo: on HBBFT we do not need to send the new sealed blocks to all validators, because + // they can create them themselves by the Consensus engine. 
+ // t_nb 11.4.3 - SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); + self.propagate_blocks(&chain_info, io, sealed, &peers); // t_nb 11.4.2 - SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); + self.propagate_new_hashes(&chain_info, io, &peers); trace!(target: "sync", "Sent sealed block to all peers"); }; } - sync.last_sent_block_number = chain_info.best_block_number; + self.last_sent_block_number = chain_info.best_block_number; } // t_nb 11.4.4 Distribute valid proposed blocks to subset of current peers. (if there is any proposed) - pub fn propagate_proposed_blocks( - sync: &mut ChainSync, - io: &mut dyn SyncIo, - proposed: &[Bytes], - ) { - let peers = sync.get_consensus_peers(); + pub(crate) fn propagate_proposed_blocks(&mut self, io: &mut dyn SyncIo, proposed: &[Bytes]) { + let peers = self.get_consensus_peers(); trace!(target: "sync", "Sending proposed blocks to {:?}", peers); for block in proposed { + // todo: sometimes we get at the receiving end blocks, with mismatched total difficulty, + // so we ignore those blocks on import. + // might that be the case if we are sending more than 1 block here ? + // more about: https://github.com/DMDcoin/diamond-node/issues/61 let rlp = ChainSync::create_block_rlp(block, io.chain().chain_info().total_difficulty); for peer_id in &peers { - SyncPropagator::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); + let _ = ChainSync::send_packet(io, *peer_id, NewBlockPacket, rlp.clone()); } } } /// Broadcast consensus message to peers. 
- pub fn propagate_consensus_packet(sync: &mut ChainSync, io: &mut dyn SyncIo, packet: Bytes) { - let lucky_peers = ChainSync::select_random_peers(&sync.get_consensus_peers()); + pub(crate) fn propagate_consensus_packet(&mut self, io: &mut dyn SyncIo, packet: Bytes) { + let lucky_peers = ChainSync::select_random_peers(&self.get_consensus_peers()); trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); + + let mut num_sent_messages = 0; for peer_id in lucky_peers { - SyncPropagator::send_packet(io, peer_id, ConsensusDataPacket, packet.clone()); + let send_result = + ChainSync::send_packet(io, peer_id, ConsensusDataPacket, packet.clone()); + if let Err(e) = send_result { + info!(target: "sync", "Error broadcast consensus packet to peer {}: {:?}", peer_id, e); + } else { + num_sent_messages += 1; + } } + + self.statistics + .log_consensus_broadcast(num_sent_messages, packet.len()); } - pub fn send_consensus_packet( - _sync: &mut ChainSync, + /// Sends a packet to a specific peer. + /// The caller has to take care about Errors, and reshedule if an error occurs. 
+ pub(crate) fn send_consensus_packet( + &mut self, io: &mut dyn SyncIo, packet: Bytes, - peer_id: usize, - ) { - SyncPropagator::send_packet(io, peer_id, ConsensusDataPacket, packet.clone()); + peer: &H512, + ) -> Result<(), Error> { + let peer_id = match io.node_id_to_peer_id(peer) { + Some(id) => id, + None => { + debug!(target: "sync", "Cannot send consensus message: Peer with node id {} seems not to be connected.", peer); + return Err(ErrorKind::PeerNotFound.into()); + } + }; + let packet_len = packet.len(); + let send_result = ChainSync::send_packet(io, peer_id, ConsensusDataPacket, packet.clone()); + match send_result { + Ok(_) => { + self.statistics.log_consensus(packet_len); + } + Err(e) => { + return Err(e); + } + } + return send_result; } - fn select_peers_for_transactions(sync: &ChainSync, filter: F, are_new: bool) -> Vec + fn select_peers_for_transactions(&self, filter: F, are_new: bool) -> Vec where F: Fn(&PeerId) -> bool, { @@ -348,12 +462,12 @@ impl SyncPropagator { let mut random = random::new(); // sqrt(x)/x scaled to max u32 let fraction = - ((sync.peers.len() as f64).powf(-0.5) * (u32::max_value() as f64).round()) as u32; - let small = sync.peers.len() < MIN_PEERS_PROPAGATION; + ((self.peers.len() as f64).powf(-0.5) * (u32::max_value() as f64).round()) as u32; + let small = self.peers.len() < MIN_PEERS_PROPAGATION; Box::new(move |_| small || random.next_u32() < fraction) }; - sync.peers + self.peers .keys() .cloned() .filter(filter) @@ -363,21 +477,24 @@ impl SyncPropagator { } /// Generic packet sender - pub fn send_packet( + pub(crate) fn send_packet( sync: &mut dyn SyncIo, peer_id: PeerId, packet_id: SyncPacket, packet: Bytes, - ) { - if let Err(e) = sync.send(peer_id, packet_id, packet) { + ) -> Result<(), Error> { + let result = sync.send(peer_id, packet_id, packet); + if let Err(e) = &result { debug!(target:"sync", "Error sending packet: {:?}", e); sync.disconnect_peer(peer_id); } + + return result; } /// propagates new transactions to all 
peers fn propagate_transactions<'a, F, G>( - sync: &mut ChainSync, + &mut self, io: &mut dyn SyncIo, get_transactions: G, are_new: bool, @@ -387,8 +504,10 @@ impl SyncPropagator { F: FnMut() -> bool, G: Fn(&dyn SyncIo) -> Vec>, { + trace!(target:"sync", "propagate_transactions"); + // Early out if nobody to send to. - if sync.peers.is_empty() { + if self.peers.is_empty() { return 0; } @@ -409,9 +528,8 @@ impl SyncPropagator { // usual transactions could be propagated to all peers let mut affected_peers = HashSet::new(); if !transactions.is_empty() { - let peers = SyncPropagator::select_peers_for_transactions(sync, |_| true, are_new); - affected_peers = SyncPropagator::propagate_transactions_to_peers( - sync, + let peers = ChainSync::select_peers_for_transactions(self, |_| true, are_new); + affected_peers = self.propagate_transactions_to_peers( io, peers, transactions, @@ -421,22 +539,19 @@ impl SyncPropagator { } // most of times service_transactions will be empty - // => there's no need to merge packets + // => we still need to merge packets if !service_transactions.is_empty() { - let service_transactions_peers = SyncPropagator::select_peers_for_transactions( - sync, + let service_transactions_peers = self.select_peers_for_transactions( |peer_id| io.peer_version(*peer_id).accepts_service_transaction(), are_new, ); - let service_transactions_affected_peers = - SyncPropagator::propagate_transactions_to_peers( - sync, - io, - service_transactions_peers, - service_transactions, - are_new, - &mut should_continue, - ); + let service_transactions_affected_peers = self.propagate_transactions_to_peers( + io, + service_transactions_peers, + service_transactions, + are_new, + &mut should_continue, + ); affected_peers.extend(&service_transactions_affected_peers); } @@ -446,18 +561,15 @@ impl SyncPropagator { #[cfg(test)] mod tests { + use crate::tests::{helpers::TestIo, snapshot::TestSnapshotService}; use ethcore::client::{BlockInfo, ChainInfo, EachBlockWith, 
TestBlockChainClient}; use parking_lot::RwLock; - use rlp::Rlp; use std::collections::VecDeque; - use tests::{helpers::TestIo, snapshot::TestSnapshotService}; - use types::transaction::TypedTransaction; use super::{ super::{tests::*, *}, *, }; - use ethcore::ethereum::new_london_test; #[test] fn sends_new_hashes_to_lagging_peer() { @@ -470,8 +582,7 @@ mod tests { let mut io = TestIo::new(&mut client, &ss, &queue, None); let peers = sync.get_lagging_peers(&chain_info); - let peer_count = - SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers); + let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); // 1 message should be send assert_eq!(1, io.packets.len()); @@ -491,8 +602,7 @@ mod tests { let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); let peers = sync.get_lagging_peers(&chain_info); - let peer_count = - SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers); + let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); // 1 message should be send assert_eq!(1, io.packets.len()); @@ -513,13 +623,7 @@ mod tests { let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); let peers = sync.get_lagging_peers(&chain_info); - let peer_count = SyncPropagator::propagate_blocks( - &mut sync, - &chain_info, - &mut io, - &[hash.clone()], - &peers, - ); + let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); // 1 message should be send assert_eq!(1, io.packets.len()); @@ -563,7 +667,7 @@ mod tests { ); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - SyncPropagator::propagate_proposed_blocks(&mut sync, &mut io, &[block]); + sync.propagate_proposed_blocks(&mut io, &[block]); // 1 message should be sent assert_eq!(1, io.packets.len()); @@ -571,6 +675,7 @@ mod tests { assert_eq!(0x07, io.packets[0].packet_id); } + #[cfg(feature = 
"devP2PTests")] #[test] fn propagates_ready_transactions() { let mut client = TestBlockChainClient::new(); @@ -580,13 +685,13 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count = sync.propagate_ready_transactions(&mut io, || true); // Try to propagate same transactions for the second time - let peer_count2 = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count2 = sync.propagate_ready_transactions(&mut io, || true); // Even after new block transactions should not be propagated twice sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); // Try to propagate same transactions for the third time - let peer_count3 = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count3 = sync.propagate_ready_transactions(&mut io, || true); // 1 message should be send assert_eq!(1, io.packets.len()); @@ -598,6 +703,7 @@ mod tests { assert_eq!(0x02, io.packets[0].packet_id); } + #[cfg(feature = "devP2PTests")] #[test] fn propagates_ready_transactions_to_subset_of_peers() { let mut client = TestBlockChainClient::new(); @@ -610,12 +716,13 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count = sync.propagate_ready_transactions(&mut io, || true); // Currently random implementation for test returns 8 peers as result of peers selection. 
assert_eq!(8, peer_count); } + #[cfg(feature = "devP2PTests")] #[test] fn propagates_new_transactions_to_all_peers() { let (new_transaction_hashes_tx, new_transaction_hashes_rx) = crossbeam_channel::unbounded(); @@ -631,12 +738,12 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); assert_eq!(25, peer_count); } + #[cfg(feature = "devP2PTests")] #[test] fn propagates_new_transactions() { let (new_transaction_hashes_tx, new_transaction_hashes_rx) = crossbeam_channel::unbounded(); @@ -653,16 +760,13 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); // Try to propagate same transactions for the second time - let peer_count2 = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count2 = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); // Even after new block transactions should not be propagated twice sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); // Try to propagate same transactions for the third time - let peer_count3 = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count3 = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); // 1 message should be send assert_eq!(1, io.packets.len()); @@ -674,6 +778,7 @@ mod tests { assert_eq!(0x02, io.packets[0].packet_id); } + #[cfg(feature = "devP2PTests")] #[test] fn 
does_not_propagate_ready_transactions_after_new_block() { let mut client = TestBlockChainClient::new(); @@ -683,7 +788,7 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count = sync.propagate_ready_transactions(&mut io, || true); io.chain.insert_transaction_to_queue(); // New block import should not trigger propagation. // (we only propagate on timeout) @@ -697,6 +802,7 @@ mod tests { assert_eq!(0x02, io.packets[0].packet_id); } + #[cfg(feature = "devP2PTests")] #[test] fn does_not_propagate_new_transactions_after_new_block() { let (new_transaction_hashes_tx, new_transaction_hashes_rx) = crossbeam_channel::unbounded(); @@ -713,8 +819,7 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); io.chain.insert_transaction_to_queue(); // New block import should not trigger propagation. 
// (we only propagate on timeout) @@ -724,8 +829,14 @@ mod tests { assert_eq!(1, io.packets.len()); // 1 peer should receive the message assert_eq!(1, peer_count); + + // depending on ETH_PROTOCOL_VERSION_65.0, it sends here either TRANSACTION_PACK or NewPooledTransactionHashesPacket + // as for diamond Node // TRANSACTIONS_PACKET - assert_eq!(0x02, io.packets[0].packet_id); + assert!( + io.packets[0].packet_id == SyncPacket::NewPooledTransactionHashesPacket as u8 + || io.packets[0].packet_id == SyncPacket::TransactionsPacket as u8 + ); } #[test] @@ -741,14 +852,12 @@ mod tests { let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); - let peer_count_new = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count = sync.propagate_ready_transactions(&mut io, || true); + let peer_count_new = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); // Try to propagate same transactions for the second time - let peer_count2 = SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); - let peer_count_new2 = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count2 = sync.propagate_ready_transactions(&mut io, || true); + let peer_count_new2 = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); assert_eq!(0, io.packets.len()); assert_eq!(0, peer_count); @@ -757,6 +866,7 @@ mod tests { assert_eq!(0, peer_count_new2); } + #[cfg(feature = "devP2PTests")] #[test] fn propagates_transactions_without_alternating() { let mut client = TestBlockChainClient::new(); @@ -768,8 +878,7 @@ mod tests { // should sent some { let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = - 
SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count = sync.propagate_ready_transactions(&mut io, || true); assert_eq!(1, io.packets.len()); assert_eq!(1, peer_count); } @@ -778,11 +887,9 @@ mod tests { let (peer_count2, peer_count3) = { let mut io = TestIo::new(&mut client, &ss, &queue, None); // Propagate new transactions - let peer_count2 = - SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count2 = sync.propagate_ready_transactions(&mut io, || true); // And now the peer should have all transactions - let peer_count3 = - SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + let peer_count3 = sync.propagate_ready_transactions(&mut io, || true); (peer_count2, peer_count3) }; @@ -796,6 +903,7 @@ mod tests { assert_eq!(0x02, queue.read()[1].packet_id); } + #[cfg(feature = "devP2PTests")] #[test] fn should_maintain_transactions_propagation_stats() { let (new_transaction_hashes_tx, new_transaction_hashes_rx) = crossbeam_channel::unbounded(); @@ -814,13 +922,13 @@ mod tests { { let mut io = TestIo::new(&mut client, &ss, &queue, None); - SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + sync.propagate_ready_transactions(&mut io, || true); } let tx_hash2 = client.insert_transaction_to_queue(); { let mut io = TestIo::new(&mut client, &ss, &queue, None); - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash2], || true); + sync.propagate_new_transactions(&mut io, vec![tx_hash2], || true); } let stats = sync.pending_transactions_stats(); @@ -846,6 +954,7 @@ mod tests { ); } + #[cfg(feature = "devP2PTests")] #[test] fn should_propagate_service_transaction_to_selected_peers_only() { let mut client = TestBlockChainClient::new(); @@ -869,20 +978,23 @@ mod tests { .insert(3, "OpenEthereum/ABCDEFGH/v2.7.3/linux/rustc".to_owned()); // and new service transaction is propagated to peers - 
SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + sync.propagate_ready_transactions(&mut io, || true); // peer#2 && peer#3 are receiving service transaction - assert!(io - .packets - .iter() - .any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET - assert!(io - .packets - .iter() - .any(|p| p.packet_id == 0x02 && p.recipient == 3)); // TRANSACTIONS_PACKET + assert!( + io.packets + .iter() + .any(|p| p.packet_id == 0x02 && p.recipient == 2) + ); // TRANSACTIONS_PACKET + assert!( + io.packets + .iter() + .any(|p| p.packet_id == 0x02 && p.recipient == 3) + ); // TRANSACTIONS_PACKET assert_eq!(io.packets.len(), 2); } + #[cfg(feature = "devP2PTests")] #[test] fn should_propagate_service_transaction_is_sent_as_separate_message() { let mut client = TestBlockChainClient::new(); @@ -900,7 +1012,7 @@ mod tests { .insert(1, "OpenEthereum/v2.6.0/linux/rustc".to_owned()); // and service + non-service transactions are propagated to peers - SyncPropagator::propagate_ready_transactions(&mut sync, &mut io, || true); + sync.propagate_ready_transactions(&mut io, || true); // two separate packets for peer are queued: // 1) with non-service-transaction @@ -930,6 +1042,7 @@ mod tests { assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash)); } + #[cfg(feature = "devP2PTests")] #[test] fn should_propagate_transactions_with_max_fee_per_gas_lower_than_base_fee() { let (new_transaction_hashes_tx, new_transaction_hashes_rx) = crossbeam_channel::unbounded(); @@ -948,8 +1061,7 @@ mod tests { let ss = TestSnapshotService::new(); let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = - SyncPropagator::propagate_new_transactions(&mut sync, &mut io, vec![tx_hash], || true); + let peer_count = sync.propagate_new_transactions(&mut io, vec![tx_hash], || true); assert_eq!(1, io.packets.len()); assert_eq!(1, peer_count); diff --git a/crates/ethcore/sync/src/chain/propagator_statistics.rs 
b/crates/ethcore/sync/src/chain/propagator_statistics.rs new file mode 100644 index 0000000000..06274584b3 --- /dev/null +++ b/crates/ethcore/sync/src/chain/propagator_statistics.rs @@ -0,0 +1,212 @@ +use std::collections::HashMap; + +use stats::PrometheusMetrics; + +use crate::sync_io::SyncIo; + +#[derive(Default)] +pub struct SyncPropagatorStatistics { + logging_enabled: bool, + logging_peer_details_enabled: bool, + propagated_blocks: i64, + propagated_blocks_bytes: i64, + + consensus_bytes: i64, + consensus_packages: i64, + + consensus_broadcast_bytes: i64, + consensus_broadcast_packages: i64, + + transactions_propagated: i64, + transactions_propagated_bytes: i64, + + transaction_hashes_propagated: i64, + transaction_hashes_propagated_bytes: i64, + + responded_transactions_bytes: i64, + responded_transactions: i64, + + node_statistics: HashMap, +} + +struct SyncPropagatorNodeStatistics { + address: String, + propagated_blocks: i64, + propagated_blocks_bytes: i64, +} + +impl SyncPropagatorStatistics { + pub fn new() -> Self { + SyncPropagatorStatistics { + logging_enabled: true, + logging_peer_details_enabled: true, + ..Default::default() + } + } + + pub fn logging_enabled(&self) -> bool { + return self.logging_enabled; + } + + pub fn log_propagated_block( + &mut self, + io: &mut dyn SyncIo, + peer_id: usize, + blocks: usize, + bytes: usize, + ) { + if self.logging_enabled() { + self.propagated_blocks += blocks as i64; + self.propagated_blocks_bytes += bytes as i64; + + if self.logging_peer_details_enabled { + if let Some(peer_info) = io.peer_session_info(peer_id) { + //let mut node_statistics = &self.node_statistics; + let node_statistics = self + .node_statistics + .entry(peer_info.remote_address.clone()) + .or_insert_with(|| SyncPropagatorNodeStatistics { + address: peer_info.remote_address, + propagated_blocks: 0, + propagated_blocks_bytes: 0, + }); + + node_statistics.propagated_blocks += blocks as i64; + + node_statistics.propagated_blocks_bytes += bytes as 
i64; + } + } + } + } + + pub(crate) fn log_consensus(&mut self, bytelen: usize) { + if self.logging_enabled { + self.consensus_bytes += bytelen as i64; + self.consensus_packages += 1; + } + } + + pub(crate) fn log_consensus_broadcast(&mut self, num_peers: usize, bytes_len: usize) { + if self.logging_enabled { + self.consensus_broadcast_bytes += (bytes_len * num_peers) as i64; + self.consensus_broadcast_packages += num_peers as i64; + } + } + + pub(crate) fn log_propagated_hashes(&mut self, sent: usize, size: usize) { + if self.logging_enabled { + self.transaction_hashes_propagated += sent as i64; + self.transaction_hashes_propagated_bytes += size as i64; + } + } + + pub(crate) fn log_propagated_transactions(&mut self, sent: usize, size: usize) { + if self.logging_enabled { + self.transactions_propagated += sent as i64; + self.transactions_propagated_bytes += size as i64; + } + } + + pub(crate) fn log_requested_transactions_response( + &mut self, + num_txs: usize, + bytes_sent: usize, + ) { + if self.logging_enabled { + self.responded_transactions_bytes += bytes_sent as i64; + self.responded_transactions += num_txs as i64; + } + } +} + +impl PrometheusMetrics for SyncPropagatorStatistics { + fn prometheus_metrics(&self, registry: &mut stats::PrometheusRegistry) { + registry.register_counter( + "p2p_propagated_blocks", + "blocks sent", + self.propagated_blocks, + ); + registry.register_counter( + "p2p_propagated_blocks_bytes", + "block byte sent", + self.propagated_blocks_bytes, + ); + + registry.register_counter( + "p2p_cons_bytes", + "consensus bytes sent", + self.consensus_bytes, + ); + + registry.register_counter( + "p2p_cons_package", + "consensus packages sent", + self.consensus_packages, + ); + + registry.register_counter( + "p2p_cons_broadcast_bytes", + "consensus bytes broadcasted", + self.consensus_broadcast_bytes, + ); + + registry.register_counter( + "p2p_cons_broadcast_packages", + "total number consensus packages send through broadcast", + 
self.consensus_broadcast_packages, + ); + + registry.register_counter( + "p2p_propagated_txs", + "transactions propagated", + self.transactions_propagated, + ); + + registry.register_counter( + "p2p_propagated_txs_bytes", + "transactions propagated (byte size)", + self.transactions_propagated_bytes, + ); + + registry.register_counter( + "p2p_propagated_hashes", + "transaction hashes propagated", + self.transaction_hashes_propagated, + ); + + registry.register_counter( + "p2p_propagated_hashes_bytes", + "transaction hashes propagated (byte size)", + self.transaction_hashes_propagated_bytes, + ); + + registry.register_counter( + "p2p_responded_transactions", + "number of responded transactions", + self.responded_transactions, + ); + + registry.register_counter( + "p2p_responded_transactions_bytes", + "bytes of responded transactions", + self.responded_transactions_bytes, + ); + + self.node_statistics + .iter() + .for_each(|(_, node_statistics)| { + registry.register_gauge_with_other_node_label( + "p2p_propagated_blocks_peer", + "# blocks to peer", + &node_statistics.address, + node_statistics.propagated_blocks, + ); + registry.register_gauge_with_other_node_label( + "p2p_propagated_bytes_peer", + "bytes to peer", + &node_statistics.address, + node_statistics.propagated_blocks_bytes, + ); + }); + } +} diff --git a/crates/ethcore/sync/src/chain/request_id.rs b/crates/ethcore/sync/src/chain/request_id.rs index 29f24db8dd..3715213b29 100644 --- a/crates/ethcore/sync/src/chain/request_id.rs +++ b/crates/ethcore/sync/src/chain/request_id.rs @@ -1,8 +1,8 @@ -use bytes::Bytes; -use chain::{ - sync_packet::{PacketInfo, SyncPacket}, +use crate::chain::{ ChainSync, PeerInfo, + sync_packet::{PacketInfo, SyncPacket}, }; +use bytes::Bytes; use network::PeerId; use rlp::{DecoderError, Rlp, RlpStream}; diff --git a/crates/ethcore/sync/src/chain/requester.rs b/crates/ethcore/sync/src/chain/requester.rs index 5714ad56d2..1e229f8e5e 100644 --- 
a/crates/ethcore/sync/src/chain/requester.rs +++ b/crates/ethcore/sync/src/chain/requester.rs @@ -14,14 +14,13 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use block_sync::BlockRequest; +use crate::{block_sync::BlockRequest, sync_io::SyncIo, types::BlockNumber}; use bytes::Bytes; use ethereum_types::H256; +use fastmap::H256FastSet; use network::PeerId; use rlp::RlpStream; use std::time::Instant; -use sync_io::SyncIo; -use types::BlockNumber; use super::{ request_id::generate_request_id, @@ -107,26 +106,27 @@ impl SyncRequester { } /// Request pooled transactions from a peer + /// @return number of bytes sent pub fn request_pooled_transactions( sync: &mut ChainSync, io: &mut dyn SyncIo, peer_id: PeerId, - hashes: &[H256], - ) { - trace!(target: "sync", "{} <- GetPooledTransactions: {:?}", peer_id, hashes); + hashes: &H256FastSet, + ) -> usize { + debug!(target: "sync", "{} <- GetPooledTransactions: {:?}", peer_id, hashes); let mut rlp = RlpStream::new_list(hashes.len()); for h in hashes { rlp.append(h); } - SyncRequester::send_request( + return SyncRequester::send_request( sync, io, peer_id, PeerAsking::PooledTransactions, GetPooledTransactionsPacket, rlp.out(), - ) + ); } /// Find some headers or blocks to download for a peer. 
@@ -238,7 +238,7 @@ impl SyncRequester { asking: PeerAsking, packet_id: SyncPacket, packet: Bytes, - ) { + ) -> usize { if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { if peer.asking != PeerAsking::Nothing { warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking); @@ -247,13 +247,16 @@ impl SyncRequester { peer.ask_time = Instant::now(); let (packet, _) = generate_request_id(packet, peer, packet_id); - + let packet_bytes = packet.len(); let result = io.send(peer_id, packet_id, packet); if let Err(e) = result { debug!(target:"sync", "Error sending request: {:?}", e); io.disconnect_peer(peer_id); + return 0; } + return packet_bytes; } + return 0; } } diff --git a/crates/ethcore/sync/src/chain/supplier.rs b/crates/ethcore/sync/src/chain/supplier.rs index d51d9e2a2f..288cdd84a9 100644 --- a/crates/ethcore/sync/src/chain/supplier.rs +++ b/crates/ethcore/sync/src/chain/supplier.rs @@ -18,29 +18,30 @@ use bytes::Bytes; #[cfg(not(test))] use devp2p::PAYLOAD_SOFT_LIMIT; +use time_utils::DeadlineStopwatch; #[cfg(test)] pub const PAYLOAD_SOFT_LIMIT: usize = 100_000; +use crate::types::{BlockNumber, ids::BlockId}; use enum_primitive::FromPrimitive; use ethereum_types::{H256, H512}; use network::{self, PeerId}; use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; -use std::cmp; -use types::{ids::BlockId, BlockNumber}; +use std::{cmp, time::Duration}; -use sync_io::SyncIo; +use crate::sync_io::SyncIo; use super::{ - request_id::{prepend_request_id, strip_request_id, RequestId}, + request_id::{RequestId, prepend_request_id, strip_request_id}, sync_packet::{PacketInfo, SyncPacket, SyncPacket::*}, }; use super::{ - ChainSync, PacketProcessError, RlpResponseResult, SyncHandler, MAX_BODIES_TO_SEND, - MAX_HEADERS_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND, + ChainSync, MAX_BODIES_TO_SEND, MAX_HEADERS_TO_SEND, MAX_RECEIPTS_HEADERS_TO_SEND, + PacketProcessError, RlpResponseResult, SyncHandler, }; -use chain::MAX_NODE_DATA_TO_SEND; +use 
crate::chain::MAX_NODE_DATA_TO_SEND; use std::borrow::Borrow; /// The Chain Sync Supplier: answers requests from peers with available data @@ -252,7 +253,11 @@ impl SyncSupplier { } number } - None => return Ok(Some((BlockHeadersPacket, RlpStream::new_list(0)))), //no such header, return nothing + None => { + trace!(target: "sync", "{} -> GetBlockHeaders: no such header {}", peer_id, hash); + //no such header, return nothing + return Ok(Some((BlockHeadersPacket, RlpStream::new_list(0)))); + } } } else { let number = r.val_at::(0)?; @@ -310,20 +315,41 @@ impl SyncSupplier { let mut added = 0; let mut rlp = RlpStream::new(); rlp.begin_unbounded_list(); + let mut not_found = 0; + let mut parse_errors = 0; + + let deadline = DeadlineStopwatch::new(Duration::from_millis(200)); for v in r { if let Ok(hash) = v.as_val::() { - if let Some(tx) = io.chain().queued_transaction(hash) { + // io.chain().transaction(hash) + + if deadline.is_expired() { + debug!(target: "sync", "{} -> GetPooledTransactions: deadline reached, only returning partial result to ", peer_id); + break; + } + + // we do not lock here, if we cannot access the memory at this point in time, + // we will just skip this transaction, otherwise the other peer might wait to long, resulting in a timeout. + // also this solved a potential deadlock situation: + if let Some(tx) = io + .chain() + .transaction_if_readable(&hash, &deadline.time_left()) + { tx.signed().rlp_append(&mut rlp); added += 1; if rlp.len() > PAYLOAD_SOFT_LIMIT { break; } + } else { + not_found += 1; } + } else { + parse_errors += 1; } } rlp.finalize_unbounded_list(); - trace!(target: "sync", "{} -> GetPooledTransactions: returned {} entries", peer_id, added); + debug!(target: "sync", "{} -> GetPooledTransactions: returned {} entries. Not found: {}. 
unparsable {}", peer_id, added, not_found, parse_errors); Ok(Some((PooledTransactionsPacket, rlp))) } @@ -503,7 +529,10 @@ impl SyncSupplier { #[cfg(test)] mod test { use super::{super::tests::*, *}; - use blocks::SyncHeader; + use crate::{ + blocks::SyncHeader, + tests::{helpers::TestIo, snapshot::TestSnapshotService}, + }; use bytes::Bytes; use ethcore::{ client::{BlockChainClient, EachBlockWith, TestBlockChainClient}, @@ -513,7 +542,6 @@ mod test { use parking_lot::RwLock; use rlp::{Rlp, RlpStream}; use std::{collections::VecDeque, str::FromStr}; - use tests::{helpers::TestIo, snapshot::TestSnapshotService}; #[test] fn return_block_headers() { @@ -535,7 +563,7 @@ mod test { rlp.out() } fn to_header_vec( - rlp: ::chain::RlpResponseResult, + rlp: crate::chain::RlpResponseResult, eip1559_transition: BlockNumber, ) -> Vec { Rlp::new(&rlp.unwrap().unwrap().1.out()) diff --git a/crates/ethcore/sync/src/chain/sync_packet.rs b/crates/ethcore/sync/src/chain/sync_packet.rs index f8964040d2..866ed02e85 100644 --- a/crates/ethcore/sync/src/chain/sync_packet.rs +++ b/crates/ethcore/sync/src/chain/sync_packet.rs @@ -24,7 +24,7 @@ #![allow(unused_doc_comments)] -use api::{ETH_PROTOCOL, PAR_PROTOCOL}; +use crate::api::{ETH_PROTOCOL, PAR_PROTOCOL}; use network::{PacketId, ProtocolId}; // An enum that defines all known packet ids in the context of diff --git a/crates/ethcore/sync/src/lib.rs b/crates/ethcore/sync/src/lib.rs index b288b8892d..78a8cc8044 100644 --- a/crates/ethcore/sync/src/lib.rs +++ b/crates/ethcore/sync/src/lib.rs @@ -28,7 +28,6 @@ extern crate ethcore_io as io; extern crate ethcore_network as network; extern crate ethcore_network_devp2p as devp2p; extern crate ethereum_forkid; -extern crate ethereum_types; extern crate ethkey; extern crate ethstore; extern crate fastmap; @@ -61,7 +60,6 @@ extern crate macros; extern crate log; #[macro_use] extern crate trace_time; -extern crate ethcore_miner; mod block_sync; mod blocks; @@ -75,7 +73,9 @@ mod tests; mod api; -pub 
use api::*; -pub use chain::{SyncState, SyncStatus}; +pub use crate::{ + api::*, + chain::{SyncState, SyncStatus}, +}; pub use devp2p::validate_node_url; pub use network::{ConnectionDirection, ConnectionFilter, Error, ErrorKind, NonReservedPeerMode}; diff --git a/crates/ethcore/sync/src/snapshot.rs b/crates/ethcore/sync/src/snapshot.rs index b5f33081e7..d614f2d628 100644 --- a/crates/ethcore/sync/src/snapshot.rs +++ b/crates/ethcore/sync/src/snapshot.rs @@ -226,9 +226,11 @@ mod test { let (manifest, mhash, state_chunks, block_chunks) = test_manifest(); snapshot.reset_to(&manifest, &mhash); assert_eq!(snapshot.done_chunks(), 0); - assert!(snapshot - .validate_chunk(&H256::random().as_bytes().to_vec()) - .is_err()); + assert!( + snapshot + .validate_chunk(&H256::random().as_bytes().to_vec()) + .is_err() + ); let requested: Vec = (0..40).map(|_| snapshot.needed_chunk().unwrap()).collect(); assert!(snapshot.needed_chunk().is_none()); diff --git a/crates/ethcore/sync/src/sync_io.rs b/crates/ethcore/sync/src/sync_io.rs index fd811e469e..b6382b40f5 100644 --- a/crates/ethcore/sync/src/sync_io.rs +++ b/crates/ethcore/sync/src/sync_io.rs @@ -14,15 +14,18 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{ + chain::sync_packet::{PacketInfo, SyncPacket}, + types::BlockNumber, +}; use bytes::Bytes; -use chain::sync_packet::{PacketInfo, SyncPacket}; use ethcore::{client::BlockChainClient, snapshot::SnapshotService}; +use ethereum_types::H512; use network::{ - client_version::ClientVersion, Error, NetworkContext, PacketId, PeerId, ProtocolId, SessionInfo, + Error, NetworkContext, PacketId, PeerId, ProtocolId, SessionInfo, client_version::ClientVersion, }; use parking_lot::RwLock; use std::collections::HashMap; -use types::BlockNumber; /// IO interface for the syncing handler. /// Provides peer connection management and an interface to the blockchain client. 
@@ -56,6 +59,9 @@ pub trait SyncIo { fn is_expired(&self) -> bool; /// Return sync overlay fn chain_overlay(&self) -> &RwLock>; + + /// Returns the peer ID for a given node id, if a corresponding peer exists. + fn node_id_to_peer_id(&self, node_id: &H512) -> Option; } /// Wraps `NetworkContext` and the blockchain client @@ -130,4 +136,8 @@ impl<'s> SyncIo for NetSyncIo<'s> { fn peer_version(&self, peer_id: PeerId) -> ClientVersion { self.network.peer_client_version(peer_id) } + + fn node_id_to_peer_id(&self, node_id: &H512) -> Option { + self.network.node_id_to_peer_id(node_id) + } } diff --git a/crates/ethcore/sync/src/tests/chain.rs b/crates/ethcore/sync/src/tests/chain.rs index 3cc12729b0..bfbc1d0b74 100644 --- a/crates/ethcore/sync/src/tests/chain.rs +++ b/crates/ethcore/sync/src/tests/chain.rs @@ -15,15 +15,14 @@ // along with OpenEthereum. If not, see . use super::helpers::*; -use chain::SyncState; +use crate::{SyncConfig, WarpSync, chain::SyncState}; use ethcore::client::{ BlockChainClient, BlockId, BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient, }; use std::sync::Arc; -use SyncConfig; -use WarpSync; #[test] +#[cfg(feature = "devP2PTests")] fn two_peers() { ::env_logger::try_init().ok(); let mut net = TestNet::new(3); @@ -38,6 +37,7 @@ fn two_peers() { } #[test] +#[cfg(feature = "devP2PTests")] fn long_chain() { ::env_logger::try_init().ok(); let mut net = TestNet::new(2); @@ -67,10 +67,12 @@ fn takes_few_steps() { net.peer(1).chain.add_blocks(100, EachBlockWith::Uncle); net.peer(2).chain.add_blocks(100, EachBlockWith::Uncle); let total_steps = net.sync(); - assert!(total_steps < 20); + // hotfix for https://github.com/DMDcoin/diamond-node/issues/209 increased the number of steps required to sync. 
+ assert!(total_steps <= 110); } #[test] +#[cfg(feature = "devP2PTests")] fn empty_blocks() { ::env_logger::try_init().ok(); let mut net = TestNet::new(3); @@ -92,6 +94,7 @@ fn empty_blocks() { } #[test] +#[cfg(feature = "devP2PTests")] fn forked() { ::env_logger::try_init().ok(); let mut net = TestNet::new(3); @@ -116,6 +119,7 @@ fn forked() { } #[test] +#[cfg(feature = "devP2PTests")] fn forked_with_misbehaving_peer() { ::env_logger::try_init().ok(); let mut net = TestNet::new(3); @@ -140,6 +144,7 @@ fn forked_with_misbehaving_peer() { } #[test] +#[cfg(feature = "devP2PTests")] fn net_hard_fork() { ::env_logger::try_init().ok(); let ref_client = TestBlockChainClient::new(); @@ -165,6 +170,7 @@ fn net_hard_fork() { } #[test] +#[cfg(feature = "devP2PTests")] fn restart() { ::env_logger::try_init().ok(); let mut net = TestNet::new(3); @@ -195,6 +201,7 @@ fn status_empty() { } #[test] +#[cfg(feature = "devP2PTests")] fn status_packet() { let mut net = TestNet::new(2); net.peer(0).chain.add_blocks(100, EachBlockWith::Uncle); @@ -209,6 +216,7 @@ fn status_packet() { } #[test] +#[cfg(feature = "devP2PTests")] fn propagate_hashes() { let mut net = TestNet::new(6); net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); @@ -236,6 +244,7 @@ fn propagate_hashes() { } #[test] +#[cfg(feature = "devP2PTests")] fn propagate_blocks() { let mut net = TestNet::new(20); net.peer(1).chain.add_blocks(10, EachBlockWith::Uncle); @@ -258,6 +267,7 @@ fn propagate_blocks() { } #[test] +#[cfg(feature = "devP2PTests")] fn restart_on_malformed_block() { ::env_logger::try_init().ok(); let mut net = TestNet::new(2); @@ -287,6 +297,7 @@ fn reject_on_broken_chain() { } #[test] +#[cfg(feature = "devP2PTests")] fn disconnect_on_unrelated_chain() { ::env_logger::try_init().ok(); let mut net = TestNet::new(2); diff --git a/crates/ethcore/sync/src/tests/consensus.rs b/crates/ethcore/sync/src/tests/consensus.rs index c9d441ed1e..ac0229ac5c 100644 --- a/crates/ethcore/sync/src/tests/consensus.rs +++ 
b/crates/ethcore/sync/src/tests/consensus.rs @@ -15,6 +15,11 @@ // along with OpenEthereum. If not, see . use super::helpers::*; +use crate::{ + SyncConfig, + io::{IoChannel, IoHandler}, + types::transaction::{Action, PendingTransaction, Transaction, TypedTransaction}, +}; use crypto::publickey::{KeyPair, Secret}; use ethcore::{ client::{ChainInfo, ClientIoMessage}, @@ -24,10 +29,7 @@ use ethcore::{ }; use ethereum_types::{Address, U256}; use hash::keccak; -use io::{IoChannel, IoHandler}; use std::sync::Arc; -use types::transaction::{Action, PendingTransaction, Transaction, TypedTransaction}; -use SyncConfig; fn new_tx(secret: &Secret, nonce: U256, chain_id: u64) -> PendingTransaction { let signed = TypedTransaction::Legacy(Transaction { @@ -43,6 +45,7 @@ fn new_tx(secret: &Secret, nonce: U256, chain_id: u64) -> PendingTransaction { } #[test] +#[cfg(feature = "devP2PTests")] fn authority_round() { let s0 = KeyPair::from_secret_slice(keccak("1").as_bytes()).unwrap(); let s1 = KeyPair::from_secret_slice(keccak("0").as_bytes()).unwrap(); diff --git a/crates/ethcore/sync/src/tests/helpers.rs b/crates/ethcore/sync/src/tests/helpers.rs index c5793ab23e..8131754eec 100644 --- a/crates/ethcore/sync/src/tests/helpers.rs +++ b/crates/ethcore/sync/src/tests/helpers.rs @@ -14,36 +14,40 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use api::PAR_PROTOCOL; -use bytes::Bytes; -use chain::{ - sync_packet::{PacketInfo, SyncPacket}, - ChainSync, ForkFilterApi, SyncSupplier, ETH_PROTOCOL_VERSION_66, PAR_PROTOCOL_VERSION_2, +use crate::{ + api::PAR_PROTOCOL, + chain::{ + ChainSync, ETH_PROTOCOL_VERSION_66, ForkFilterApi, PAR_PROTOCOL_VERSION_2, SyncSupplier, + sync_packet::{PacketInfo, SyncPacket}, + }, }; +use bytes::Bytes; use ethcore::{ client::{ BlockChainClient, ChainMessageType, ChainNotify, Client as EthcoreClient, ClientConfig, ClientIoMessage, NewBlocks, TestBlockChainClient, }, + exit::ShutdownManager, miner::Miner, snapshot::SnapshotService, spec::Spec, test_helpers, }; +use crate::{ + io::{IoChannel, IoContext, IoHandler}, + sync_io::SyncIo, + tests::snapshot::*, +}; use ethereum_types::H256; -use io::{IoChannel, IoContext, IoHandler}; -use network::{self, client_version::ClientVersion, PacketId, PeerId, ProtocolId, SessionInfo}; +use network::{self, PacketId, PeerId, ProtocolId, SessionInfo, client_version::ClientVersion}; use parking_lot::RwLock; use std::{ collections::{HashMap, HashSet, VecDeque}, sync::Arc, }; -use sync_io::SyncIo; -use tests::snapshot::*; -use types::BlockNumber; -use SyncConfig; +use crate::{SyncConfig, types::BlockNumber}; pub trait FlushingBlockChainClient: BlockChainClient { fn flush(&self) {} @@ -177,6 +181,10 @@ where fn chain_overlay(&self) -> &RwLock> { &self.overlay } + + fn node_id_to_peer_id(&self, node_id: ðereum_types::H512) -> Option { + return Some(node_id.to_low_u64_le() as PeerId); + } } /// Mock for emulution of async run of new blocks @@ -268,7 +276,7 @@ where fn process_io_message(&self, message: ChainMessageType) { let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None); match message { - ChainMessageType::Consensus(data) => { + ChainMessageType::Consensus(_block, data) => { self.sync.write().propagate_consensus_packet(&mut io, data) } } @@ -341,7 +349,7 @@ impl Peer for EthPeer { self.sync.write().maintain_peers(&mut 
io); self.sync.write().maintain_sync(&mut io); self.sync.write().continue_sync(&mut io); - self.sync.write().propagate_new_transactions(&mut io); + self.sync.write().propagate_new_ready_transactions(&mut io); } fn restart_sync(&self) { @@ -447,6 +455,7 @@ impl TestNet> { test_helpers::new_db(), miner.clone(), channel.clone(), + Arc::new(ShutdownManager::null()), ) .unwrap(); diff --git a/crates/ethcore/sync/src/tests/mod.rs b/crates/ethcore/sync/src/tests/mod.rs index cc6d4b357c..99ae3cc392 100644 --- a/crates/ethcore/sync/src/tests/mod.rs +++ b/crates/ethcore/sync/src/tests/mod.rs @@ -18,6 +18,3 @@ mod chain; mod consensus; pub mod helpers; pub mod snapshot; - -#[cfg(feature = "ipc")] -mod rpc; diff --git a/crates/ethcore/sync/src/tests/rpc.rs b/crates/ethcore/sync/src/tests/rpc.rs deleted file mode 100644 index fd3b1e0440..0000000000 --- a/crates/ethcore/sync/src/tests/rpc.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015-2020 Parity Technologies (UK) Ltd. -// This file is part of OpenEthereum. - -// OpenEthereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// OpenEthereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with OpenEthereum. If not, see . 
- -use super::super::NetworkConfiguration; -use ipc::binary::{deserialize, serialize}; -use network::NetworkConfiguration as BasicNetworkConfiguration; -use std::convert::From; - -#[test] -fn network_settings_serialize() { - let net_cfg = NetworkConfiguration::from(BasicNetworkConfiguration::new_local()); - let serialized = serialize(&net_cfg).unwrap(); - let deserialized = deserialize::(&serialized).unwrap(); - - assert_eq!(net_cfg.udp_port, deserialized.udp_port); -} diff --git a/crates/ethcore/sync/src/tests/snapshot.rs b/crates/ethcore/sync/src/tests/snapshot.rs index 9cc07c1b3b..354b286540 100644 --- a/crates/ethcore/sync/src/tests/snapshot.rs +++ b/crates/ethcore/sync/src/tests/snapshot.rs @@ -15,6 +15,7 @@ // along with OpenEthereum. If not, see . use super::helpers::*; +use crate::{SyncConfig, WarpSync, types::BlockNumber}; use bytes::Bytes; use ethcore::{ client::EachBlockWith, @@ -24,9 +25,6 @@ use ethereum_types::H256; use hash::keccak; use parking_lot::Mutex; use std::{collections::HashMap, sync::Arc}; -use types::BlockNumber; -use SyncConfig; -use WarpSync; pub struct TestSnapshotService { manifest: Option, @@ -181,6 +179,7 @@ impl SnapshotService for TestSnapshotService { } #[test] +#[cfg(feature = "devP2PTests")] fn snapshot_sync() { ::env_logger::try_init().ok(); let mut config = SyncConfig::default(); diff --git a/crates/ethcore/sync/src/transactions_stats.rs b/crates/ethcore/sync/src/transactions_stats.rs index 5b1b6f57ce..a2c1578b3e 100644 --- a/crates/ethcore/sync/src/transactions_stats.rs +++ b/crates/ethcore/sync/src/transactions_stats.rs @@ -14,14 +14,13 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use api::TransactionStats; +use crate::{api::TransactionStats, types::BlockNumber}; use ethereum_types::{H256, H512}; use fastmap::H256FastMap; use std::{ collections::{HashMap, HashSet}, hash::BuildHasher, }; -use types::BlockNumber; type NodeId = H512; diff --git a/crates/ethcore/types/Cargo.toml b/crates/ethcore/types/Cargo.toml index e0c059a3ed..cbfaf37da9 100644 --- a/crates/ethcore/types/Cargo.toml +++ b/crates/ethcore/types/Cargo.toml @@ -16,7 +16,7 @@ parity-bytes = "0.1" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } parity-util-mem = "0.7" rlp = { version = "0.4.6" } -rlp_derive = { path = "../../util/rlp-derive" } +rlp-derive = { version = "0.2" } unexpected = { path = "../../util/unexpected" } serde = "1.0" serde_json = "1.0" diff --git a/crates/ethcore/types/src/filter.rs b/crates/ethcore/types/src/filter.rs index 5415b447fc..8ecedb0bb4 100644 --- a/crates/ethcore/types/src/filter.rs +++ b/crates/ethcore/types/src/filter.rs @@ -67,17 +67,17 @@ impl Clone for Filter { impl Filter { /// Returns combinations of each address and topic. 
pub fn bloom_possibilities(&self) -> Vec { - let blooms = match self.address { - Some(ref addresses) if !addresses.is_empty() => addresses + let blooms = match &self.address { + Some(addresses) if !addresses.is_empty() => addresses .iter() - .map(|ref address| Bloom::from(BloomInput::Raw(address.as_bytes()))) + .map(|address| Bloom::from(BloomInput::Raw(address.as_bytes()))) .collect(), _ => vec![Bloom::default()], }; - self.topics.iter().fold(blooms, |bs, topic| match *topic { + self.topics.iter().fold(blooms, |bs, topic| match topic { None => bs, - Some(ref topics) => bs + Some(topics) => bs .into_iter() .flat_map(|bloom| { topics diff --git a/crates/ethcore/types/src/trace_filter.rs b/crates/ethcore/types/src/trace_filter.rs index 66104acb42..eea0d2685c 100644 --- a/crates/ethcore/types/src/trace_filter.rs +++ b/crates/ethcore/types/src/trace_filter.rs @@ -20,7 +20,7 @@ use crate::ids::BlockId; use ethereum_types::Address; use std::ops::Range; -/// Easy to use trace filter. +/// Easy to use crate::trace filter. pub struct Filter { /// Range of filtering. 
pub range: Range, diff --git a/crates/ethcore/types/src/views/block.rs b/crates/ethcore/types/src/views/block.rs index 99994f0573..0aceeaf433 100644 --- a/crates/ethcore/types/src/views/block.rs +++ b/crates/ethcore/types/src/views/block.rs @@ -44,7 +44,7 @@ impl<'a> BlockView<'a> { /// #[macro_use] /// extern crate common_types as types; /// - /// use types::views::{BlockView}; + /// use crate::types::views::{BlockView}; /// /// fn main() { /// let bytes : &[u8] = &[]; diff --git a/crates/ethcore/types/src/views/body.rs b/crates/ethcore/types/src/views/body.rs index 6772ccd6b5..b7a1ee22d6 100644 --- a/crates/ethcore/types/src/views/body.rs +++ b/crates/ethcore/types/src/views/body.rs @@ -42,7 +42,7 @@ impl<'a> BodyView<'a> { /// #[macro_use] /// extern crate common_types as types; /// - /// use types::views::{BodyView}; + /// use crate::types::views::{BodyView}; /// /// fn main() { /// let bytes : &[u8] = &[]; diff --git a/crates/ethcore/types/src/views/header.rs b/crates/ethcore/types/src/views/header.rs index 69f3175c76..b474def099 100644 --- a/crates/ethcore/types/src/views/header.rs +++ b/crates/ethcore/types/src/views/header.rs @@ -36,7 +36,7 @@ impl<'a> HeaderView<'a> { /// #[macro_use] /// extern crate common_types as types; /// - /// use types::views::{HeaderView}; + /// use crate::types::views::{HeaderView}; /// /// fn main() { /// let bytes : &[u8] = &[]; diff --git a/crates/ethjson/Cargo.toml b/crates/ethjson/Cargo.toml index 436bfef02b..b0d321fe06 100644 --- a/crates/ethjson/Cargo.toml +++ b/crates/ethjson/Cargo.toml @@ -16,6 +16,7 @@ rustc-hex = "1.0" serde = "1.0" serde_json = "1.0" serde_derive = "1.0" +serde_with = { version = "3.6", features = [ "hex", "std", "macros" ] } [dev-dependencies] macros = { path = "../util/macros" } diff --git a/crates/ethjson/src/spec/hbbft.rs b/crates/ethjson/src/spec/hbbft.rs index 10c6a88e71..1401958137 100644 --- a/crates/ethjson/src/spec/hbbft.rs +++ b/crates/ethjson/src/spec/hbbft.rs @@ -17,6 +17,7 @@ //! 
Hbbft parameter deserialization. use ethereum_types::Address; +use serde_with::serde_as; /// Skip block reward parameter. /// Defines one (potential open) range about skips @@ -32,6 +33,37 @@ pub struct HbbftParamsSkipBlockReward { pub to_block: Option, } +#[serde_as] +#[derive(Debug, PartialEq, Deserialize, Serialize, Clone)] +#[serde(deny_unknown_fields)] +#[serde(rename_all = "camelCase")] +pub struct HbbftNetworkFork { + /// Block number at which the fork starts. + pub block_number_start: u64, + + /// Forks that became finished, require a definition when the take over of the + /// specified validators was finished. + #[serde(default)] + pub block_number_end: Option, + + /// Validator set (public keys) of the fork. + #[serde_as(as = "Vec")] + pub validators: Vec>, + + #[serde_as(as = "Vec")] + pub parts: Vec>, + + #[serde_as(as = "Vec>")] + pub acks: Vec>>, +} + +impl HbbftNetworkFork { + /// Returns true if the fork is finished. + pub fn to_json(&self) -> String { + serde_json::to_string(self).expect("HbbftNetworkFork must convert to JSON") + } +} + /// Hbbft parameters. #[derive(Debug, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] @@ -54,6 +86,13 @@ pub struct HbbftParams { /// Directory where to store the Hbbft Messages. /// Usually only the latest HBBFT messages are interesting for Debug, Analytics or Evidence. pub blocks_to_keep_directory: Option, + /// Hbbft network forks. + /// A Fork defines a new Validator Set. + /// This validator set is becomming pending so it can write it's PARTs and ACKS. + /// From beginning of the fork trigger block until the finality of the key gen transactions, + /// no block verifications are done. + #[serde(default)] + pub forks: Vec, } /// Hbbft engine config. 
@@ -88,10 +127,64 @@ impl HbbftParams { #[cfg(test)] mod tests { - use super::Hbbft; use ethereum_types::Address; + + use super::Hbbft; use std::str::FromStr; + #[test] + fn hbbft_deserialization_forks() { + let s = r#"{ + "params": { + "minimumBlockTime": 0, + "maximumBlockTime": 600, + "transactionQueueSizeTrigger": 1, + "isUnitTest": true, + "blockRewardContractAddress": "0x2000000000000000000000000000000000000002", + "forks": [ + { + "blockNumberStart" : 777, + "validators": [ + "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678" + ], + "parts": ["19585436b7d97298a751e2a6020c30677497772013001420c0a6aea5790347bdf5531c1387be685a232b01ec614913b18da0a6cbcd1074f1733f902a7eb656e9"], + "acks": [["19585436b7d97298a751e2a6020c30677497772013001420c0a6aea5790347bdf5531c1387be685a232b01ec614913b18da0a6cbcd1074f1733f902a7eb656e9"]] + } + ] + } + }"#; + + let deserialized: Hbbft = serde_json::from_str(s).unwrap(); + assert_eq!(deserialized.params.forks.len(), 1); + assert_eq!( + deserialized + .params + .forks + .get(0) + .expect("") + .block_number_start, + 777 + ); + assert_eq!(deserialized.params.forks.get(0).expect("").parts.len(), 1); + assert_eq!(deserialized.params.forks.get(0).expect("").acks.len(), 1); + assert_eq!( + deserialized.params.forks.get(0).expect("").validators.len(), + 1 + ); + assert_eq!( + deserialized + .params + .forks + .get(0) + .expect("") + .validators + .get(0) + .expect("") + .len(), + 64 + ); + } + #[test] fn hbbft_deserialization() { let s = r#"{ @@ -176,4 +269,26 @@ mod tests { false ); } + + #[test] + fn test_fork_serialisation() { + let fork = super::HbbftNetworkFork { + block_number_start: 10, + block_number_end: Some(100), + validators: vec![vec![1, 2, 3, 4]], + parts: vec![vec![5, 6, 7, 8]], + acks: vec![vec![vec![9, 10, 11, 12]]], + }; + + let json = fork.to_json(); + let deserialized: super::HbbftNetworkFork = serde_json::from_str(&json).unwrap(); + 
assert_eq!(deserialized.block_number_start, 10); + assert_eq!(deserialized.block_number_end, Some(100)); + assert_eq!(deserialized.validators.len(), 1); + assert_eq!(deserialized.parts.len(), 1); + assert_eq!(deserialized.acks.len(), 1); + + assert_eq!(deserialized.parts[0][1], 6); + assert_eq!(deserialized.acks[0][0][2], 11); + } } diff --git a/crates/net/fake-fetch/Cargo.toml b/crates/net/fake-fetch/Cargo.toml index 870da27c88..cdf89a54fa 100644 --- a/crates/net/fake-fetch/Cargo.toml +++ b/crates/net/fake-fetch/Cargo.toml @@ -4,6 +4,7 @@ name = "fake-fetch" version = "0.0.1" license = "GPL-3.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] fetch = { path = "../fetch" } diff --git a/crates/net/fetch/Cargo.toml b/crates/net/fetch/Cargo.toml index a094842bcd..4f19bb2d26 100644 --- a/crates/net/fetch/Cargo.toml +++ b/crates/net/fetch/Cargo.toml @@ -1,10 +1,11 @@ [package] description = "HTTP/HTTPS fetching library" -homepage = "https://github.com/openethereum/openethereum" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "fetch" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2018" [dependencies] futures = "0.1" diff --git a/crates/net/network-devp2p/Cargo.toml b/crates/net/network-devp2p/Cargo.toml index bcd80899cd..241d318c82 100644 --- a/crates/net/network-devp2p/Cargo.toml +++ b/crates/net/network-devp2p/Cargo.toml @@ -1,10 +1,11 @@ [package] description = "DevP2P implementation of the ethcore network library" -homepage = "https://github.com/openethereum/openethereum" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "ethcore-network-devp2p" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] log = "0.4" @@ -12,11 +13,10 @@ mio = "0.6.8" bytes = "0.4" rand = "0.7.3" tiny-keccak = "1.4" -rust-crypto = "0.2.34" slab = "0.2" -igd = "0.7" +igd = "0.8" libc = "0.2.7" -parking_lot = "0.11.1" +parking_lot = "0.12" ansi_term = "0.10" 
rustc-hex = "1.0" ethcore-io = { path = "../../runtime/io", features = ["mio"] } @@ -24,7 +24,7 @@ parity-bytes = "0.1" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } ethcore-network = { path = "../network" } ethereum-types = "0.9.2" -ethkey = { path = "../../../crates/accounts/ethkey" } +ethkey = { path = "../../accounts/ethkey" } rlp = { version = "0.4.6" } parity-path = "0.1" ipnetwork = "0.12.6" @@ -35,6 +35,8 @@ serde_json = "1.0" serde_derive = "1.0" error-chain = { version = "0.12", default-features = false } lru-cache = "0.1" +stats = { path = "../../util/stats" } +time-utils = { path = "../../util/time-utils" } [dev-dependencies] env_logger = "0.5" diff --git a/crates/net/network-devp2p/src/connection.rs b/crates/net/network-devp2p/src/connection.rs index 27211d9000..783f1f860e 100644 --- a/crates/net/network-devp2p/src/connection.rs +++ b/crates/net/network-devp2p/src/connection.rs @@ -14,19 +14,21 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{ + handshake::Handshake, + io::{IoContext, StreamToken}, +}; use bytes::{Buf, BufMut}; use crypto::{ aes::{AesCtr256, AesEcb256}, publickey::Secret, }; use ethereum_types::{H128, H256, H512}; -use handshake::Handshake; use hash::{keccak, write_keccak}; -use io::{IoContext, StreamToken}; use mio::{ + PollOpt, Ready, Token, deprecated::{EventLoop, Handler, TryRead, TryWrite}, tcp::*, - PollOpt, Ready, Token, }; use network::{Error, ErrorKind}; use parity_bytes::*; @@ -41,8 +43,8 @@ use std::{ use tiny_keccak::Keccak; const ENCRYPTED_HEADER_LEN: usize = 32; -const RECEIVE_PAYLOAD: Duration = Duration::from_secs(30); -pub const MAX_PAYLOAD_SIZE: usize = (1 << 24) - 1; +const RECEIVE_PAYLOAD: Duration = Duration::from_secs(60); +pub const MAX_PAYLOAD_SIZE: usize = (1 << 26) - 1; /// Network responses should try not to go over this limit. 
/// This should be lower than MAX_PAYLOAD_SIZE @@ -304,7 +306,6 @@ pub enum WriteStatus { /// `RLPx` packet pub struct Packet { - pub protocol: u16, pub data: Bytes, } @@ -515,10 +516,7 @@ impl EncryptedConnection { self.decoder .decrypt(&mut payload[..self.payload_len + padding])?; payload.truncate(self.payload_len); - Ok(Packet { - protocol: self.protocol_id, - data: payload, - }) + Ok(Packet { data: payload }) } /// Update MAC after reading or writing any data. @@ -586,7 +584,7 @@ mod tests { }; use super::*; - use io::*; + use crate::io::*; use mio::Ready; use parity_bytes::Bytes; diff --git a/crates/net/network-devp2p/src/discovery.rs b/crates/net/network-devp2p/src/discovery.rs index d3d727ae43..522c25694d 100644 --- a/crates/net/network-devp2p/src/discovery.rs +++ b/crates/net/network-devp2p/src/discovery.rs @@ -14,22 +14,22 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use crypto::publickey::{recover, sign, KeyPair, Secret}; +use crate::node_table::*; +use crypto::publickey::{KeyPair, Secret, recover, sign}; use ethereum_types::{H256, H520}; use hash::keccak; use lru_cache::LruCache; use network::{Error, ErrorKind, IpFilter}; -use node_table::*; use parity_bytes::Bytes; use rlp::{Rlp, RlpStream}; use std::{ - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + collections::{HashMap, HashSet, VecDeque, hash_map::Entry}, default::Default, net::SocketAddr, time::{Duration, Instant, SystemTime, UNIX_EPOCH}, }; -use PROTOCOL_VERSION; +use crate::PROTOCOL_VERSION; const ADDRESS_BYTES_SIZE: usize = 32; // Size of address type in bytes. const ADDRESS_BITS: usize = 8 * ADDRESS_BYTES_SIZE; // Denoted by n in [Kademlia]. 
@@ -982,7 +982,7 @@ where #[cfg(test)] mod tests { use super::*; - use node_table::{Node, NodeEndpoint, NodeId}; + use crate::node_table::{Node, NodeEndpoint, NodeId}; use std::net::{IpAddr, Ipv4Addr}; use crypto::publickey::{Generator, Random}; @@ -1234,39 +1234,39 @@ mod tests { udp_port: 40447, }; let node_ids_hex: [&str; 32] = [ - "22536fa57acc12c4993295cbc26fef4550513496712b301ad2283d356c8108521244a362e64e6d907a0d0b4e65526699c5ae3cfebfc680505fe3b33d50672835", - "22c482f42401546f8dd7ed6b1c0cad976da6630730f1116614579ccb084791a528ff2676bfe94434de80e5d7e479f1ea1d7737077da3bd5e69a0f3e5bf596091", - "234c73e3a8f6835a7f9a9d2a896bff4908d66d21d5433a2c37d94f1fa9a6ca17d02388f31013ff87e3ad86506e76bd1006b9cac3815974a2b47c8d4f2124697e", - "2a5aaf4e2046c521e890dc82313c6151a55078f045a7e3d259f168238d029271cdd9a0943468d45c1e36a34a8a6d4de4b0262e48d3c8cfdd4c2aab5df42926b9", - "341d8c94d9670461186cfc1f66d4246cb12384940e9f621ec8d6c216b5d037cde5f7a41b70474ca36ced4a4f2fe91c9dc5a24a128414672661f78e8611d54bfd", - "3d9fd01851f3ae1bfd06b48e89738f29f9a2b4dce3ab7864df4fccca55d1ac88044956ba47d0c4cb44a19924626a3a3aa5a4de8958365cb7385111ce7b929200", - "406d5507a7fbc194a495800ae8cf408093336febc24d03d6c63756f522274ab02146ceb1b0213291a9a1544680503837519f88f1e8677d921de62c82935b4e6c", - "4c537f00805f320616ee49c7bc36e1d7e52a04a782b0cc00fd3d6b77200b027cef5f875ed38f1167fef4b02d7bd49a661812301d9d680bb62297131204c035f9", - "4fc8e3fdbdd7acad82b283ac52c121b805f3b15ffcaa6b2ca67b9e375aa88e978951ffa3d03ee13be99f0ee987db0bbfc6a7ca02b175e9123d79826025b4089d", - "55b5042a6910bc908a0520966e8cbcc92ac299bdb7efbfbcf703df1506fa0f9b09c5eeb930080de848d2864cca71f885942852c51233db0ee46fe0447306d61f", - "5d24f28b350c4c37fc4dad7f418e029992c9e4ac356bb3d9a1356ba1076339863c05044d7ceba233c65779401f8a3b38fe67b6a592c1be4834dc869f7bb932eb", - "5f6edaf2f2ae3003f4b4ff90b8e71a717c832c71a634d96e77fe046f9a88adc8de5718ff3c47659aea4cead5376df5b731e1b6530e6b0999f56ad75d4dabd3f6", - 
"6214c04211efe91abd23d65e2dc8e711b06d4fb13dcfd65b691dc51f58455b2145f9b38f523b72a45a12705a28d389308a34455720d774c9b805326df42b5a63", - "69df92573ddbbce88b72a930843dbb70728b2a020e0cc4e8ba805dcf7f19297bfc5def4ca447e9e6ec66971be1815b8f49042720431f698b6a87a185d94fa6c8", - "72ffc23de007cf8b6f4a117f7427b532d05861c314344ffa265175f57ee45dae041a710a4dc74124dba1dabdc0f52dfd21e3154d1d4285aab529810c6161d623", - "80b567f279a9512f3a66ebd8f87a93acd4d50bf66f5eff6d04039c1f5838e37021e981539659b33e0644b243fc9671209a80cbef40d1bcf7c7117d353cb45532", - "9009dc9e3bf50595f84271f46d4c7a5ad6971f7d2ffce1905bfc40a407d34fc5e2dcebd92746eadcd2c5fa4d5aaccb0e01b542d506b361851df3f19e6bc629a3", - "95264f56e091efeba911003fd01eeb2c81f6fc4bb7b10c92e4c7bfaf460b7246d232e61ad8a223d74870981a84e15b2d5134c25d931cb860c6912b20a2d3ac01", - "96013a472a9f7ff9c5c76b5ca958f14ee510d826703aa41d4c88eac51d30d14229b9f19f6e0469c37aaa6d2136a978a4aaa38ca766f48e53e569f84e44252962", - "a513c988cf8480ad2992caa64e3fa059ce07efda260dfeefed78e1d41ea3f97844603b8a9737eb633086fd9ac2f201200cb656cda8a91bf6cc500d6039db6f53", - "ab3311f38e3641c8b3b1fd36dd7f94b148166e267258e840d29d1859537c74f202bd3342359b3623f96c23fa662d1b65182a898bf20343744b37cb265182e500", - "ac8f41dbd637891a08c9cf715c23577bdd431ba40231682a5a9ba7fd6cb6d66c04f63d6d65c7d9f8737e641e05fdbeede57138a174f0d55e7835575dd6cddd98", - "accdad251888d53e4e18efee1e0d749d050216b14896efb657e9c7b1b78dab82a5b6fb3234017aa19a2f50475d73960f352d308b2e0e841cbebaf418362a4f21", - "b138622208f74d2b8e8fc10bcd4cf3302685cd77d339280a939474b92be8b93e441c50709e25c82cc88a2a4207e9f2938912d60600226efe322b43c6ef5e7aef", - "b4f64e1fa6a5cd6198b2515bde63fbdabaf7e7a31dbaf5369babbda4b8cd0bf5025ac4b7d2d6e6e3bc76c890df585d28d4815e464c8792ef677df9206864a12b", - "c1136e08a27c93812ae2dd47201d9e81c82d1995001b88dba9eec700e1d3385dfaf7ae834226c3c90a138f1808cd10b5502f49ee774a2bc707f34bd7d160b7bd", - 
"c203ae9b5d1953b0ac462e66338800ec26982e2af54bd444fc8978973191633d4f483e31b28233c07bb99f34d57c680fa5f8e093e64f13b235005b7ab6e2d594", - "c2e1067c58a9948e773e0a3637d946e26d95762f89ec9d35e2ad84f770309d94168d4e112c78d62b60efc6216bc5d31475f24307b1b8e0fa8dcbb18a10cb85f5", - "d60ecb1a89e0d5aeff14c9a95da9f5492eb15871c53563b86b7c5ddf0da74b4c29e682fdd22aae2290e0b16ef4b6d707ef55396ca98f755c95b689cf65ce5f80", - "df5ad4ea6242929df86f2162d1cc62b0e0a6f0a03428a39dea98f6a689335b5ceaf1f0696c17b717b141aeb45a29108d95c3a7d2d1d0bb3441219504ae672917", - "e1268f5dd9552a11989df9d4953bb388e7466711b2bd9882a3ed4d0767a21f046c53c20f9a18d66bae1d6a5544492857ddecb0b5b4818bd4557be252ddd66c71", - "e626019dc0b50b9e254461f19d29e69a4669c5256134a6352c6c30d3bc55d201a5b43fc2e006556cfaf29765b683e807e03093798942826244e4ee9e47c75d3f", - ]; + "22536fa57acc12c4993295cbc26fef4550513496712b301ad2283d356c8108521244a362e64e6d907a0d0b4e65526699c5ae3cfebfc680505fe3b33d50672835", + "22c482f42401546f8dd7ed6b1c0cad976da6630730f1116614579ccb084791a528ff2676bfe94434de80e5d7e479f1ea1d7737077da3bd5e69a0f3e5bf596091", + "234c73e3a8f6835a7f9a9d2a896bff4908d66d21d5433a2c37d94f1fa9a6ca17d02388f31013ff87e3ad86506e76bd1006b9cac3815974a2b47c8d4f2124697e", + "2a5aaf4e2046c521e890dc82313c6151a55078f045a7e3d259f168238d029271cdd9a0943468d45c1e36a34a8a6d4de4b0262e48d3c8cfdd4c2aab5df42926b9", + "341d8c94d9670461186cfc1f66d4246cb12384940e9f621ec8d6c216b5d037cde5f7a41b70474ca36ced4a4f2fe91c9dc5a24a128414672661f78e8611d54bfd", + "3d9fd01851f3ae1bfd06b48e89738f29f9a2b4dce3ab7864df4fccca55d1ac88044956ba47d0c4cb44a19924626a3a3aa5a4de8958365cb7385111ce7b929200", + "406d5507a7fbc194a495800ae8cf408093336febc24d03d6c63756f522274ab02146ceb1b0213291a9a1544680503837519f88f1e8677d921de62c82935b4e6c", + "4c537f00805f320616ee49c7bc36e1d7e52a04a782b0cc00fd3d6b77200b027cef5f875ed38f1167fef4b02d7bd49a661812301d9d680bb62297131204c035f9", + 
"4fc8e3fdbdd7acad82b283ac52c121b805f3b15ffcaa6b2ca67b9e375aa88e978951ffa3d03ee13be99f0ee987db0bbfc6a7ca02b175e9123d79826025b4089d", + "55b5042a6910bc908a0520966e8cbcc92ac299bdb7efbfbcf703df1506fa0f9b09c5eeb930080de848d2864cca71f885942852c51233db0ee46fe0447306d61f", + "5d24f28b350c4c37fc4dad7f418e029992c9e4ac356bb3d9a1356ba1076339863c05044d7ceba233c65779401f8a3b38fe67b6a592c1be4834dc869f7bb932eb", + "5f6edaf2f2ae3003f4b4ff90b8e71a717c832c71a634d96e77fe046f9a88adc8de5718ff3c47659aea4cead5376df5b731e1b6530e6b0999f56ad75d4dabd3f6", + "6214c04211efe91abd23d65e2dc8e711b06d4fb13dcfd65b691dc51f58455b2145f9b38f523b72a45a12705a28d389308a34455720d774c9b805326df42b5a63", + "69df92573ddbbce88b72a930843dbb70728b2a020e0cc4e8ba805dcf7f19297bfc5def4ca447e9e6ec66971be1815b8f49042720431f698b6a87a185d94fa6c8", + "72ffc23de007cf8b6f4a117f7427b532d05861c314344ffa265175f57ee45dae041a710a4dc74124dba1dabdc0f52dfd21e3154d1d4285aab529810c6161d623", + "80b567f279a9512f3a66ebd8f87a93acd4d50bf66f5eff6d04039c1f5838e37021e981539659b33e0644b243fc9671209a80cbef40d1bcf7c7117d353cb45532", + "9009dc9e3bf50595f84271f46d4c7a5ad6971f7d2ffce1905bfc40a407d34fc5e2dcebd92746eadcd2c5fa4d5aaccb0e01b542d506b361851df3f19e6bc629a3", + "95264f56e091efeba911003fd01eeb2c81f6fc4bb7b10c92e4c7bfaf460b7246d232e61ad8a223d74870981a84e15b2d5134c25d931cb860c6912b20a2d3ac01", + "96013a472a9f7ff9c5c76b5ca958f14ee510d826703aa41d4c88eac51d30d14229b9f19f6e0469c37aaa6d2136a978a4aaa38ca766f48e53e569f84e44252962", + "a513c988cf8480ad2992caa64e3fa059ce07efda260dfeefed78e1d41ea3f97844603b8a9737eb633086fd9ac2f201200cb656cda8a91bf6cc500d6039db6f53", + "ab3311f38e3641c8b3b1fd36dd7f94b148166e267258e840d29d1859537c74f202bd3342359b3623f96c23fa662d1b65182a898bf20343744b37cb265182e500", + "ac8f41dbd637891a08c9cf715c23577bdd431ba40231682a5a9ba7fd6cb6d66c04f63d6d65c7d9f8737e641e05fdbeede57138a174f0d55e7835575dd6cddd98", + 
"accdad251888d53e4e18efee1e0d749d050216b14896efb657e9c7b1b78dab82a5b6fb3234017aa19a2f50475d73960f352d308b2e0e841cbebaf418362a4f21", + "b138622208f74d2b8e8fc10bcd4cf3302685cd77d339280a939474b92be8b93e441c50709e25c82cc88a2a4207e9f2938912d60600226efe322b43c6ef5e7aef", + "b4f64e1fa6a5cd6198b2515bde63fbdabaf7e7a31dbaf5369babbda4b8cd0bf5025ac4b7d2d6e6e3bc76c890df585d28d4815e464c8792ef677df9206864a12b", + "c1136e08a27c93812ae2dd47201d9e81c82d1995001b88dba9eec700e1d3385dfaf7ae834226c3c90a138f1808cd10b5502f49ee774a2bc707f34bd7d160b7bd", + "c203ae9b5d1953b0ac462e66338800ec26982e2af54bd444fc8978973191633d4f483e31b28233c07bb99f34d57c680fa5f8e093e64f13b235005b7ab6e2d594", + "c2e1067c58a9948e773e0a3637d946e26d95762f89ec9d35e2ad84f770309d94168d4e112c78d62b60efc6216bc5d31475f24307b1b8e0fa8dcbb18a10cb85f5", + "d60ecb1a89e0d5aeff14c9a95da9f5492eb15871c53563b86b7c5ddf0da74b4c29e682fdd22aae2290e0b16ef4b6d707ef55396ca98f755c95b689cf65ce5f80", + "df5ad4ea6242929df86f2162d1cc62b0e0a6f0a03428a39dea98f6a689335b5ceaf1f0696c17b717b141aeb45a29108d95c3a7d2d1d0bb3441219504ae672917", + "e1268f5dd9552a11989df9d4953bb388e7466711b2bd9882a3ed4d0767a21f046c53c20f9a18d66bae1d6a5544492857ddecb0b5b4818bd4557be252ddd66c71", + "e626019dc0b50b9e254461f19d29e69a4669c5256134a6352c6c30d3bc55d201a5b43fc2e006556cfaf29765b683e807e03093798942826244e4ee9e47c75d3f", + ]; let node_entries = node_ids_hex .iter() .map(|node_id_hex| NodeId::from_str(node_id_hex).unwrap()) diff --git a/crates/net/network-devp2p/src/handshake.rs b/crates/net/network-devp2p/src/handshake.rs index c9379e50c1..7c5cc9f45f 100644 --- a/crates/net/network-devp2p/src/handshake.rs +++ b/crates/net/network-devp2p/src/handshake.rs @@ -14,16 +14,18 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use connection::Connection; -use crypto::publickey::{ecdh, ecies, recover, sign, Generator, KeyPair, Public, Random, Secret}; +use crate::{ + connection::Connection, + host::HostInfo, + io::{IoContext, StreamToken}, + node_table::NodeId, +}; +use crypto::publickey::{Generator, KeyPair, Public, Random, Secret, ecdh, ecies, recover, sign}; use ethereum_types::{H256, H520}; -use host::HostInfo; -use io::{IoContext, StreamToken}; use mio::tcp::*; use network::{Error, ErrorKind}; -use node_table::NodeId; use parity_bytes::Bytes; -use rand::{random, Rng}; +use rand::{Rng, random}; use rlp::{Rlp, RlpStream}; use std::time::Duration; @@ -142,7 +144,13 @@ impl Handshake { while let Some(data) = self.connection.readable()? { match self.state { HandshakeState::New => {} - HandshakeState::StartSession => {} + HandshakeState::StartSession => { + error!(target: "network", "starting session, clearing timer for {}", self.connection.token); + if let Err(e) = io.clear_timer(self.connection.token) { + debug!(target: "network", "failed to clear timer for session: {} {e:?}", self.connection.token); + } + break; + } HandshakeState::ReadingAuth => { self.read_auth(io, host.secret(), &data)?; } @@ -156,10 +164,6 @@ impl Handshake { self.read_ack_eip8(host.secret(), &data)?; } } - if self.state == HandshakeState::StartSession { - io.clear_timer(self.connection.token).ok(); - break; - } } Ok(()) } @@ -239,7 +243,7 @@ impl Handshake { where Message: Send + Clone + Sync + 'static, { - trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); + trace!(target: "network", "{} Received EIP8 handshake auth from {:?}", self.connection.token, self.connection.remote_addr_str()); self.auth_cipher.extend_from_slice(data); let auth = ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])?; let rlp = Rlp::new(&auth); @@ -271,6 +275,7 @@ impl Handshake { self.remote_ephemeral = Public::from_slice(&ack[0..64]); self.remote_nonce = 
H256::from_slice(&ack[64..(64 + 32)]); self.state = HandshakeState::StartSession; + trace!(target: "network", "handshake completed for from {:?}", self.connection.remote_addr_str()); } Err(_) => { // Try to interpret as EIP-8 packet @@ -288,7 +293,7 @@ impl Handshake { } fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), Error> { - trace!(target: "network", "Received EIP8 handshake auth from {:?}", self.connection.remote_addr_str()); + trace!(target: "network", "{} Received EIP8 handshake auth from {:?}",self.connection.token, self.connection.remote_addr_str()); self.ack_cipher.extend_from_slice(data); let ack = ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..])?; let rlp = Rlp::new(&ack); @@ -381,9 +386,9 @@ impl Handshake { #[cfg(test)] mod test { use super::*; + use crate::io::*; use crypto::publickey::Public; use ethereum_types::{H256, H512}; - use io::*; use mio::tcp::TcpStream; use rustc_hex::FromHex; use std::str::FromStr; diff --git a/crates/net/network-devp2p/src/host.rs b/crates/net/network-devp2p/src/host.rs index e8d846889f..8fb8bae38c 100644 --- a/crates/net/network-devp2p/src/host.rs +++ b/crates/net/network-devp2p/src/host.rs @@ -14,10 +14,13 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::{ + mio::{deprecated::EventLoop, tcp::*, udp::*, *}, + session_container::{SessionContainer, SharedSession}, +}; use crypto::publickey::{Generator, KeyPair, Random, Secret}; use ethereum_types::H256; use hash::keccak; -use mio::{deprecated::EventLoop, tcp::*, udp::*, *}; use rlp::{Encodable, RlpStream}; use std::{ cmp::{max, min}, @@ -29,45 +32,63 @@ use std::{ path::{Path, PathBuf}, str::FromStr, sync::{ - atomic::{AtomicBool, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicBool, AtomicU64, Ordering as AtomicOrdering}, }, time::Duration, }; -use discovery::{Discovery, NodeEntry, TableUpdates, MAX_DATAGRAM_SIZE}; -use io::*; -use ip_utils::{map_external_address, select_public_address}; +use crate::{ + PROTOCOL_VERSION, + discovery::{Discovery, MAX_DATAGRAM_SIZE, NodeEntry, TableUpdates}, + io::*, + ip_utils::{map_external_address, select_public_address}, + node_table::*, + session::SessionData, +}; use network::{ - client_version::ClientVersion, ConnectionDirection, ConnectionFilter, DisconnectReason, Error, - ErrorKind, NetworkConfiguration, NetworkContext as NetworkContextTrait, NetworkIoMessage, + ConnectionDirection, ConnectionFilter, DisconnectReason, Error, ErrorKind, + NetworkConfiguration, NetworkContext as NetworkContextTrait, NetworkIoMessage, NetworkProtocolHandler, NonReservedPeerMode, PacketId, PeerId, ProtocolId, SessionInfo, + client_version::ClientVersion, }; -use node_table::*; use parity_path::restrict_permissions_owner; use parking_lot::{Mutex, RwLock}; -use session::{Session, SessionData}; -use PROTOCOL_VERSION; +use stats::{PrometheusMetrics, PrometheusRegistry}; -type Slab = ::slab::Slab; - -const MAX_SESSIONS: usize = 2048 + MAX_HANDSHAKES; -const MAX_HANDSHAKES: usize = 1024; +const MAX_SESSIONS: usize = 2048; +const MAX_HANDSHAKES: usize = 1899; const DEFAULT_PORT: u16 = 30303; +//const SYS_TIMER: TimerToken = 0; // StreamToken/TimerToken -const TCP_ACCEPT: StreamToken = SYS_TIMER + 1; -const IDLE: TimerToken = SYS_TIMER + 2; 
-const DISCOVERY: StreamToken = SYS_TIMER + 3; -const DISCOVERY_REFRESH: TimerToken = SYS_TIMER + 4; -const FAST_DISCOVERY_REFRESH: TimerToken = SYS_TIMER + 5; -const DISCOVERY_ROUND: TimerToken = SYS_TIMER + 6; -const NODE_TABLE: TimerToken = SYS_TIMER + 7; -const FIRST_SESSION: StreamToken = 0; -const LAST_SESSION: StreamToken = FIRST_SESSION + MAX_SESSIONS - 1; -const USER_TIMER: TimerToken = LAST_SESSION + 256; -const SYS_TIMER: TimerToken = LAST_SESSION + 1; +const TCP_ACCEPT: StreamToken = 1; +const IDLE: TimerToken = 2; +const DISCOVERY: StreamToken = 3; +const DISCOVERY_REFRESH: TimerToken = 4; +const FAST_DISCOVERY_REFRESH: TimerToken = 5; +const DISCOVERY_ROUND: TimerToken = 6; +const NODE_TABLE: TimerToken = 7; + +// Maximum count of peer mappings we remember. Each node ID takes 64 bytes for the node ID, 8 bytes for the peer id, and probably 3 * 8 bytes for internals = about 100 bytes per entry. +// 10000 elements should take about 1 MB of memory. +const MAX_NODE_TO_PEER_MAPPINGS: usize = 10000; + +// The user timers are a collection of timers that are registered by the protocol handlers, +// therefore coming from other modules. +// To not be in conflict with the token ID system, we choose a very +// high number for the user timers, +// one that is realistically unreachable by the peer stream tokens, +// but still leaves enough number space for user timers. 
+const FIRST_USER_TIMER: TimerToken = 8; +const MAX_USER_TIMERS: TimerToken = 91; +const LAST_USER_TIMER: TimerToken = FIRST_USER_TIMER + MAX_USER_TIMERS; + +const FIRST_HANDSHAKE: StreamToken = LAST_USER_TIMER + 1; +const LAST_HANDSHAKE: StreamToken = FIRST_HANDSHAKE + MAX_HANDSHAKES; +const FIRST_SESSION: StreamToken = FIRST_HANDSHAKE + MAX_HANDSHAKES + 1; +const LAST_SESSION: StreamToken = StreamToken::MAX; // Timeouts // for IDLE TimerToken @@ -104,10 +125,51 @@ impl Encodable for CapabilityInfo { pub struct NetworkContext<'s> { io: &'s IoContext, protocol: ProtocolId, - sessions: Arc>>, + sessions: &'s SessionContainer, session: Option, session_id: Option, reserved_peers: &'s HashSet, + statistics: &'s NetworkingStatistics, +} + +pub struct NetworkingStatistics { + logging_enabled: bool, + + bytes_sent: AtomicU64, + peer_losses: AtomicU64, + packages_send: AtomicU64, +} + +impl NetworkingStatistics { + pub fn new(logging_enabled: bool) -> NetworkingStatistics { + NetworkingStatistics { + logging_enabled: logging_enabled, + bytes_sent: AtomicU64::new(0), + peer_losses: AtomicU64::new(0), + packages_send: AtomicU64::new(0), + } + } +} + +impl PrometheusMetrics for NetworkingStatistics { + fn prometheus_metrics(&self, registry: &mut PrometheusRegistry) { + registry.register_counter( + "p2p_bytes_sent", + "total", + self.bytes_sent.load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + registry.register_counter( + "p2p_packages_sent", + "count", + self.packages_send + .load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + registry.register_counter( + "p2p_peer_losses", + "count", + self.peer_losses.load(std::sync::atomic::Ordering::Relaxed) as i64, + ); + } } impl<'s> NetworkContext<'s> { @@ -116,8 +178,9 @@ impl<'s> NetworkContext<'s> { io: &'s IoContext, protocol: ProtocolId, session: Option, - sessions: Arc>>, + sessions: &'s SessionContainer, reserved_peers: &'s HashSet, + statistics: &'s NetworkingStatistics, ) -> NetworkContext<'s> { let id = 
session.as_ref().map(|s| s.lock().token()); NetworkContext { @@ -126,20 +189,32 @@ impl<'s> NetworkContext<'s> { session_id: id, session, sessions, - reserved_peers: reserved_peers, + reserved_peers, + statistics, } } fn resolve_session(&self, peer: PeerId) -> Option { match self.session_id { Some(id) if id == peer => self.session.clone(), - _ => self.sessions.read().get(peer).cloned(), + _ => { + if peer >= FIRST_SESSION { + self.sessions.get_session(peer) + } else { + self.sessions.get_handshake(peer) + } + } } } } impl<'s> NetworkContextTrait for NetworkContext<'s> { - fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), Error> { + fn send( + &self, + peer: PeerId, + packet_id: PacketId, + data: Vec, + ) -> std::result::Result<(), network::Error> { self.send_protocol(self.protocol, peer, packet_id, data) } @@ -149,19 +224,37 @@ impl<'s> NetworkContextTrait for NetworkContext<'s> { peer: PeerId, packet_id: PacketId, data: Vec, - ) -> Result<(), Error> { + ) -> std::result::Result<(), Error> { let session = self.resolve_session(peer); if let Some(session) = session { session .lock() .send_packet(self.io, Some(protocol), packet_id as u8, &data)?; + + if self.statistics.logging_enabled { + self.statistics + .bytes_sent + .fetch_add(data.len() as u64, std::sync::atomic::Ordering::Relaxed); + self.statistics + .packages_send + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + } } else { - trace!(target: "network", "Send: Peer no longer exist") + trace!(target: "network", "Send: Peer no longer exist"); + if self.statistics.logging_enabled { + self.statistics + .peer_losses + .fetch_add(1, std::sync::atomic::Ordering::Relaxed); + } } Ok(()) } - fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), Error> { + fn respond( + &self, + packet_id: PacketId, + data: Vec, + ) -> std::result::Result<(), network::Error> { assert!( self.session.is_some(), "Respond called without network context" @@ -175,27 +268,28 @@ impl<'s> 
NetworkContextTrait for NetworkContext<'s> { fn disable_peer(&self, peer: PeerId) { self.io .message(NetworkIoMessage::DisablePeer(peer)) - .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); + .unwrap_or_else(|e| warn!("Error disable_peer: {:?} {:?}", peer, e)); } fn disconnect_peer(&self, peer: PeerId) { self.io .message(NetworkIoMessage::Disconnect(peer)) - .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); + .unwrap_or_else(|e| warn!("Error disconnect_peer: {:?} {:?}", peer, e)); } fn is_expired(&self) -> bool { self.session.as_ref().map_or(false, |s| s.lock().expired()) } - fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error> { + fn register_timer(&self, token: TimerToken, delay: Duration) -> std::result::Result<(), Error> { + trace!(target: "network", "Registering timer: {:?} for protocol: {} with delay {}", token, self.protocol, delay.as_millis()); self.io .message(NetworkIoMessage::AddTimer { token, delay, protocol: self.protocol, }) - .unwrap_or_else(|e| warn!("Error sending network IO message: {:?}", e)); + .unwrap_or_else(|e| warn!("Error register_timer: {:?}", e)); Ok(()) } @@ -226,18 +320,8 @@ impl<'s> NetworkContextTrait for NetworkContext<'s> { .unwrap_or(false) } - fn node_id_to_peer_id(&self, node_id: NodeId) -> Option { - let sessions = self.sessions.read(); - let sessions = &*sessions; - - for i in (0..MAX_SESSIONS).map(|x| x + FIRST_SESSION) { - if let Some(session) = sessions.get(i) { - if session.lock().info.id == Some(node_id) { - return Some(i); - } - } - } - None + fn node_id_to_peer_id(&self, node_id: &NodeId) -> Option { + self.sessions.node_id_to_peer_id(node_id, true) } } @@ -278,8 +362,6 @@ impl HostInfo { } } -type SharedSession = Arc>; - #[derive(Copy, Clone)] struct ProtocolTimer { pub protocol: ProtocolId, @@ -293,7 +375,8 @@ pub struct Host { pub info: RwLock, udp_socket: Mutex>, tcp_listener: Mutex, - sessions: Arc>>, + handshake_lock: Mutex<()>, + sessions: 
SessionContainer, discovery: Mutex>>, nodes: RwLock, handlers: RwLock>>, @@ -302,6 +385,7 @@ pub struct Host { reserved_nodes: RwLock>, stopping: AtomicBool, filter: Option>, + statistics: NetworkingStatistics, } impl Host { @@ -362,17 +446,21 @@ impl Host { discovery: Mutex::new(None), udp_socket: Mutex::new(None), tcp_listener: Mutex::new(tcp_listener), - sessions: Arc::new(RwLock::new(Slab::new_starting_at( - FIRST_SESSION, + sessions: SessionContainer::new( + FIRST_HANDSHAKE, MAX_SESSIONS, - ))), + MAX_NODE_TO_PEER_MAPPINGS, + MAX_HANDSHAKES, + ), + handshake_lock: Mutex::new(()), nodes: RwLock::new(NodeTable::new(path)), handlers: RwLock::new(HashMap::new()), timers: RwLock::new(HashMap::new()), - timer_counter: RwLock::new(USER_TIMER), + timer_counter: RwLock::new(FIRST_USER_TIMER), reserved_nodes: RwLock::new(HashSet::new()), stopping: AtomicBool::new(false), filter, + statistics: NetworkingStatistics::new(true), }; for n in boot_nodes { @@ -437,10 +525,10 @@ impl Host { drop(info); if let NonReservedPeerMode::Deny = mode { // disconnect all non-reserved peers here. 
- let reserved: HashSet = self.reserved_nodes.read().clone(); + let reserved = self.reserved_nodes.read(); let mut to_kill = Vec::new(); for e in self.sessions.read().iter() { - let mut s = e.lock(); + let mut s = e.1.lock(); { let id = s.id(); if id.map_or(false, |id| reserved.contains(id)) { @@ -482,10 +570,17 @@ impl Host { self.stopping.store(true, AtomicOrdering::SeqCst); let mut to_kill = Vec::new(); for e in self.sessions.read().iter() { - let mut s = e.lock(); + let mut s = e.1.lock(); s.disconnect(io, DisconnectReason::ClientQuit); to_kill.push(s.token()); } + + for e in self.sessions.read_handshakes().iter() { + let mut s = e.1.lock(); + s.disconnect(io, DisconnectReason::ClientQuit); + to_kill.push(s.token()); + } + for p in to_kill { trace!(target: "network", "Disconnecting on shutdown: {}", p); self.kill_connection(p, io, true); @@ -498,11 +593,10 @@ impl Host { let sessions = self.sessions.read(); let sessions = &*sessions; - let mut peers = Vec::with_capacity(sessions.count()); - for i in (0..MAX_SESSIONS).map(|x| x + FIRST_SESSION) { - if sessions.get(i).is_some() { - peers.push(i); - } + let mut peers = Vec::with_capacity(sessions.len()); + + for peer_id in sessions.keys() { + peers.push(peer_id.clone()); } peers } @@ -583,38 +677,24 @@ impl Host { } fn have_session(&self, id: &NodeId) -> bool { - self.sessions - .read() - .iter() - .any(|e| e.lock().info.id == Some(*id)) + return self.sessions.get_session_for(id).is_some(); } - // returns (handshakes, egress, ingress) - fn session_count(&self) -> (usize, usize, usize) { - let mut handshakes = 0; - let mut egress = 0; - let mut ingress = 0; - for s in self.sessions.read().iter() { - match s.try_lock() { - Some(ref s) if s.is_ready() && s.info.originated => egress += 1, - Some(ref s) if s.is_ready() && !s.info.originated => ingress += 1, - _ => handshakes += 1, - } - } - (handshakes, egress, ingress) + /// returns if there is a known handshake for the given node id is going on. 
+ /// Only Egress handshakes can be considered, for ingress handshakes the NodeID is unknown. + fn have_handshake(&self, id: &NodeId) -> bool { + return self.sessions.get_handshake_for(id).is_some(); } - fn connecting_to(&self, id: &NodeId) -> bool { - self.sessions - .read() - .iter() - .any(|e| e.lock().id() == Some(id)) + // returns (handshakes, egress, ingress) + fn session_count(&self) -> (usize, usize, usize) { + self.sessions.session_count() } fn keep_alive(&self, io: &IoContext) { let mut to_kill = Vec::new(); for e in self.sessions.read().iter() { - let mut s = e.lock(); + let mut s = e.1.lock(); if !s.keep_alive(io) { s.disconnect(io, DisconnectReason::PingTimeout); to_kill.push(s.token()); @@ -639,7 +719,10 @@ impl Host { } fn connect_peers(&self, io: &IoContext) { - let (min_peers, mut pin, max_handshakes, allow_ips, self_id) = { + // dont connect to peers, while we are processing handshakes. + let _handshake_lock = self.handshake_lock.lock(); + + let (min_peers, pin, max_handshakes, allow_ips, self_id) = { let info = self.info.read(); if info.capabilities.is_empty() { return; @@ -655,54 +738,67 @@ impl Host { ) }; - let (handshake_count, egress_count, ingress_count) = self.session_count(); - let reserved_nodes = self.reserved_nodes.read(); - if egress_count + ingress_count >= min_peers as usize + reserved_nodes.len() { - // check if all pinned nodes are connected. - if reserved_nodes - .iter() - .all(|n| self.have_session(n) && self.connecting_to(n)) - { - return; - } + let (mut handshake_count, egress_count, ingress_count) = self.session_count(); + + trace!(target: "network", "initial handshake count: {handshake_count}"); + + // we clone the reserved nodes, to avoid deadlocks and reduce locking time. 
+ let reserved_nodes = Arc::new(self.reserved_nodes.read().clone()); + let unconnected_reserved_nodes: Vec = reserved_nodes + .as_ref() + .into_iter() + .filter(|f| f.ne(&&self_id) && !self.have_handshake(f) && !self.have_session(f)) + .cloned() + .collect(); + + // reserved peers are already findable in the SessionContainer, even they are handshaking. + // so we wont trigger a second handshake here. + + let mut started: usize = 0; - // if not, only attempt connect to reserved peers - pin = true; + for reserved in &unconnected_reserved_nodes { + trace!(target: "network", "connect_peer because it is unconnected reserved peer: {reserved}"); + self.connect_peer(reserved, io); + started += 1; + handshake_count += 1; } - // allow 16 slots for incoming connections - if handshake_count >= max_handshakes { + if pin { return; } - // iterate over all nodes, reserved ones coming first. - // if we are pinned to only reserved nodes, ignore all others. - let nodes = reserved_nodes.iter().cloned().chain(if !pin { - self.nodes.read().nodes(&allow_ips) - } else { - Vec::new() - }); + if handshake_count >= max_handshakes { + return; + } let max_handshakes_per_round = max_handshakes / 2; - let mut started: usize = 0; - for id in nodes - .filter(|id| { - !self.have_session(id) - && !self.connecting_to(id) - && *id != self_id - && self.filter.as_ref().map_or(true, |f| { - f.connection_allowed(&self_id, &id, ConnectionDirection::Outbound) - }) - }) - .take(min( - max_handshakes_per_round, - max_handshakes - handshake_count, - )) - { + + // ip filter: + //.nodes(&allow_ips)) + let number_of_connects_to_make = (min_peers as usize) + .min(max_handshakes_per_round.min(max_handshakes - handshake_count)); + + let nodes_to_connect = + self.nodes + .read() + .nodes_filtered(number_of_connects_to_make, &allow_ips, |n: &Node| { + n.id != self_id + && !&reserved_nodes.contains(&n.id) + && self.filter.as_ref().map_or(true, |f| { + f.connection_allowed(&self_id, &n.id, ConnectionDirection::Outbound) 
+ }) + && !self.have_session(&n.id) // alternative strategy: we might also get a list of active connections, instead of locking here to figure out if we have a session or not. + }); + + trace!(target: "network", "reserved nodes: {:?} nodes_to_connect: {:?}", reserved_nodes, nodes_to_connect); + + // now connect to nodes from the node table. + for id in nodes_to_connect { self.connect_peer(&id, io); started += 1; } - debug!(target: "network", "Connecting peers: {} sessions, {} pending + {} started", egress_count + ingress_count, handshake_count, started); + + debug!(target: "network", "Connecting peers: {} sessions, {} handshakes {} started", egress_count + ingress_count, handshake_count, started); } fn connect_peer(&self, id: &NodeId, io: &IoContext) { @@ -710,10 +806,6 @@ impl Host { trace!(target: "network", "Aborted connect. Node already connected."); return; } - if self.connecting_to(id) { - trace!(target: "network", "Aborted connect. Node already connecting."); - return; - } let socket = { let address = { @@ -748,28 +840,10 @@ impl Host { socket: TcpStream, id: Option<&NodeId>, io: &IoContext, - ) -> Result<(), Error> { + ) -> Result { let nonce = self.info.write().next_nonce(); - let mut sessions = self.sessions.write(); - - let token = sessions.insert_with_opt(|token| { - trace!(target: "network", "{}: Initiating session {:?}", token, id); - match Session::new(io, socket, token, id, &nonce, &self.info.read()) { - Ok(s) => Some(Arc::new(Mutex::new(s))), - Err(e) => { - debug!(target: "network", "Session create error: {:?}", e); - None - } - } - }); - - match token { - Some(t) => io.register_stream(t).map(|_| ()).map_err(Into::into), - None => { - debug!(target: "network", "Max sessions reached"); - Ok(()) - } - } + self.sessions + .create_handshake_connection(socket, id, io, &nonce, &self.info.read()) } fn accept(&self, io: &IoContext) { @@ -790,16 +864,14 @@ impl Host { } } - fn session_writable(&self, token: StreamToken, io: &IoContext) { - let session = { 
self.sessions.read().get(token).cloned() }; - + fn session_writable(&self, session: Option, io: &IoContext) { if let Some(session) = session { let mut s = session.lock(); if let Err(e) = s.writable(io, &self.info.read()) { - trace!(target: "network", "Session write error: {}: {:?}", token, e); + trace!(target: "network", "Session write error: {}: {:?}", s.token(), e); } if s.done() { - io.deregister_stream(token) + io.deregister_stream(s.token()) .unwrap_or_else(|e| debug!("Error deregistering stream: {:?}", e)); } } @@ -810,13 +882,14 @@ impl Host { self.kill_connection(token, io, true); } - fn session_readable(&self, token: StreamToken, io: &IoContext) { + fn session_readable(&self, session: Option, io: &IoContext) { let mut ready_data: Vec = Vec::new(); let mut packet_data: Vec<(ProtocolId, PacketId, Vec)> = Vec::new(); - let mut kill = false; - let session = { self.sessions.read().get(token).cloned() }; + let mut kill: Option = None; let mut ready_id = None; - if let Some(session) = session.clone() { + if let Some(session) = session { + let mut token = session.lock().token(); + trace!(target: "network", "Session readable called: {}", token); { loop { let session_result = session.lock().readable(io, &self.info.read()); @@ -838,17 +911,33 @@ impl Host { } _ => {} } - kill = true; + kill = Some(token); break; } Ok(SessionData::Ready) => { + // we allow only one Handshake to be handlet at a time. + let _handshake_lock = self.handshake_lock.lock(); + let (_, egress_count, ingress_count) = self.session_count(); - let reserved_nodes = self.reserved_nodes.read(); + + //if self.sessions.is_duplicate(&session) { + + kill = self.sessions.should_delete_duplicate_session(&session); + + if let Some(session_to_kill) = kill { + if session_to_kill == token { + // if its our session wich gets to be killed, we can skip the next setup steps. 
+ trace!(target: "network", "not registering session {session_to_kill}"); + break; + } + } + let mut s = session.lock(); + let (min_peers, mut max_peers, reserved_only, self_id) = { let info = self.info.read(); let mut max_peers = info.config.max_peers; - for cap in &s.info.capabilities { + for cap in s.info.capabilities() { if let Some(num) = info.config.reserved_protocols.get(&cap.protocol) { @@ -876,11 +965,11 @@ impl Host { || (s.info.originated && egress_count > min_peers) || (!s.info.originated && ingress_count > max_ingress) { - if !reserved_nodes.contains(&id) { + if !self.reserved_nodes.read().contains(&id) { // only proceed if the connecting peer is reserved. - trace!(target: "network", "Disconnecting non-reserved peer {:?}", id); + trace!(target: "network", "Disconnecting non-reserved peer {:?} (TooManyPeers)", id); s.disconnect(io, DisconnectReason::TooManyPeers); - kill = true; + kill = Some(token); break; } } @@ -890,12 +979,27 @@ impl Host { }) { trace!(target: "network", "Inbound connection not allowed for {:?}", id); s.disconnect(io, DisconnectReason::UnexpectedIdentity); - kill = true; + kill = Some(token); break; } ready_id = Some(id); + let new_token = match self + .sessions + .register_finalized_handshake(&mut s, io) + { + Ok(t) => t, + Err(e) => { + warn!(target: "network", "Unable to finalize handshake for token {token} reason: {e}"); + break; + } + }; + + trace!(target: "network", "upgraded handshake to regular session for token: {} -> {}", token, new_token); + + token = new_token; + // Add it to the node table if !s.info.originated { if let Ok(address) = s.remote_addr() { @@ -941,18 +1045,35 @@ impl Host { } } - if kill { - self.kill_connection(token, io, true); + if let Some(peer_to_kill) = kill { + self.kill_connection(peer_to_kill, io, true); } + // todo: because of new duplicated session detection logic, + // https://github.com/DMDcoin/diamond-node/issues/252 + // this code should realisticly not be able to find duplicate sessions. 
+ let handlers = self.handlers.read(); if !ready_data.is_empty() { - let duplicate = self.sessions.read().iter().any(|e| { - let session = e.lock(); - session.token() != token && session.info.id == ready_id - }); - if duplicate { - trace!(target: "network", "Rejected duplicate connection: {}", token); + let duplicates: Vec = self + .sessions + .read() + .iter() + .filter_map(|e| { + let session = e.1.lock(); + if session.token() != token + && session.info.id == ready_id + && !session.expired() + { + return Some(session.token()); + } else { + return None; + } + }) + .collect(); + + if duplicates.len() > 0 { + trace!(target: "network", "Rejected duplicate connection for {:?}: token: {} other connections: {:?}", ready_id, token, duplicates); session .lock() .disconnect(io, DisconnectReason::DuplicatePeer); @@ -960,16 +1081,18 @@ impl Host { self.kill_connection(token, io, false); return; } + + let reserved = self.reserved_nodes.read().clone(); for p in ready_data { - let reserved = self.reserved_nodes.read(); if let Some(h) = handlers.get(&p) { h.connected( &NetworkContext::new( io, p, Some(session.clone()), - self.sessions.clone(), + &self.sessions, &reserved, + &self.statistics, ), &token, ); @@ -988,8 +1111,9 @@ impl Host { io, p, Some(session.clone()), - self.sessions.clone(), + &self.sessions, &reserved, + &self.statistics, ), &token, packet_id, @@ -997,6 +1121,8 @@ impl Host { ); } } + } else { + trace!(target: "network", "Session not found"); } } @@ -1066,31 +1192,56 @@ impl Host { fn connection_timeout(&self, token: StreamToken, io: &IoContext) { trace!(target: "network", "Connection timeout: {}", token); - self.kill_connection(token, io, true) + self.kill_connection(token, io, true); } fn kill_connection(&self, token: StreamToken, io: &IoContext, remote: bool) { + self.kill_connection_with_failure(token, io, remote, true); + } + + fn kill_connection_with_failure( + &self, + token: StreamToken, + io: &IoContext, + remote: bool, + as_failure: bool, + ) { let 
mut to_disconnect: Vec = Vec::new(); let mut failure_id = None; let mut deregister = false; let mut expired_session = None; - if let FIRST_SESSION..=LAST_SESSION = token { - let sessions = self.sessions.read(); - if let Some(session) = sessions.get(token).cloned() { + if token >= FIRST_HANDSHAKE { + trace!(target: "network", "Killing connection: {}", token); + let session_o = if token >= FIRST_SESSION { + self.sessions.get_session(token) + } else { + self.sessions.get_handshake(token) + }; + + // we can shorten the session read lock here, if this causes a deadlock. + // on the other hand it is good, so not that many sessions manipulations can take place + // at the same time. + + if let Some(session) = session_o { expired_session = Some(session.clone()); let mut s = session.lock(); if !s.expired() { + if as_failure { + failure_id = s.id().cloned(); + } if s.is_ready() { for (p, _) in self.handlers.read().iter() { if s.have_capability(*p) { - to_disconnect.push(*p); + to_disconnect.push(p.clone()); } } } + s.set_expired(); - failure_id = s.id().cloned(); } deregister = remote || s.done(); + } else { + trace!(target: "network", "Session not found for token: {}", token); } } if let Some(id) = failure_id { @@ -1106,8 +1257,9 @@ impl Host { io, p, expired_session.clone(), - self.sessions.clone(), + &self.sessions, &reserved, + &self.statistics, ), &token, ); @@ -1124,7 +1276,7 @@ impl Host { { let sessions = self.sessions.read(); for c in sessions.iter() { - let s = c.lock(); + let s = c.1.lock(); if let Some(id) = s.id() { if node_changes.removed.contains(id) { to_remove.push(s.token()); @@ -1145,7 +1297,14 @@ impl Host { { let reserved = { self.reserved_nodes.read() }; - let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved); + let context = NetworkContext::new( + io, + protocol, + None, + &self.sessions, + &reserved, + &self.statistics, + ); action(&context); } @@ -1160,9 +1319,26 @@ impl Host { { let reserved = { 
self.reserved_nodes.read() }; - let context = NetworkContext::new(io, protocol, None, self.sessions.clone(), &reserved); + let context = NetworkContext::new( + io, + protocol, + None, + &self.sessions, + &reserved, + &self.statistics, + ); action(&context) } + + fn get_session_or_handshake(&self, token: StreamToken) -> Option { + if token >= FIRST_HANDSHAKE && token <= LAST_HANDSHAKE { + return self.sessions.get_handshake(token); + } else if token >= FIRST_SESSION && token <= LAST_SESSION { + return self.sessions.get_session(token); + } + warn!(target: "network", "get_session_or_handshake called with unexpected token: {}", token); + return None; + } } impl IoHandler for Host { @@ -1177,10 +1353,11 @@ impl IoHandler for Host { fn stream_hup(&self, io: &IoContext, stream: StreamToken) { trace!(target: "network", "Hup: {}", stream); - match stream { - FIRST_SESSION..=LAST_SESSION => self.connection_closed(stream, io), - _ => warn!(target: "network", "Unexpected hup"), - }; + if stream >= FIRST_HANDSHAKE { + self.connection_closed(stream, io) + } else { + warn!(target: "network", "Unexpected hup for session {}", stream); + } } fn stream_readable(&self, io: &IoContext, stream: StreamToken) { @@ -1188,7 +1365,12 @@ impl IoHandler for Host { return; } match stream { - FIRST_SESSION..=LAST_SESSION => self.session_readable(stream, io), + FIRST_HANDSHAKE..=LAST_HANDSHAKE => { + self.session_readable(self.sessions.get_handshake(stream), io) + } + FIRST_SESSION..=LAST_SESSION => { + self.session_readable(self.sessions.get_session(stream), io) + } DISCOVERY => self.discovery_readable(io), TCP_ACCEPT => self.accept(io), _ => panic!("Received unknown readable token"), @@ -1200,7 +1382,12 @@ impl IoHandler for Host { return; } match stream { - FIRST_SESSION..=LAST_SESSION => self.session_writable(stream, io), + FIRST_HANDSHAKE..=LAST_HANDSHAKE => { + self.session_writable(self.sessions.get_handshake(stream), io) + } + FIRST_SESSION..=LAST_SESSION => { + 
self.session_writable(self.sessions.get_session(stream), io) + } DISCOVERY => self.discovery_writable(io), _ => panic!("Received unknown writable token"), } @@ -1212,7 +1399,7 @@ impl IoHandler for Host { } match token { IDLE => self.maintain_network(io), - FIRST_SESSION..=LAST_SESSION => self.connection_timeout(token, io), + DISCOVERY_REFRESH => { // Run the _slow_ discovery if enough peers are connected if !self.has_enough_peers() { @@ -1242,7 +1429,7 @@ impl IoHandler for Host { nodes.clear_useless(); nodes.save(); } - _ => match self.timers.read().get(&token).cloned() { + FIRST_USER_TIMER..=LAST_USER_TIMER => match self.timers.read().get(&token).cloned() { Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() { None => { warn!(target: "network", "No handler found for protocol: {:?}", timer.protocol) @@ -1254,8 +1441,9 @@ impl IoHandler for Host { io, timer.protocol, None, - self.sessions.clone(), + &self.sessions, &reserved, + &self.statistics, ), timer.token, ); @@ -1265,10 +1453,17 @@ impl IoHandler for Host { warn!("Unknown timer token: {}", token); } // timer is not registerd through us }, + FIRST_HANDSHAKE..=LAST_SESSION => { + trace!(target: "network", "Timeout from Host impl: {}", token); + self.connection_timeout(token, io); + } + _ => { + warn!(target: "network", "HOST: Unknown timer token tick: {}", token); + } } } - fn message(&self, io: &IoContext, message: &NetworkIoMessage) { + fn message(&self, io: &::io::IoContext, message: &NetworkIoMessage) { if self.stopping.load(AtomicOrdering::SeqCst) { return; } @@ -1284,8 +1479,9 @@ impl IoHandler for Host { io, *protocol, None, - self.sessions.clone(), + &self.sessions, &reserved, + &self.statistics, )); self.handlers.write().insert(*protocol, h); let mut info = self.info.write(); @@ -1302,6 +1498,13 @@ impl IoHandler for Host { ref delay, ref token, } => { + trace!(target: "network", "Adding timer for protocol: {:?}, delay: {:?}, token: {}", protocol, delay.as_millis(), token); + + if 
token >= &MAX_USER_TIMERS { + warn!(target: "network", "Tried to register timer with token {} which is larger than MAX_USER_TIMERS {}", token, MAX_USER_TIMERS); + return; + } + let handler_token = { let mut timer_counter = self.timer_counter.write(); let counter = &mut *timer_counter; @@ -1309,6 +1512,11 @@ impl IoHandler for Host { *counter += 1; handler_token }; + + if handler_token > LAST_USER_TIMER { + warn!(target: "network", "Tried to register timer witch Index {token} what would be over the boundaries of usertimers: {LAST_USER_TIMER}"); + return; + } self.timers.write().insert( handler_token, ProtocolTimer { @@ -1316,11 +1524,13 @@ impl IoHandler for Host { token: *token, }, ); + + trace!(target: "network", "Registering handler_token {} token {} for protocol {:?} with delay {:?}", handler_token, token, protocol, delay); io.register_timer(handler_token, *delay) .unwrap_or_else(|e| debug!("Error registering timer {}: {:?}", token, e)); } NetworkIoMessage::Disconnect(ref peer) => { - let session = { self.sessions.read().get(*peer).cloned() }; + let session = self.get_session_or_handshake(peer.clone()); if let Some(session) = session { session .lock() @@ -1330,7 +1540,7 @@ impl IoHandler for Host { self.kill_connection(*peer, io, false); } NetworkIoMessage::DisablePeer(ref peer) => { - let session = { self.sessions.read().get(*peer).cloned() }; + let session = self.get_session_or_handshake(peer.clone()); if let Some(session) = session { session .lock() @@ -1341,7 +1551,7 @@ impl IoHandler for Host { nodes.mark_as_useless(id); } } - trace!(target: "network", "Disabling peer {}", peer); + debug!(target: "network", "Disabling peer {}", peer); self.kill_connection(*peer, io, false); } NetworkIoMessage::InitPublicInterface => self @@ -1357,9 +1567,19 @@ impl IoHandler for Host { reg: Token, event_loop: &mut EventLoop>, ) { + trace!(target: "network", "register_stream {}", stream); match stream { - FIRST_SESSION..=LAST_SESSION => { - let session = { 
self.sessions.read().get(stream).cloned() }; + FIRST_HANDSHAKE..=LAST_HANDSHAKE => { + let session = { self.sessions.get_handshake(stream) }; + if let Some(session) = session { + session + .lock() + .register_socket(reg, event_loop) + .expect("Error registering socket"); + } + } + FIRST_SESSION.. => { + let session = { self.sessions.get_session(stream) }; if let Some(session) = session { session .lock() @@ -1393,17 +1613,14 @@ impl IoHandler for Host { event_loop: &mut EventLoop>, ) { match stream { + FIRST_HANDSHAKE..FIRST_SESSION => { + let _handhake_lock = self.handshake_lock.lock(); // we do not allow new handshakes to get processed during deregistering a stream. + self.sessions + .deregister_handshake_stream(stream, event_loop); + } FIRST_SESSION..=LAST_SESSION => { - let mut connections = self.sessions.write(); - if let Some(connection) = connections.get(stream).cloned() { - let c = connection.lock(); - if c.expired() { - // make sure it is the same connection that the event was generated for - c.deregister_socket(event_loop) - .expect("Error deregistering socket"); - connections.remove(stream); - } - } + let _handhake_lock = self.handshake_lock.lock(); // since finalizing handshakes is the only way to promot a handshake to a session, we also block handshakes here. 
+ self.sessions.deregister_session_stream(stream, event_loop); } DISCOVERY => (), _ => warn!("Unexpected stream deregistration"), @@ -1417,15 +1634,6 @@ impl IoHandler for Host { event_loop: &mut EventLoop>, ) { match stream { - FIRST_SESSION..=LAST_SESSION => { - let connection = { self.sessions.read().get(stream).cloned() }; - if let Some(connection) = connection { - connection - .lock() - .update_socket(reg, event_loop) - .expect("Error updating socket"); - } - } DISCOVERY => match ( self.udp_socket.lock().as_ref(), self.discovery.lock().as_ref(), @@ -1450,11 +1658,46 @@ impl IoHandler for Host { PollOpt::edge(), ) .expect("Error reregistering stream"), - _ => warn!("Unexpected stream update"), + _ => { + let connection = self.get_session_or_handshake(stream); + if let Some(connection) = connection { + connection + .lock() + .update_socket(reg, event_loop) + .expect("Error updating socket"); + } + } } } } +impl PrometheusMetrics for Host { + fn prometheus_metrics(&self, r: &mut PrometheusRegistry) { + let lockdur = Duration::from_millis(50); + + if let Some((handshakes, egress, ingress)) = + self.sessions.session_count_try(Duration::from_millis(20)) + { + r.register_gauge("p2p_ingress", "count", ingress as i64); + r.register_gauge("p2p_egress", "count", egress as i64); + r.register_gauge("p2p_handshakes", "count", handshakes as i64); + } + + if let Some(reserved_nodes) = self.reserved_nodes.try_read_for(lockdur) { + r.register_gauge("p2p_reserved_nodes", "count", reserved_nodes.len() as i64); + } + + if let Some(nodes) = self.nodes.try_read_for(lockdur) { + r.register_gauge("p2p_nodes", "count", nodes.count_nodes() as i64); + r.register_gauge("p2p_uselessnodes", "count", nodes.count_useless() as i64); + } + + self.statistics.prometheus_metrics(r); + } + + //r.register_gauge("connected_peers", "", self.connect_peers(io)); +} + fn save_key(path: &Path, key: &Secret) { let mut path_buf = PathBuf::from(path); if let Err(e) = fs::create_dir_all(path_buf.as_path()) { 
diff --git a/crates/net/network-devp2p/src/ip_utils.rs b/crates/net/network-devp2p/src/ip_utils.rs index 08d1ae0851..c9a81462d9 100644 --- a/crates/net/network-devp2p/src/ip_utils.rs +++ b/crates/net/network-devp2p/src/ip_utils.rs @@ -18,9 +18,9 @@ #![allow(unstable_name_collisions)] -use igd::{search_gateway_from_timeout, PortMappingProtocol}; +use crate::node_table::NodeEndpoint; +use igd::{PortMappingProtocol, search_gateway_from_timeout}; use ipnetwork::IpNetwork; -use node_table::NodeEndpoint; use std::{ io, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, @@ -229,7 +229,7 @@ impl SocketAddrExt for IpAddr { #[cfg(not(any(windows, target_os = "android")))] mod getinterfaces { use libc::{ - freeifaddrs, getifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6, AF_INET, AF_INET6, + AF_INET, AF_INET6, freeifaddrs, getifaddrs, ifaddrs, sockaddr, sockaddr_in, sockaddr_in6, }; use std::{ io, mem, diff --git a/crates/net/network-devp2p/src/lib.rs b/crates/net/network-devp2p/src/lib.rs index 5b955ea45f..d454a02c33 100644 --- a/crates/net/network-devp2p/src/lib.rs +++ b/crates/net/network-devp2p/src/lib.rs @@ -21,12 +21,12 @@ //! ```rust //! extern crate ethcore_network as net; //! extern crate ethcore_network_devp2p as devp2p; -//! extern crate ethereum_types as types; +//! use ethereum_types as types; //! use net::*; //! use devp2p::NetworkService; //! use std::sync::Arc; //! use std::time::Duration; -//! use types::U64; +//! use crate::types::U64; //! //! struct MyHandler; //! 
@@ -64,10 +64,8 @@ extern crate ansi_term; //TODO: remove this extern crate bytes; -extern crate crypto as rcrypto; extern crate ethcore_io as io; extern crate ethcore_network as network; -extern crate ethereum_types; extern crate ethkey; extern crate igd; extern crate ipnetwork; @@ -86,6 +84,7 @@ extern crate rustc_hex; extern crate serde; extern crate serde_json; extern crate slab; +extern crate stats; extern crate tiny_keccak; #[macro_use] @@ -111,13 +110,14 @@ mod ip_utils; mod node_table; mod service; mod session; +mod session_container; pub use host::NetworkContext; pub use service::NetworkService; pub use connection::PAYLOAD_SOFT_LIMIT; -pub use io::TimerToken; -pub use node_table::{validate_node_url, NodeId}; +pub use crate::io::TimerToken; +pub use node_table::{NodeId, validate_node_url}; const PROTOCOL_VERSION: u32 = 5; diff --git a/crates/net/network-devp2p/src/node_table.rs b/crates/net/network-devp2p/src/node_table.rs index fcf57c0c69..f3f1118edb 100644 --- a/crates/net/network-devp2p/src/node_table.rs +++ b/crates/net/network-devp2p/src/node_table.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use discovery::{NodeEntry, TableUpdates}; +use crate::{ + discovery::{NodeEntry, TableUpdates}, + ip_utils::*, +}; use ethereum_types::H512; -use ip_utils::*; use network::{AllowIP, Error, ErrorKind, IpFilter}; use rand::seq::SliceRandom; use rlp::{DecoderError, Rlp, RlpStream}; @@ -109,7 +111,8 @@ impl NodeEndpoint { rlp.append(&(&a.ip().octets()[..])); } SocketAddr::V6(a) => unsafe { - let o: *const u8 = a.ip().segments().as_ptr() as *const u8; + let segments = a.ip().segments(); + let o: *const u8 = segments.as_ptr() as *const u8; rlp.append(&slice::from_raw_parts(o, 16)); }, }; @@ -165,7 +168,6 @@ impl Display for NodeEndpoint { #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum PeerType { _Required, - Optional, } /// A type for representing an interaction (contact) with a node at a given time @@ -209,7 +211,6 @@ impl NodeContact { pub struct Node { pub id: NodeId, pub endpoint: NodeEndpoint, - pub peer_type: PeerType, pub last_contact: Option, } @@ -218,7 +219,6 @@ impl Node { Node { id, endpoint, - peer_type: PeerType::Optional, last_contact: None, } } @@ -254,7 +254,6 @@ impl FromStr for Node { Ok(Node { id, endpoint, - peer_type: PeerType::Optional, last_contact: None, }) } @@ -375,6 +374,26 @@ impl NodeTable { .collect() } + /// Returns node ids sorted by failure percentage, for nodes with the same failure percentage the absolute number of + /// failures is considered. + pub fn nodes_filtered( + &self, + max_count: usize, + ip_filter: &IpFilter, + filter: F, + ) -> Vec + where + F: Fn(&Node) -> bool, + { + self.ordered_entries() + .iter() + .filter(|n| n.endpoint.is_allowed(&ip_filter)) + .filter(|n| filter(n)) + .take(max_count) + .map(|n| n.id) + .collect() + } + /// Ordered list of all entries by failure percentage, for nodes with the same failure percentage the absolute /// number of failures is considered. 
pub fn entries(&self) -> Vec { @@ -429,7 +448,9 @@ impl NodeTable { /// Mark as useless, no further attempts to connect until next call to `clear_useless`. pub fn mark_as_useless(&mut self, id: &NodeId) { - self.useless_nodes.insert(id.clone()); + if self.useless_nodes.insert(id.clone()) { + debug!(target: "network", "Node was marked as useless: {:?}", id); + } } /// Attempt to connect to useless nodes again. @@ -437,6 +458,15 @@ impl NodeTable { self.useless_nodes.clear(); } + /// count of useless nodes. + pub fn count_useless(&self) -> usize { + self.useless_nodes.len() + } + + pub fn count_nodes(&self) -> usize { + self.nodes.len() + } + /// Save the nodes.json file. pub fn save(&self) { let mut path = match self.path { @@ -629,7 +659,9 @@ mod tests { #[test] fn node_parse() { assert!(validate_node_url("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").is_none()); - let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770"); + let node = Node::from_str( + "enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770", + ); assert!(node.is_ok()); let node = node.unwrap(); let v4 = match node.endpoint.address { @@ -744,15 +776,21 @@ mod tests { ], custom_block: vec![], }; - assert!(!NodeEndpoint::from_str("123.99.55.44:7770") - .unwrap() - .is_allowed(&filter)); - assert!(NodeEndpoint::from_str("10.0.0.1:7770") - .unwrap() - .is_allowed(&filter)); - assert!(NodeEndpoint::from_str("1.0.0.55:5550") - .unwrap() - .is_allowed(&filter)); + assert!( + !NodeEndpoint::from_str("123.99.55.44:7770") + .unwrap() + .is_allowed(&filter) + ); + assert!( + NodeEndpoint::from_str("10.0.0.1:7770") + .unwrap() + .is_allowed(&filter) + ); + assert!( + 
NodeEndpoint::from_str("1.0.0.55:5550") + .unwrap() + .is_allowed(&filter) + ); } #[test] @@ -765,15 +803,21 @@ mod tests { IpNetwork::from_str(&"1.0.0.0/8").unwrap(), ], }; - assert!(NodeEndpoint::from_str("123.99.55.44:7770") - .unwrap() - .is_allowed(&filter)); - assert!(!NodeEndpoint::from_str("10.0.0.1:7770") - .unwrap() - .is_allowed(&filter)); - assert!(!NodeEndpoint::from_str("1.0.0.55:5550") - .unwrap() - .is_allowed(&filter)); + assert!( + NodeEndpoint::from_str("123.99.55.44:7770") + .unwrap() + .is_allowed(&filter) + ); + assert!( + !NodeEndpoint::from_str("10.0.0.1:7770") + .unwrap() + .is_allowed(&filter) + ); + assert!( + !NodeEndpoint::from_str("1.0.0.55:5550") + .unwrap() + .is_allowed(&filter) + ); } #[test] @@ -783,12 +827,16 @@ mod tests { custom_allow: vec![IpNetwork::from_str(&"fc00::/8").unwrap()], custom_block: vec![], }; - assert!(NodeEndpoint::from_str("[fc00::]:5550") - .unwrap() - .is_allowed(&filter)); - assert!(!NodeEndpoint::from_str("[fd00::]:5550") - .unwrap() - .is_allowed(&filter)); + assert!( + NodeEndpoint::from_str("[fc00::]:5550") + .unwrap() + .is_allowed(&filter) + ); + assert!( + !NodeEndpoint::from_str("[fd00::]:5550") + .unwrap() + .is_allowed(&filter) + ); } #[test] @@ -798,11 +846,15 @@ mod tests { custom_allow: vec![], custom_block: vec![IpNetwork::from_str(&"fc00::/8").unwrap()], }; - assert!(!NodeEndpoint::from_str("[fc00::]:5550") - .unwrap() - .is_allowed(&filter)); - assert!(NodeEndpoint::from_str("[fd00::]:5550") - .unwrap() - .is_allowed(&filter)); + assert!( + !NodeEndpoint::from_str("[fc00::]:5550") + .unwrap() + .is_allowed(&filter) + ); + assert!( + NodeEndpoint::from_str("[fd00::]:5550") + .unwrap() + .is_allowed(&filter) + ); } } diff --git a/crates/net/network-devp2p/src/service.rs b/crates/net/network-devp2p/src/service.rs index 7ebd31521c..644cd83ca4 100644 --- a/crates/net/network-devp2p/src/service.rs +++ b/crates/net/network-devp2p/src/service.rs @@ -15,14 +15,18 @@ // along with OpenEthereum. 
If not, see . use ansi_term::Colour; -use host::Host; -use io::*; -use network::{ + +use crate::io::*; + +use crate::network::{ ConnectionFilter, Error, NetworkConfiguration, NetworkContext, NetworkIoMessage, NetworkProtocolHandler, NonReservedPeerMode, PeerId, ProtocolId, }; use parking_lot::RwLock; -use std::{net::SocketAddr, ops::RangeInclusive, sync::Arc}; +use stats::{PrometheusMetrics, PrometheusRegistry}; +use std::{net::SocketAddr, ops::RangeInclusive, sync::Arc, time::Duration}; + +use crate::host::Host; struct HostHandler { public_url: RwLock>, @@ -60,7 +64,7 @@ impl NetworkService { let host_handler = Arc::new(HostHandler { public_url: RwLock::new(None), }); - let io_service = IoService::::start("devp2p")?; + let io_service = IoService::::start("devp2p", 4)?; Ok(NetworkService { io_service, @@ -225,3 +229,14 @@ impl NetworkService { .map(|ref host| host.with_context_eval(protocol, &io, action)) } } + +impl PrometheusMetrics for NetworkService { + fn prometheus_metrics(&self, r: &mut PrometheusRegistry) { + if let Some(host_o) = self.host.try_read_for(Duration::from_millis(50)) { + if let Some(host) = host_o.as_ref() { + host.prometheus_metrics(r); + } + } + //self.connected_peers() + } +} diff --git a/crates/net/network-devp2p/src/session.rs b/crates/net/network-devp2p/src/session.rs index 80539be3d2..bece885fbd 100644 --- a/crates/net/network-devp2p/src/session.rs +++ b/crates/net/network-devp2p/src/session.rs @@ -22,22 +22,24 @@ use std::{ time::{Duration, Instant}, }; -use connection::{Connection, EncryptedConnection, Packet, MAX_PAYLOAD_SIZE}; +use crate::{ + connection::{Connection, EncryptedConnection, MAX_PAYLOAD_SIZE, Packet}, + handshake::Handshake, + host::*, + io::{IoContext, StreamToken}, + node_table::NodeId, +}; use ethereum_types::H256; -use handshake::Handshake; -use host::*; -use io::{IoContext, StreamToken}; use mio::{ deprecated::{EventLoop, Handler}, tcp::*, *, }; use network::{ - client_version::ClientVersion, DisconnectReason, 
Error, ErrorKind, PeerCapabilityInfo, - ProtocolId, SessionCapabilityInfo, SessionInfo, + DisconnectReason, Error, ErrorKind, PeerCapabilityInfo, ProtocolId, SessionCapabilityInfo, + SessionInfo, client_version::ClientVersion, }; -use node_table::NodeId; -use rlp::{Rlp, RlpStream, EMPTY_LIST_RLP}; +use rlp::{EMPTY_LIST_RLP, Rlp, RlpStream}; use snappy; // Timeout must be less than (interval - 1). @@ -60,6 +62,7 @@ enum ProtocolState { pub struct Session { /// Shared session information pub info: SessionInfo, + /// Session ready flag. Set after successful Hello packet exchange had_hello: bool, /// Session is no longer active flag. @@ -126,17 +129,8 @@ impl Session { Ok(Session { state: State::Handshake(handshake), had_hello: false, - info: SessionInfo { - id: id.cloned(), - client_version: ClientVersion::from(""), - protocol_version: 0, - capabilities: Vec::new(), - peer_capabilities: Vec::new(), - ping: None, - originated, - remote_address: "Handshake".to_owned(), - local_address: local_addr, - }, + + info: SessionInfo::new(id, local_addr, originated), ping_time: Instant::now(), pong_time: None, expired: false, @@ -149,6 +143,7 @@ impl Session { &mut self, io: &IoContext, host: &HostInfo, + session_uid: H256, ) -> Result<(), Error> where Message: Send + Sync + Clone, @@ -160,6 +155,7 @@ impl Session { } else { panic!("Unexpected state"); }; + self.info.session_uid = Some(session_uid); self.state = State::Session(connection); self.write_hello(io, host)?; Ok(()) @@ -214,13 +210,16 @@ impl Session { if self.expired() { return Ok(SessionData::None); } - let mut create_session = false; + let mut create_session_with_uid: Option = None; let mut packet_data = None; match self.state { State::Handshake(ref mut h) => { h.readable(io, host)?; if h.done() { - create_session = true; + // the Nonce Id is a unique ID shared on both endpoints + // it can be used to order sessions for example duplicate removal, + // so both Nodes can use the same algorithm to decide wich connection 
to keep. + create_session_with_uid = Some(h.nonce ^ h.remote_nonce); } } State::Session(ref mut c) => match c.readable(io)? { @@ -231,8 +230,8 @@ impl Session { if let Some(data) = packet_data { return Ok(self.read_packet(io, &data, host)?); } - if create_session { - self.complete_handshake(io, host)?; + if let Some(session_uid) = create_session_with_uid { + self.complete_handshake(io, host, session_uid)?; io.update_registration(self.token()) .unwrap_or_else(|e| debug!(target: "network", "Token registration error: {:?}", e)); } @@ -257,7 +256,7 @@ impl Session { /// Checks if peer supports given capability pub fn have_capability(&self, protocol: ProtocolId) -> bool { self.info - .capabilities + .capabilities() .iter() .any(|c| c.protocol == protocol) } @@ -265,7 +264,7 @@ impl Session { /// Checks if peer supports given capability pub fn capability_version(&self, protocol: ProtocolId) -> Option { self.info - .capabilities + .capabilities() .iter() .filter_map(|c| { if c.protocol == protocol { @@ -320,24 +319,25 @@ impl Session { where Message: Send + Sync + Clone, { - if protocol.is_some() && (self.info.capabilities.is_empty() || !self.had_hello) { + if protocol.is_some() && (self.info.capabilities().is_empty() || !self.had_hello) { debug!(target: "network", "Sending to unconfirmed session {}, protocol: {:?}, packet: {}", self.token(), protocol.map(|p| str::from_utf8(&p.as_u64().to_ne_bytes()).unwrap_or("??").to_string()), packet_id); bail!(ErrorKind::BadProtocol); } if self.expired() { + debug!(target: "network", "Unable to send to expired session {}", self.token()); return Err(ErrorKind::Expired.into()); } let mut i = 0usize; let pid = match protocol { Some(protocol) => { - while protocol != self.info.capabilities[i].protocol { + while protocol != self.info.capabilities()[i].protocol { i += 1; - if i == self.info.capabilities.len() { + if i == self.info.capabilities().len() { debug!(target: "network", "Unknown protocol: {:?}", protocol); return Ok(()); } } - 
self.info.capabilities[i].id_offset + packet_id + self.info.capabilities()[i].id_offset + packet_id } None => packet_id, }; @@ -455,19 +455,20 @@ impl Session { PACKET_PEERS => Ok(SessionData::None), PACKET_USER..=PACKET_LAST => { let mut i = 0usize; - while packet_id - >= self.info.capabilities[i].id_offset + self.info.capabilities[i].packet_count - { + let capabilities = self.info.capabilities(); + let mut capability = &capabilities[i]; + while packet_id >= capability.id_offset + capability.packet_count { i += 1; - if i == self.info.capabilities.len() { + if i == self.info.capabilities().len() { debug!(target: "network", "Unknown packet: {:?}", packet_id); return Ok(SessionData::Continue); } + capability = &capabilities[i]; } // map to protocol - let protocol = self.info.capabilities[i].protocol; - let protocol_packet_id = packet_id - self.info.capabilities[i].id_offset; + let protocol = capability.protocol; + let protocol_packet_id = packet_id - capability.id_offset; match *self .protocol_states @@ -475,7 +476,7 @@ impl Session { .or_insert_with(|| ProtocolState::Pending(Vec::new())) { ProtocolState::Connected => { - trace!(target: "network", "Packet {} mapped to {:?}:{}, i={}, capabilities={:?}", packet_id, protocol, protocol_packet_id, i, self.info.capabilities); + trace!(target: "network", "Packet {} mapped to {:?}:{}, i={}, capabilities={:?}", packet_id, protocol, protocol_packet_id, i, self.info.capabilities()); Ok(SessionData::Packet { data, protocol, @@ -575,13 +576,13 @@ impl Session { offset += caps[i].packet_count; i += 1; } - debug!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps); + debug!(target: "network", "Hello: {} {} v{} {} {:?}", self.token(), client_version, protocol, id, caps); let protocol = ::std::cmp::min(protocol, host.protocol_version); self.info.protocol_version = protocol; self.info.client_version = client_version; - self.info.capabilities = caps; - self.info.peer_capabilities = peer_caps; - if 
self.info.capabilities.is_empty() { + self.info.set_capabilities(caps, peer_caps); + + if self.info.capabilities().is_empty() { trace!(target: "network", "No common capabilities with peer."); return Err(self.disconnect(io, DisconnectReason::UselessPeer)); } @@ -644,4 +645,14 @@ impl Session { } Ok(()) } + + pub(crate) fn update_token_id(&mut self, token: StreamToken) -> Result<(), Error> { + match self.state { + State::Handshake(ref _h) => return Err(ErrorKind::HostCacheInconsistency.into()), + State::Session(ref mut s) => { + s.connection.token = token; + return Ok(()); + } + } + } } diff --git a/crates/net/network-devp2p/src/session_container.rs b/crates/net/network-devp2p/src/session_container.rs new file mode 100644 index 0000000000..1ce747766e --- /dev/null +++ b/crates/net/network-devp2p/src/session_container.rs @@ -0,0 +1,494 @@ +use crate::host::HostInfo; +use ethereum_types::H256; +use lru_cache::LruCache; +use mio::net::TcpStream; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use crate::{io::*, node_table::*, session::Session}; +use network::{Error, ErrorKind, NetworkIoMessage, PeerId}; +use parking_lot::{Mutex, RwLock, RwLockReadGuard}; + +pub type SharedSession = Arc>; + +fn socket_address_to_string(socket: &TcpStream) -> String { + socket + .peer_addr() + .map_or("unknown".to_string(), |a| a.to_string()) +} + +/// SessionContainer manages handshakes, and their upgrade to regular encrypted sessions. +/// It has high performance lookup capabilities for NodeIDs by using a hashmap, instead of linear locking iteration of sessions. +pub struct SessionContainer { + max_sessions: usize, + max_handshakes: usize, + first_handshake: usize, + last_handshake: usize, + // the handshake cursor is a improvement to find new available handshake slots. it defines the next starting search position. 
+ current_handshake_cursor: Mutex, + sessions: RwLock>, + handshakes: RwLock>, // Separate map for handshakes + // for egress handshakes, we know the Node ID we want to do a handshake with, so we can do efficient lookups. + handshakes_egress_map: RwLock>, + node_id_to_session: Mutex>, // used to map Node IDs to last used session tokens. + sessions_token_max: Mutex, // curent last used token for regular sessions. +} + +impl SessionContainer { + pub fn new( + first_handshake_token: usize, + max_sessions: usize, + max_node_mappings: usize, + max_handshakes: usize, + ) -> Self { + SessionContainer { + sessions: RwLock::new(std::collections::BTreeMap::new()), + handshakes: RwLock::new(std::collections::BTreeMap::new()), + handshakes_egress_map: RwLock::new(BTreeMap::new()), + current_handshake_cursor: Mutex::new(first_handshake_token), + first_handshake: first_handshake_token, + last_handshake: first_handshake_token + max_handshakes, + node_id_to_session: Mutex::new(LruCache::new(max_node_mappings)), + sessions_token_max: Mutex::new(first_handshake_token + max_handshakes + 1), + max_sessions, + max_handshakes, + } + } + + /// Returns a Read guard to the sessions. + pub fn read(&self) -> RwLockReadGuard> { + self.sessions.read() + } + + /// Returns a Read guard to the sessions. + pub fn read_handshakes(&self) -> RwLockReadGuard> { + self.handshakes.read() + } + + /// gets the next token ID and store this information + fn create_token_id( + &self, + node_id: &NodeId, + tokens: &mut LruCache, + ) -> usize { + let mut session_token_max = self.sessions_token_max.lock(); + let next_id = session_token_max.clone(); + + *session_token_max += 1; + + // TODO: if we run out of token ids, + // we need to recycle Ids that are not used anymore. 
+ + if let Some(old) = tokens.insert(node_id.clone(), next_id) { + warn!(target: "network", "Node ID {} already exists with token {}, overwriting with {}", node_id, old, next_id); + } + + return next_id; + } + + fn create_token_id_for_handshake( + &self, + handshakes: &std::collections::BTreeMap, + ) -> Result { + let mut cursor_lock = self.current_handshake_cursor.lock(); + let start_cursor = *cursor_lock; + + for _ in 0..self.max_handshakes { + let current_token = *cursor_lock; + *cursor_lock = if current_token + 1 < self.last_handshake { + current_token + 1 + } else { + self.first_handshake + }; + + if !handshakes.contains_key(¤t_token) { + // Found an available token + return Ok(current_token); + } + + if *cursor_lock == start_cursor { + // We've looped through all possible handshake tokens and found none free. + break; + } + } + + // If we reach here, it means no available handshake token was found. + Err(ErrorKind::TooManyConnections.into()) + } + + pub(crate) fn session_count(&self) -> (usize, usize, usize) { + let mut egress = 0; + let mut ingress = 0; + + // we avoid an intensive read lock on the sessions, and take a snapshot of current sessions. + for s in self.sessions.read().clone().iter() { + match s.1.lock() { + ref s if s.is_ready() && s.info.originated => egress += 1, + ref s if s.is_ready() && !s.info.originated => ingress += 1, + _ => {} + } + } + + (self.handshakes.read().len(), egress, ingress) + } + + // like session count, but does not block if read can not be achieved. 
+ pub fn session_count_try(&self, lock_duration: Duration) -> Option<(usize, usize, usize)> { + let mut egress = 0; + let mut ingress = 0; + + let deadline = time_utils::DeadlineStopwatch::new(lock_duration); + + if let Some(lock) = self.sessions.try_read_for(deadline.time_left()) { + for s in lock.iter() { + match s.1.try_lock_for(deadline.time_left()) { + Some(ref s) => { + if s.is_ready() { + if s.info.originated { + egress += 1 + } else { + ingress += 1; + } + } + } + None => return None, + } + } + } else { + return None; + } + + if let Some(lock) = self.handshakes.try_read_for(deadline.time_left()) { + return Some((lock.len(), egress, ingress)); + } else { + return None; + } + } + + /// Creates a new session and adds it to the session container. + /// returns the token ID of the new session, or an Error if not successful. + pub fn create_handshake_connection( + &self, + socket: TcpStream, + id: Option<ðereum_types::H512>, + io: &IoContext, + nonce: &H256, + host: &HostInfo, + ) -> Result { + let mut node_ids = self.node_id_to_session.lock(); + + // if we known the ID, we also require a lock on the egress map in the same order as we do use to read the egress map + + let handshakes_egress_map = id.map(|_| self.handshakes_egress_map.write()); + + let mut handshakes = self.handshakes.write(); + + if self.sessions.read().len() >= self.max_sessions { + return Err(ErrorKind::TooManyConnections.into()); + } + + if handshakes.len() >= self.max_handshakes { + return Err(ErrorKind::TooManyConnections.into()); + } + + if let Some(node_id) = id { + // check if there is already a connection for the given node id. 
+ if let Some(existing_peer_id) = node_ids.get_mut(node_id) { + let existing_session_mutex_o = self.get_session(existing_peer_id.clone()); + + if let Some(existing_session_mutex) = existing_session_mutex_o { + let session = existing_session_mutex.lock(); + if let Some(id_from_session) = &session.info.id { + if session.info.id == Some(*node_id) { + // we got already got a session for the specified node. + // maybe the old session is already scheduled for getting deleted. + if !session.expired() { + return Err(ErrorKind::AlreadyExists.into()); + } + } else { + error!(target: "network", "host cache inconsistency: Session node id mismatch. expected: {} is {}.", existing_peer_id, id_from_session); + return Err(ErrorKind::HostCacheInconsistency.into()); + } + } else { + error!(target: "network", "host cache inconsistency: Session has no Node_id defined where it should for {}", existing_peer_id); + return Err(ErrorKind::HostCacheInconsistency.into()); + } + // session guard is dropped here + } + } + } + + let next_free_token = self.create_token_id_for_handshake(&mut handshakes)?; + + trace!(target: "network", "creating session for handshaking peer: {} token: {}", socket_address_to_string(&socket), next_free_token); + // we dont know the NodeID, + // we still need a session to do the handshake. + + let new_session = Session::new(io, socket, next_free_token.clone(), id, nonce, host); + // the token is already registerd. + match new_session { + Ok(session) => { + let session = Arc::new(Mutex::new(session)); + handshakes.insert(next_free_token, session.clone()); // Insert into handshakes map + if let Some(mut egress_map) = handshakes_egress_map { + egress_map.insert(id.unwrap().clone(), next_free_token); + } + // register the stream for the new session. 
+ if let Err(err) = io.register_stream(next_free_token) { + debug!(target: "network", "Failed to register stream for token: {} : {}", next_free_token, err); + } + return Ok(next_free_token); + } + Err(e) => { + error!(target: "network", "Failed to create handshake session for: {}", next_free_token); + return Err(e); + } + } + } + + pub fn get_session_for(&self, id: &NodeId) -> Option { + self.node_id_to_session + .lock() + .get_mut(id) + .cloned() + .map_or(None, |peer_id| { + let sessions = self.sessions.read(); + sessions.get(&peer_id).cloned() + }) + } + + pub fn get_handshake_for(&self, id: &NodeId) -> Option { + self.handshakes_egress_map + .read() + .get(id) + .cloned() + .map_or(None, |peer_id| { + self.handshakes.read().get(&peer_id).cloned() + }) + } + + pub fn node_id_to_peer_id( + &self, + node_id: &NodeId, + only_available_sessions: bool, + ) -> Option { + self.node_id_to_session + .lock() + .get_mut(node_id) + .map_or(None, |peer_id| { + if !only_available_sessions { + return Some(*peer_id); + } + let sessions = self.sessions.read(); + + // we can do additional checks: + // we could ensure that the Node ID matches. + // we could also read the flag and check if it is not marked for + // getting disconnected. + + if sessions.contains_key(peer_id) { + return Some(*peer_id); + } + + return None; + }) + } + + // This method will now handle both registration and promotion if applicable + pub fn register_finalized_handshake( + &self, + session: &mut Session, + io: &IoContext, + ) -> Result { + let token = session.token(); + let id = session.id(); + + trace!(target: "network", "register_finalized_handshake for token: {} with id: {:?}", token,id); + let node_id = match id { + Some(id) => id.clone(), + None => { + error!(target: "network", "Tried to register finalized handshake without node id"); + // We have no Node ID, so we can't promote it to a full session mapped by Node ID. 
+ // This might indicate an error state, or a handshake that failed to yield a Node ID. + // For now, we'll just log and return. + return Err(ErrorKind::HostCacheInconsistency.into()); + } + }; + + let mut node_ids_lock = self.node_id_to_session.lock(); + let mut sessions_lock = self.sessions.write(); + let mut handshakes_egress_map = self.handshakes_egress_map.write(); + let mut handshakes_lock = self.handshakes.write(); + + // 1. Try to promote from handshakes map + if let Some(handshake_session) = handshakes_lock.remove(&token) { + // we remove the known handshake here. + // for ingress handshakes, this call doesnt do anything, + // because we can only track egress handshakes. + // but thats fine. + handshakes_egress_map.remove(&node_id); + + // Check session limit before promoting + if sessions_lock.len() >= self.max_sessions { + error!(target: "network", "Failed to promote handshake {}: too many active sessions.", token); + // The handshake session is removed from 'handshakes_lock' but not added to 'sessions_lock'. + // This session will effectively be dropped, and eventually cleaned up by `deregister_session_stream`. + return Err(ErrorKind::TooManyConnections.into()); + } + + // either we reuse an old token, or we create a new token. 
+ let upgraded_token = match node_ids_lock.get_mut(&node_id) { + Some(t) => t.clone(), + None => self.create_token_id(&node_id, &mut node_ids_lock), + }; + + io.register_stream(upgraded_token.clone())?; + session.update_token_id(upgraded_token)?; + + // Move to the sessions map + sessions_lock.insert(upgraded_token, handshake_session.clone()); + + // Register/update the NodeId to session token mapping + if let Some(old_token) = node_ids_lock.insert(node_id.clone(), upgraded_token) { + if old_token != upgraded_token { + debug!(target: "network", "Handshake completed: changed primary session for node id {} from {} to {}", node_id, old_token, upgraded_token); + } + } else { + debug!(target: "network", "Handshake completed: node id {} registered primary session token {}", node_id, upgraded_token); + } + return Ok(upgraded_token); + } else { + return Err(ErrorKind::HostCacheInconsistency.into()); + } + } + + // handles duplicated sessions and desides wich one to be deleted. a duplicated session if it exists in a deterministic way, so both sides agree on the same session to keep. + // returns if this session is marked for deletion, and not being accepted by the SessionContainer. + // see: https://github.com/DMDcoin/diamond-node/issues/252 + pub fn should_delete_duplicate_session(&self, session: &SharedSession) -> Option { + let (node_id, peer_id, uid) = { + let lock = session.lock(); + let peer_id = lock.token().clone(); + + let node_id = match lock.id() { + Some(id) => id.clone(), + None => { + // based on the control flow of the software, this should never happen. + warn!(target: "network", "Tried to delete duplicate session without node id"); + return None; // we have no node id, so we can not delete it. + } + }; + + let uid = match lock.info.session_uid { + Some(u) => u.clone(), + None => { + // based on the control flow of the software, this should never happen. 
+ warn!(target: "network", "Tried to delete duplicate session without session uid"); + return None; // we have no session uid, so we can not delete it. + } + }; + + (node_id, peer_id, uid) + }; + + if let Some(existing_peer_id) = self.node_id_to_peer_id(&node_id, true) { + if existing_peer_id != peer_id { + // there may be an active session for this peer id. + let existing_session = self.get_session_for(&node_id); + if let Some(existing_session_mutex) = existing_session { + let existing_lock = existing_session_mutex.lock(); + if existing_lock.expired() { + // other session is already about to get deleted. + trace!(target:"network", "existing peer session {existing_peer_id} is already about to get deleted."); + return None; + } + if let Some(existing_uid) = existing_lock.info.session_uid { + // the highest RNG wins. + if existing_uid.lt(&uid) { + // we keep the existing session, and delete the new one. + trace!(target: "network", "Session {peer_id} has a duplicate :{existing_peer_id} for {node_id}, deleting this session"); + // we savely mark this connection to get killed softly. + return Some(peer_id); + } else { + trace!(target: "network", "Session {peer_id} has a duplicate :{existing_peer_id} for {node_id}, deleting duplicated session"); + // and delete the existing one. 
+ return Some(existing_peer_id); + } + } + } else { + trace!(target: "network", "No session active for {node_id} with peer id {existing_peer_id}"); + } + + trace!(target: "network", "Session {peer_id} has a duplicate :{existing_peer_id} {node_id}"); + return Some(existing_peer_id); + } + } else { + trace!(target: "network", "No session known for {node_id}"); + } + + return None; + } + + pub(crate) fn deregister_handshake_stream( + &self, + stream: usize, + event_loop: &mut mio::deprecated::EventLoop, + ) { + if stream < self.first_handshake || stream >= self.last_handshake { + warn!(target: "network", "Tried to deregister handshake stream {} but it is out of range.", stream); + return; + } + + if let Some(connection) = self.get_handshake(stream) { + let c = connection.lock(); + if c.expired() { + // make sure it is the same connection that the event was generated for + c.deregister_socket(event_loop) + .expect("Error deregistering socket"); + drop(c); + + self.handshakes.write().remove(&stream); + //RwLockUpgradableReadGuard::<'_, parking_lot::RawRwLock, BTreeMap>>>::upgrade(connections).remove(&stream); + } else { + debug!(target: "network", "Tried to deregister handshake stream {} but it is not expired.", stream); + } + } else { + debug!(target: "network", "Tried to deregister handshake stream {} but it does not exist.", stream); + } + } + + pub(crate) fn deregister_session_stream( + &self, + stream: usize, + + event_loop: &mut mio::deprecated::EventLoop, + ) { + if stream < self.last_handshake + 1 { + warn!(target: "network", "Tried to deregister session stream {} but it is out of range.", stream); + return; + } + + if let Some(connection) = self.get_session(stream) { + let c = connection.lock(); + if c.expired() { + // make sure it is the same connection that the event was generated for + c.deregister_socket(event_loop) + .expect("Error deregistering socket"); + drop(c); + self.sessions.write().remove(&stream); + } else { + debug!(target: "network", "Tried to 
deregister session stream {} but it is not expired.", stream); + } + } else { + debug!(target: "network", "Tried to deregister session stream {} but it does not exist.", stream); + } + } + + pub(crate) fn get_handshake(&self, stream: usize) -> Option { + trace!(target: "network", "get_handshake for stream: {}. total handshakes: {}", stream, self.handshakes.read().len() ); + self.handshakes.read().get(&stream).cloned() + } + + pub(crate) fn get_session(&self, stream: usize) -> Option { + self.sessions.read().get(&stream).cloned() + } +} diff --git a/crates/net/network-devp2p/tests/tests.rs b/crates/net/network-devp2p/tests/tests.rs index ea501ad310..52533b50bf 100644 --- a/crates/net/network-devp2p/tests/tests.rs +++ b/crates/net/network-devp2p/tests/tests.rs @@ -18,22 +18,22 @@ extern crate env_logger; extern crate ethcore_io as io; extern crate ethcore_network; extern crate ethcore_network_devp2p; -extern crate ethereum_types; +use ethereum_types; extern crate parity_bytes; extern crate parity_crypto as crypto; extern crate parking_lot; +use crate::io::TimerToken; use crypto::publickey::{Generator, Random}; use ethcore_network::*; use ethcore_network_devp2p::NetworkService; use ethereum_types::U64; -use io::TimerToken; use parity_bytes::Bytes; use parking_lot::Mutex; use std::{ sync::{ - atomic::{AtomicBool, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicBool, Ordering as AtomicOrdering}, }, thread, time::*, diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 5cc8a560a5..b6ee75a951 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -1,14 +1,16 @@ [package] description = "Ethcore network library" -homepage = "https://github.com/openethereum/openethereum" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "ethcore-network" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] error-chain = { version = "0.12", default-features = false } 
parity-crypto = { version = "0.6.2", features = [ "publickey"] } +parity-version = { path = "../../../crates/util/version" } ethcore-io = { path = "../../runtime/io" } ethereum-types = "0.9.2" ethkey = { path = "../../../crates/accounts/ethkey" } @@ -20,6 +22,7 @@ parity-snappy = "0.1" semver = {version="0.9.0", features=["serde"]} serde = "1.0" serde_derive = "1.0" +log = "0.4" [dev-dependencies] assert_matches = "1.2" diff --git a/crates/net/network/src/client_version.rs b/crates/net/network/src/client_version.rs index 1de0006dfe..f853e51987 100644 --- a/crates/net/network/src/client_version.rs +++ b/crates/net/network/src/client_version.rs @@ -14,23 +14,15 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -#![warn(missing_docs)] - //! Parse ethereum client ID strings and provide querying functionality -use semver::{Identifier, Version}; +use semver::Version; use std::fmt; /// Parity client string prefix const LEGACY_CLIENT_ID_PREFIX: &str = "Parity-Ethereum"; const CURRENT_CLIENT_ID_PREFIX: &str = "OpenEthereum"; -lazy_static! { -/// Parity versions starting from this will accept block bodies requests -/// of 256 bodies - static ref PARITY_CLIENT_LARGE_REQUESTS_VERSION: Version = Version::parse("2.4.0").unwrap(); -} - /// Description of the software version running in a peer /// according to https://github.com/ethereum/wiki/wiki/Client-Version-Strings /// This structure as it is represents the format used by Parity clients. 
Other @@ -57,9 +49,6 @@ impl ParityClientData { os: String, compiler: String, ) -> Self { - // Flags logic - let can_handle_large_requests = &semver >= &PARITY_CLIENT_LARGE_REQUESTS_VERSION; - // Instantiate and return ParityClientData { name: name, @@ -67,8 +56,7 @@ impl ParityClientData { semver: semver, os: os, compiler: compiler, - - can_handle_large_requests: can_handle_large_requests, + can_handle_large_requests: true, // all diamond-nodes can handle large requests } } @@ -122,7 +110,7 @@ impl Default for ClientVersion { /// Provide information about what a particular version of a /// peer software can do pub trait ClientCapabilities { - /// Parity versions before PARITY_CLIENT_LARGE_REQUESTS_VERSION would not + /// Old Parity versions would not /// check the accumulated size of a packet when building a response to a /// GET_BLOCK_BODIES request. If the packet was larger than a given limit, /// instead of sending fewer blocks no packet would get sent at all. Query @@ -157,17 +145,7 @@ impl ClientCapabilities for ClientVersion { fn is_hbbft(&self) -> bool { match self { ClientVersion::ParityClient(client) => { - for id in client.semver.pre.iter() { - match id { - Identifier::AlphaNumeric(alpha) => { - if alpha.contains("hbbft") { - return true; - } - } - Identifier::Numeric(_) => {} - } - } - return false; + return client.name() == parity_version::NODE_SOFTWARE_NAME; } ClientVersion::ParityUnknownFormat(_) => false, ClientVersion::Other(_) => false, @@ -179,6 +157,7 @@ impl ClientCapabilities for ClientVersion { fn is_parity(client_id: &str) -> bool { client_id.starts_with(LEGACY_CLIENT_ID_PREFIX) || client_id.starts_with(CURRENT_CLIENT_ID_PREFIX) + || client_id.starts_with(parity_version::NODE_SOFTWARE_NAME) } fn is_nethermind(client_id: &str) -> bool { @@ -391,8 +370,8 @@ pub mod tests { } #[test] - pub fn client_version_when_str_parity_long_format_and_valid_and_identity_multiple_tokens_then_all_fields_match( - ) { + pub fn 
client_version_when_str_parity_long_format_and_valid_and_identity_multiple_tokens_then_all_fields_match() + { let client_version_string = make_multitoken_identity_long_version_string(); if let ClientVersion::ParityClient(client_version) = @@ -434,8 +413,8 @@ pub mod tests { } #[test] - pub fn client_version_when_parity_format_and_invalid_then_equals_parity_unknown_client_version_string( - ) { + pub fn client_version_when_parity_format_and_invalid_then_equals_parity_unknown_client_version_string() + { // This is invalid because version has no leading 'v' let client_version_string = format!( "{}/{}/{}/{}", @@ -453,8 +432,8 @@ pub mod tests { } #[test] - pub fn client_version_when_parity_format_without_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string( - ) { + pub fn client_version_when_parity_format_without_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string() + { let client_version_string = format!( "{}/v{}/{}", CURRENT_CLIENT_ID_PREFIX, PARITY_CLIENT_SEMVER, PARITY_CLIENT_OS, @@ -468,8 +447,8 @@ pub mod tests { } #[test] - pub fn client_version_when_parity_format_with_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string( - ) { + pub fn client_version_when_parity_format_with_identity_and_missing_compiler_field_then_equals_parity_unknown_client_version_string() + { let client_version_string = format!( "{}/{}/v{}/{}", CURRENT_CLIENT_ID_PREFIX, @@ -515,28 +494,6 @@ pub mod tests { assert_eq!(client_version.to_string(), client_version_string); } - #[test] - pub fn client_capabilities_when_parity_old_version_then_handles_large_requests_false() { - let client_version_string: String = make_old_semver_version_string(); - - let client_version = ClientVersion::from(client_version_string.as_str()); - - assert!(!client_version.can_handle_large_requests()); - } - - #[test] - pub fn client_capabilities_when_parity_beta_version_then_not_handles_large_requests_true() { - let 
client_version_string: String = format!( - "{}/v{}/{}/{}", - "Parity-Ethereum", "2.4.0-beta", "x86_64-linux-gnu", "rustc1.31.1" - ) - .to_string(); - - let client_version = ClientVersion::from(client_version_string.as_str()); - - assert!(!client_version.can_handle_large_requests()); - } - #[test] pub fn client_version_when_to_owned_then_both_objects_equal() { let client_version_string: String = make_old_semver_version_string(); diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index f37ebd6a82..b460763167 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -18,8 +18,8 @@ // https://github.com/openethereum/openethereum/issues/10302 #![allow(deprecated)] +use crate::io::IoError; use crypto; -use io::IoError; use libc::{EMFILE, ENFILE}; use rlp; use snappy; @@ -160,6 +160,24 @@ error_chain! { display("Too many open files on system. Consider closing some processes/release some file handlers or increas the system-wide resource limits and restart openethereum."), } + #[doc = "A connection to the specified nodeId already exists."] + AlreadyExists { + description("A connection to the specified nodeId already exists."), + display("A connection to the specified nodeId already exists."), + } + + #[doc = "Reached maximum connections"] + TooManyConnections { + description("The maximum number of connections has been reached."), + display("The hardcoded maximum number of connections has been reached on this host."), + } + + #[doc = "A connection to the specified NodeId exists, but there is a mismatch in the host cache."] + HostCacheInconsistency { + description("A connection to the specified nodeId already exists."), + display("A connection to the specified NodeId exists, but there is a mismatch in the host cache."), + } + #[doc = "An unknown IO error occurred."] Io(err: io::Error) { description("IO Error"), diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 1082004a62..c10c0f9f54 100644 
--- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -17,7 +17,7 @@ #![recursion_limit = "128"] extern crate ethcore_io as io; -extern crate ethereum_types; +use ethereum_types::{self, H256}; extern crate ethkey; extern crate ipnetwork; extern crate libc; @@ -37,7 +37,6 @@ extern crate assert_matches; #[macro_use] extern crate error_chain; -#[macro_use] extern crate lazy_static; pub mod client_version; @@ -45,11 +44,13 @@ pub mod client_version; mod connection_filter; mod error; +pub use crate::{ + error::{DisconnectReason, Error, ErrorKind}, + io::TimerToken, +}; pub use connection_filter::{ConnectionDirection, ConnectionFilter}; -pub use error::{DisconnectReason, Error, ErrorKind}; -pub use io::TimerToken; -use client_version::ClientVersion; +use crate::client_version::ClientVersion; use crypto::publickey::Secret; use ethereum_types::{H512, U64}; use ipnetwork::{IpNetwork, IpNetworkError}; @@ -116,9 +117,9 @@ pub struct SessionInfo { /// Peer RLPx protocol version pub protocol_version: u32, /// Session protocol capabilities - pub capabilities: Vec, + capabilities: Vec, /// Peer protocol capabilities - pub peer_capabilities: Vec, + peer_capabilities: Vec, /// Peer ping delay pub ping: Option, /// True if this session was originated by us. @@ -127,6 +128,61 @@ pub struct SessionInfo { pub remote_address: String, /// Local endpoint address of the session pub local_address: String, + + /// A unique identifier that is the same on both sessions endpoints after the handshake is completed. + /// it is the XOR of the Nonces for the handshake that initialized this Session. + pub session_uid: Option, + + /// peer is capable of doing EIP 2464 transaction gossiping: https://eips.ethereum.org/EIPS/eip-2464 + is_pooled_transactions_capable: bool, +} + +impl SessionInfo { + /// new, SessionInfo that did not handshake yet. 
+ pub fn new(id: Option<&NodeId>, local_addr: String, originated: bool) -> Self { + return Self { + id: id.cloned(), + client_version: ClientVersion::from(""), + protocol_version: 0, + capabilities: Vec::new(), + peer_capabilities: Vec::new(), + ping: None, + originated, + remote_address: "Handshake".to_owned(), + local_address: local_addr, + is_pooled_transactions_capable: false, // we don't know yet, we will know once we get the capabilities + session_uid: None, // session-uid is set after the handshake has completed. + }; + } + + /// on handshake, we get the peer id and the client version. + pub fn set_capabilities( + &mut self, + session_capabilities: Vec, + peer_capabilities: Vec, + ) { + self.capabilities = session_capabilities; + self.peer_capabilities = peer_capabilities; + + // ETH_PROTOCOL_VERSION_65 + self.is_pooled_transactions_capable = self + .peer_capabilities + .iter() + .any(|x| x.protocol.low_u64() == 0x657468 /* hex for "eth" */ && x.version == 65); + } + + pub fn capabilities(&self) -> &Vec { + &self.capabilities + } + + pub fn peer_capabilities(&self) -> &Vec { + &self.peer_capabilities + } + + /// Returns if the peer is capable of doing EIP 2464 transaction gossiping: https://eips.ethereum.org/EIPS/eip-2464 + pub fn is_pooled_transactions_capable(&self) -> bool { + return self.is_pooled_transactions_capable; + } } #[derive(Debug, Clone, PartialEq, Eq)] @@ -316,7 +372,7 @@ pub trait NetworkContext { fn is_reserved_peer(&self, peer: PeerId) -> bool; /// Returns the peer ID for a given node id, if a corresponding peer exists. 
- fn node_id_to_peer_id(&self, node_id: NodeId) -> Option; + fn node_id_to_peer_id(&self, node_id: &NodeId) -> Option; } impl<'a, T> NetworkContext for &'a T @@ -377,8 +433,8 @@ where (**self).is_reserved_peer(peer) } - fn node_id_to_peer_id(&self, node_id: NodeId) -> Option { - (**self).node_id_to_peer_id(node_id) + fn node_id_to_peer_id(&self, node_id: &NodeId) -> Option { + (**self).node_id_to_peer_id(&node_id) } } diff --git a/crates/net/node-filter/Cargo.toml b/crates/net/node-filter/Cargo.toml index 28d8b53211..3d65751f06 100644 --- a/crates/net/node-filter/Cargo.toml +++ b/crates/net/node-filter/Cargo.toml @@ -1,10 +1,11 @@ [package] -description = "OpenEthereum Smart Contract based Node Filter, Manage Permissions of Network Connections" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node Smart Contract based Node Filter, Manage Permissions of Network Connections" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "node-filter" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] ethcore = { path = "../../ethcore"} @@ -12,10 +13,10 @@ ethcore-network = { path = "../network" } ethcore-network-devp2p = { path = "../network-devp2p" } ethereum-types = "0.9.2" log = "0.4" -parking_lot = "0.11.1" +parking_lot = "0.12" ethabi = "12.0.0" ethabi-derive = { git = 'https://github.com/rimrakhimov/ethabi', branch = 'rimrakhimov/remove-syn-export-span' } -ethabi-contract = "11.0.0" +ethabi-contract = "16.0.0" lru-cache = "0.1" [dev-dependencies] diff --git a/crates/net/node-filter/src/lib.rs b/crates/net/node-filter/src/lib.rs index 6d72290a11..cd2b55110b 100644 --- a/crates/net/node-filter/src/lib.rs +++ b/crates/net/node-filter/src/lib.rs @@ -20,7 +20,7 @@ extern crate ethabi; extern crate ethcore; extern crate ethcore_network as network; extern crate ethcore_network_devp2p as devp2p; -extern crate ethereum_types; +use ethereum_types; extern crate lru_cache; extern crate 
parking_lot; @@ -97,14 +97,15 @@ impl ConnectionFilter for NodeFilter { #[cfg(test)] mod test { use super::NodeFilter; + use crate::io::IoChannel; use ethcore::{ client::{BlockChainClient, Client, ClientConfig}, + exit::ShutdownManager, miner::Miner, spec::Spec, test_helpers, }; use ethereum_types::Address; - use io::IoChannel; use network::{ConnectionDirection, ConnectionFilter, NodeId}; use std::{ str::FromStr, @@ -127,6 +128,7 @@ mod test { client_db, Arc::new(Miner::new_for_tests(&spec, None)), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let filter = NodeFilter::new( diff --git a/crates/rpc-common/Cargo.toml b/crates/rpc-common/Cargo.toml index 2105b8793e..e9cd18850f 100644 --- a/crates/rpc-common/Cargo.toml +++ b/crates/rpc-common/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "oe-rpc-common" version = "0.0.0" -edition = "2021" +edition = "2018" description = "Modules common to RPC APIs" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/crates/rpc-common/src/lib.rs b/crates/rpc-common/src/lib.rs index e14cfa3377..5fe37a0dbf 100644 --- a/crates/rpc-common/src/lib.rs +++ b/crates/rpc-common/src/lib.rs @@ -16,4 +16,4 @@ mod types; -pub use types::bytes::Bytes; +pub use crate::types::bytes::Bytes; diff --git a/crates/rpc-servers/Cargo.toml b/crates/rpc-servers/Cargo.toml index 8c90f86c38..e3c3fe70f6 100644 --- a/crates/rpc-servers/Cargo.toml +++ b/crates/rpc-servers/Cargo.toml @@ -1,9 +1,9 @@ [package] -description = "OpenEthereum RPC servers (WS, HTTP, IPC)" +description = "diamond-node RPC servers (WS, HTTP, IPC)" name = "oe-rpc-servers" version = "0.0.0" license = "GPL-3.0" -edition = "2021" +edition = "2018" [lib] diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 0ab40ff95f..b3cb4bd87b 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -4,6 +4,7 @@ name = "parity-rpc" version = "1.12.0" license = "GPL-3.0" authors = ["Parity Technologies "] 
+edition = "2024" [lib] @@ -12,7 +13,7 @@ ansi_term = "0.10" futures = "0.1.6" log = "0.4" order-stat = "0.1" -parking_lot = "0.11.1" +parking_lot = "0.12" rand = "0.7.3" rand_xorshift = "0.2.0" rustc-hex = "1.0" @@ -72,3 +73,4 @@ tempdir = "0.3.7" [features] accounts = ["ethcore-accounts"] +test-helpers = [] \ No newline at end of file diff --git a/crates/rpc/src/authcodes.rs b/crates/rpc/src/authcodes.rs index 97240cae66..e7713c3820 100644 --- a/crates/rpc/src/authcodes.rs +++ b/crates/rpc/src/authcodes.rs @@ -25,7 +25,7 @@ use std::{ use ethereum_types::H256; use hash::keccak; use itertools::Itertools; -use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; +use rand::{Rng, distributions::Alphanumeric, rngs::OsRng}; /// Providing current time in seconds pub trait TimeProvider { diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 64eb4b9a65..278f138513 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -17,81 +17,47 @@ //! OpenEthereum JSON-RPC Servers (WS, HTTP, IPC). 
#![warn(missing_docs, unused_extern_crates)] -#![cfg_attr(feature = "cargo-clippy", warn(clippy::all, clippy::pedantic))] -#![cfg_attr( - feature = "cargo-clippy", - allow( - // things are often more readable this way - clippy::cast_lossless, - clippy::module_name_repetitions, - clippy::single_match_else, - clippy::type_complexity, - clippy::use_self, - // not practical - clippy::match_bool, - clippy::needless_pass_by_value, - clippy::similar_names, - // don't require markdown syntax for docs - clippy::doc_markdown, - ), - warn(clippy::indexing_slicing) +#![warn(clippy::all, clippy::pedantic)] +#![allow( + // things are often more readable this way + clippy::cast_lossless, + clippy::module_name_repetitions, + clippy::single_match_else, + clippy::type_complexity, + clippy::use_self, + // not practical + clippy::match_bool, + clippy::needless_pass_by_value, + clippy::similar_names, + // don't require markdown syntax for docs + clippy::doc_markdown, )] +#![warn(clippy::indexing_slicing)] #[macro_use] extern crate futures; -extern crate ansi_term; -extern crate itertools; -extern crate order_stat; -extern crate parking_lot; -extern crate rand; -extern crate rustc_hex; -extern crate serde; -extern crate serde_json; -extern crate tokio_timer; -extern crate transient_hashmap; - -extern crate jsonrpc_core; -extern crate jsonrpc_derive; extern crate jsonrpc_http_server as http; extern crate jsonrpc_ipc_server as ipc; -extern crate jsonrpc_pubsub; +use jsonrpc_pubsub; extern crate common_types as types; -extern crate eip_712; -extern crate ethash; -extern crate ethcore; -extern crate ethcore_logger; extern crate ethcore_miner as miner; extern crate ethcore_network as network; extern crate ethcore_sync as sync; -extern crate ethereum_types; -extern crate ethkey; -extern crate ethstore; -extern crate fetch; extern crate keccak_hash as hash; extern crate parity_bytes as bytes; extern crate parity_crypto as crypto; -extern crate parity_runtime; extern crate parity_version as 
version; -extern crate rlp; -extern crate stats; -extern crate vm; #[cfg(any(test, feature = "ethcore-accounts"))] extern crate ethcore_accounts as accounts; -#[cfg(any(test, feature = "ethcore-accounts"))] -extern crate tiny_keccak; - #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; -#[cfg(test)] -extern crate ethjson; - #[cfg(test)] #[macro_use] extern crate pretty_assertions; @@ -100,23 +66,12 @@ extern crate pretty_assertions; #[macro_use] extern crate macros; -#[cfg(test)] -extern crate fake_fetch; - #[cfg(test)] extern crate ethcore_io as io; #[cfg(test)] extern crate ethcore_call_contract as call_contract; -// #[cfg(test)] -extern crate tempdir; - -#[cfg(test)] -extern crate rpc_servers; - -extern crate rpc_common; - pub extern crate jsonrpc_ws_server as ws; mod authcodes; @@ -126,20 +81,23 @@ pub mod v1; pub mod tests; pub use http::{ - cors::AccessControlAllowHeaders, hyper, AccessControlAllowOrigin, DomainsValidation, Host, - RequestMiddleware, RequestMiddlewareAction, + AccessControlAllowOrigin, DomainsValidation, Host, RequestMiddleware, RequestMiddlewareAction, + cors::AccessControlAllowHeaders, hyper, }; pub use ipc::{ MetaExtractor as IpcMetaExtractor, RequestContext as IpcRequestContext, Server as IpcServer, }; pub use jsonrpc_pubsub::Session as PubSubSession; -pub use authcodes::{AuthCodes, TimeProvider}; -pub use v1::{ - block_import::{is_major_importing, is_major_importing_or_waiting}, - dispatch, - extractors::{RpcExtractor, WsDispatcher, WsExtractor, WsStats}, - informant, signer, Metadata, NetworkSettings, Origin, +pub use crate::{ + authcodes::{AuthCodes, TimeProvider}, + v1::{ + Metadata, NetworkSettings, Origin, + block_import::{is_major_importing, is_major_importing_or_waiting}, + dispatch, + extractors::{RpcExtractor, WsDispatcher, WsExtractor, WsStats}, + informant, signer, + }, }; /// RPC HTTP Server instance diff --git a/crates/rpc/src/tests/helpers.rs b/crates/rpc/src/tests/helpers.rs index 923b5a92c8..92d940bf7a 
100644 --- a/crates/rpc/src/tests/helpers.rs +++ b/crates/rpc/src/tests/helpers.rs @@ -22,7 +22,7 @@ use tempdir::TempDir; use parity_runtime::{Runtime, TaskExecutor}; -use authcodes::AuthCodes; +use crate::authcodes::AuthCodes; /// Server with event loop pub struct Server { diff --git a/crates/rpc/src/tests/rpc.rs b/crates/rpc/src/tests/rpc.rs index 53935aa5a9..4183c1632a 100644 --- a/crates/rpc/src/tests/rpc.rs +++ b/crates/rpc/src/tests/rpc.rs @@ -21,10 +21,10 @@ use http::{self, hyper}; use rpc_servers::{HttpServer, MetaIoHandler}; #[cfg(any(test, feature = "test-helpers"))] -use tests::{helpers::Server, http_client}; +use crate::tests::{helpers::Server, http_client}; #[cfg(any(test, feature = "test-helpers"))] -use v1::{extractors, Metadata}; +use crate::v1::{Metadata, extractors}; #[cfg(any(test, feature = "test-helpers"))] fn serve(handler: Option>) -> Server { @@ -58,11 +58,11 @@ fn request(server: Server, request: &str) -> http_client::Response { #[cfg(test)] mod tests { - use super::{request, Server}; + use super::{Server, request}; + use crate::v1::Metadata; use jsonrpc_core::{MetaIoHandler, Value}; - use v1::Metadata; - fn serve() -> (Server<::HttpServer>, ::std::net::SocketAddr) { + fn serve() -> (Server, ::std::net::SocketAddr) { let mut io = MetaIoHandler::default(); io.add_method_with_meta("hello", |_, meta: Metadata| { Ok(Value::String(format!("{}", meta.origin))) diff --git a/crates/rpc/src/tests/ws.rs b/crates/rpc/src/tests/ws.rs index fb922df42f..f0a74c6bc7 100644 --- a/crates/rpc/src/tests/ws.rs +++ b/crates/rpc/src/tests/ws.rs @@ -21,11 +21,13 @@ use std::sync::Arc; use jsonrpc_core::MetaIoHandler; use ws; -use tests::{ - helpers::{GuardedAuthCodes, Server}, - http_client, +use crate::{ + tests::{ + helpers::{GuardedAuthCodes, Server}, + http_client, + }, + v1::{extractors, informant}, }; -use v1::{extractors, informant}; /// Setup a mock signer for tests pub fn serve() -> (Server, usize, GuardedAuthCodes) { diff --git 
a/crates/rpc/src/v1/extractors.rs b/crates/rpc/src/v1/extractors.rs index bd7bb05ff4..166931504b 100644 --- a/crates/rpc/src/v1/extractors.rs +++ b/crates/rpc/src/v1/extractors.rs @@ -21,7 +21,7 @@ use std::{ sync::Arc, }; -use authcodes; +use crate::authcodes; use ethereum_types::H256; use http::hyper; use ipc; @@ -30,7 +30,7 @@ use jsonrpc_core::futures::future::Either; use jsonrpc_pubsub::Session; use ws; -use v1::{informant::RpcStats, Metadata, Origin}; +use crate::v1::{Metadata, Origin, informant::RpcStats}; /// Common HTTP & IPC metadata extractor. pub struct RpcExtractor; @@ -184,11 +184,7 @@ fn auth_token_hash(codes_path: &Path, protocol: &str, save_file: bool) -> Option } } - if res { - Some(auth) - } else { - None - } + if res { Some(auth) } else { None } }); } @@ -261,11 +257,11 @@ impl> core::Middleware for WsDispatcher< #[cfg(test)] mod tests { use super::RpcExtractor; + use crate::Origin; use http::{ - hyper::{Body, Request}, MetaExtractor, + hyper::{Body, Request}, }; - use Origin; #[test] fn should_extract_rpc_origin() { diff --git a/crates/rpc/src/v1/helpers/dispatch/full.rs b/crates/rpc/src/v1/helpers/dispatch/full.rs index 30daae0859..516e6d40ac 100644 --- a/crates/rpc/src/v1/helpers/dispatch/full.rs +++ b/crates/rpc/src/v1/helpers/dispatch/full.rs @@ -16,26 +16,26 @@ use std::sync::Arc; +use crate::types::transaction::{PendingTransaction, SignedTransaction}; use ethcore::{ client::BlockChainClient, miner::{self, MinerService}, }; use ethereum_types::{Address, H256, U256}; use parking_lot::Mutex; -use types::transaction::{PendingTransaction, SignedTransaction}; +use crate::v1::{ + helpers::{FilledTransactionRequest, TransactionRequest, errors, nonce}, + types::RichRawTransaction as RpcRichRawTransaction, +}; use jsonrpc_core::{ - futures::{future, Future, IntoFuture}, BoxFuture, Result, -}; -use v1::{ - helpers::{errors, nonce, FilledTransactionRequest, TransactionRequest}, - types::RichRawTransaction as RpcRichRawTransaction, + futures::{Future, 
IntoFuture, future}, }; use super::{ - default_gas_price, prospective_signer::ProspectiveSigner, Accounts, Dispatcher, PostSign, - SignWith, + Accounts, Dispatcher, PostSign, SignWith, default_gas_price, + prospective_signer::ProspectiveSigner, }; /// A dispatcher which uses references to a client and miner in order to sign diff --git a/crates/rpc/src/v1/helpers/dispatch/mod.rs b/crates/rpc/src/v1/helpers/dispatch/mod.rs index 34b77b04d9..7f7a01807b 100644 --- a/crates/rpc/src/v1/helpers/dispatch/mod.rs +++ b/crates/rpc/src/v1/helpers/dispatch/mod.rs @@ -24,7 +24,7 @@ mod signing; #[cfg(not(any(test, feature = "accounts")))] mod signing { use super::*; - use v1::helpers::errors; + use crate::v1::helpers::errors; /// Dummy signer implementation #[derive(Debug, Clone)] @@ -81,26 +81,22 @@ mod signing { } pub use self::{full::FullDispatcher, signing::Signer}; -pub use v1::helpers::nonce::Reservations; +pub use crate::v1::helpers::nonce::Reservations; use std::{fmt::Debug, ops::Deref, sync::Arc}; +use crate::types::{ + BlockNumber, + transaction::{PendingTransaction, SignedTransaction}, +}; use bytes::Bytes; use crypto::publickey::Signature; use ethcore::{client::BlockChainClient, miner::MinerService}; use ethereum_types::{Address, H256, H520, U256}; use ethkey::Password; use hash::keccak; -use types::{ - transaction::{PendingTransaction, SignedTransaction}, - BlockNumber, -}; -use jsonrpc_core::{ - futures::{future, Future, IntoFuture}, - BoxFuture, Error, Result, -}; -use v1::{ +use crate::v1::{ helpers::{ConfirmationPayload, FilledTransactionRequest, TransactionRequest}, types::{ Bytes as RpcBytes, ConfirmationPayload as RpcConfirmationPayload, ConfirmationResponse, @@ -108,6 +104,10 @@ use v1::{ EthSignRequest as RpcEthSignRequest, RichRawTransaction as RpcRichRawTransaction, }, }; +use jsonrpc_core::{ + BoxFuture, Error, Result, + futures::{Future, IntoFuture, future}, +}; /// Has the capability to dispatch, sign, and decrypt. 
/// diff --git a/crates/rpc/src/v1/helpers/dispatch/prospective_signer.rs b/crates/rpc/src/v1/helpers/dispatch/prospective_signer.rs index 8768ad6413..7bb57da1c5 100644 --- a/crates/rpc/src/v1/helpers/dispatch/prospective_signer.rs +++ b/crates/rpc/src/v1/helpers/dispatch/prospective_signer.rs @@ -16,15 +16,15 @@ use std::sync::Arc; +use crate::types::transaction::SignedTransaction; use ethereum_types::U256; use jsonrpc_core::{ - futures::{Async, Future, IntoFuture, Poll}, Error, Result, + futures::{Async, Future, IntoFuture, Poll}, }; -use types::transaction::SignedTransaction; use super::{Accounts, PostSign, SignWith, WithToken}; -use v1::helpers::{errors, nonce, FilledTransactionRequest}; +use crate::v1::helpers::{FilledTransactionRequest, errors, nonce}; #[derive(Debug, Clone, Copy)] enum ProspectiveSignerState { diff --git a/crates/rpc/src/v1/helpers/dispatch/signing.rs b/crates/rpc/src/v1/helpers/dispatch/signing.rs index 03f244ba76..e69be7c082 100644 --- a/crates/rpc/src/v1/helpers/dispatch/signing.rs +++ b/crates/rpc/src/v1/helpers/dispatch/signing.rs @@ -16,20 +16,20 @@ use std::sync::Arc; +use crate::types::transaction::{ + AccessListTx, Action, EIP1559TransactionTx, SignedTransaction, Transaction, TypedTransaction, + TypedTxId, +}; use accounts::AccountProvider; use bytes::Bytes; -use crypto::{publickey::Signature, DEFAULT_MAC}; +use crypto::{DEFAULT_MAC, publickey::Signature}; use ethereum_types::{Address, H256, U256}; use jsonrpc_core::{Error, ErrorCode}; -use types::transaction::{ - AccessListTx, Action, EIP1559TransactionTx, SignedTransaction, Transaction, TypedTransaction, - TypedTxId, -}; +use crate::v1::helpers::{FilledTransactionRequest, errors}; use jsonrpc_core::Result; -use v1::helpers::{errors, FilledTransactionRequest}; -use super::{eth_data_hash, SignMessage, SignWith, WithToken}; +use super::{SignMessage, SignWith, WithToken, eth_data_hash}; /// Account-aware signer pub struct Signer { diff --git a/crates/rpc/src/v1/helpers/eip191.rs 
b/crates/rpc/src/v1/helpers/eip191.rs index 51bd1580d6..00365b6e58 100644 --- a/crates/rpc/src/v1/helpers/eip191.rs +++ b/crates/rpc/src/v1/helpers/eip191.rs @@ -15,16 +15,16 @@ // along with OpenEthereum. If not, see . //! EIP-191 compliant decoding + hashing -use eip_712::{hash_structured_data, EIP712}; +use crate::v1::{ + helpers::{dispatch::eth_data_hash, errors}, + types::{Bytes, EIP191Version, PresignedTransaction}, +}; +use eip_712::{EIP712, hash_structured_data}; use ethereum_types::H256; use hash::keccak; use jsonrpc_core::Error; -use serde_json::{from_value, Value}; +use serde_json::{Value, from_value}; use std::fmt::Display; -use v1::{ - helpers::{dispatch::eth_data_hash, errors}, - types::{Bytes, EIP191Version, PresignedTransaction}, -}; /// deserializes and hashes the message depending on the version specifier pub fn hash_message(version: EIP191Version, message: Value) -> Result { @@ -33,7 +33,7 @@ pub fn hash_message(version: EIP191Version, message: Value) -> Result(message).map_err(map_serde_err("StructuredData"))?; - hash_structured_data(typed_data).map_err(|err| errors::invalid_call_data(err.kind()))? + hash_structured_data(typed_data).map_err(|err| errors::invalid_call_data(err))? 
} EIP191Version::PresignedTransaction => { diff --git a/crates/rpc/src/v1/helpers/errors.rs b/crates/rpc/src/v1/helpers/errors.rs index 94585e03c8..d67b4d8094 100644 --- a/crates/rpc/src/v1/helpers/errors.rs +++ b/crates/rpc/src/v1/helpers/errors.rs @@ -18,14 +18,16 @@ use std::fmt; +use crate::{ + types::{blockchain_info::BlockChainInfo, transaction::Error as TransactionError}, + v1::{impls::EthClientOptions, types::BlockNumber}, +}; use ethcore::{ client::{BlockChainClient, BlockId}, error::{CallError, Error as EthcoreError, ErrorKind}, }; use jsonrpc_core::{Error, ErrorCode, Result as RpcResult, Value}; use rlp::DecoderError; -use types::{blockchain_info::BlockChainInfo, transaction::Error as TransactionError}; -use v1::{impls::EthClientOptions, types::BlockNumber}; use vm::Error as VMError; mod codes { @@ -532,10 +534,16 @@ pub fn require_experimental(allow_experimental_rpcs: bool, eip: &str) -> Result< Ok(()) } else { Err(Error { - code: ErrorCode::ServerError(codes::EXPERIMENTAL_RPC), - message: format!("This method is not part of the official RPC API yet (EIP-{}). Run with `--jsonrpc-experimental` to enable it.", eip), - data: Some(Value::String(format!("See EIP: https://eips.ethereum.org/EIPS/eip-{}", eip))), - }) + code: ErrorCode::ServerError(codes::EXPERIMENTAL_RPC), + message: format!( + "This method is not part of the official RPC API yet (EIP-{}). Run with `--jsonrpc-experimental` to enable it.", + eip + ), + data: Some(Value::String(format!( + "See EIP: https://eips.ethereum.org/EIPS/eip-{}", + eip + ))), + }) } } diff --git a/crates/rpc/src/v1/helpers/external_signer/oneshot.rs b/crates/rpc/src/v1/helpers/external_signer/oneshot.rs index 948951aad3..f3a975ae01 100644 --- a/crates/rpc/src/v1/helpers/external_signer/oneshot.rs +++ b/crates/rpc/src/v1/helpers/external_signer/oneshot.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::v1::helpers::errors; use jsonrpc_core::{ - futures::{self, sync::oneshot, Future}, Error, + futures::{self, Future, sync::oneshot}, }; -use v1::helpers::errors; pub type Res = Result; diff --git a/crates/rpc/src/v1/helpers/external_signer/signing_queue.rs b/crates/rpc/src/v1/helpers/external_signer/signing_queue.rs index 23ddb4a2b5..ce09b59fa6 100644 --- a/crates/rpc/src/v1/helpers/external_signer/signing_queue.rs +++ b/crates/rpc/src/v1/helpers/external_signer/signing_queue.rs @@ -17,15 +17,15 @@ use std::collections::BTreeMap; use super::oneshot; -use ethereum_types::U256; -use parking_lot::{Mutex, RwLock}; -use v1::{ +use crate::v1::{ helpers::{ errors, requests::{ConfirmationPayload, ConfirmationRequest}, }, types::{ConfirmationResponse, Origin}, }; +use ethereum_types::U256; +use parking_lot::{Mutex, RwLock}; use jsonrpc_core::Error; @@ -88,6 +88,7 @@ pub trait SigningQueue: Send + Sync { fn len(&self) -> usize; /// Returns true if there are no requests awaiting confirmation. 
+ #[allow(dead_code)] fn is_empty(&self) -> bool; } @@ -242,17 +243,17 @@ impl SigningQueue for ConfirmationsQueue { #[cfg(test)] mod test { - use ethereum_types::{Address, H256, U256}; - use jsonrpc_core::futures::Future; - use parking_lot::Mutex; - use std::sync::Arc; - use v1::{ + use crate::v1::{ helpers::{ - external_signer::{ConfirmationsQueue, QueueEvent, SigningQueue}, ConfirmationPayload, FilledTransactionRequest, + external_signer::{ConfirmationsQueue, QueueEvent, SigningQueue}, }, types::ConfirmationResponse, }; + use ethereum_types::{Address, H256, U256}; + use jsonrpc_core::futures::Future; + use parking_lot::Mutex; + use std::sync::Arc; fn request() -> ConfirmationPayload { ConfirmationPayload::SendTransaction(FilledTransactionRequest { diff --git a/crates/rpc/src/v1/helpers/fake_sign.rs b/crates/rpc/src/v1/helpers/fake_sign.rs index 70f4d66734..58f6fd7e0d 100644 --- a/crates/rpc/src/v1/helpers/fake_sign.rs +++ b/crates/rpc/src/v1/helpers/fake_sign.rs @@ -14,15 +14,15 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use std::cmp::min; -use types::transaction::{ +use crate::types::transaction::{ AccessListTx, Action, EIP1559TransactionTx, SignedTransaction, Transaction, TypedTransaction, TypedTxId, }; +use std::cmp::min; +use crate::v1::helpers::CallRequest; use ethereum_types::U256; use jsonrpc_core::{Error, ErrorCode}; -use v1::helpers::CallRequest; pub fn sign_call(request: CallRequest) -> Result { let max_gas = U256::from(500_000_000); diff --git a/crates/rpc/src/v1/helpers/mod.rs b/crates/rpc/src/v1/helpers/mod.rs index 686ec77387..5091e18df6 100644 --- a/crates/rpc/src/v1/helpers/mod.rs +++ b/crates/rpc/src/v1/helpers/mod.rs @@ -40,9 +40,8 @@ mod subscription_manager; mod work; pub use self::{ - dispatch::{Dispatcher, FullDispatcher}, network_settings::NetworkSettings, - poll_filter::{limit_logs, PollFilter, SyncPollFilter}, + poll_filter::{PollFilter, SyncPollFilter, limit_logs}, poll_manager::PollManager, requests::{ CallRequest, ConfirmationPayload, ConfirmationRequest, FilledTransactionRequest, @@ -54,6 +53,6 @@ pub use self::{ work::submit_work_detail, }; -pub fn to_url(address: &Option<::Host>) -> Option { +pub fn to_url(address: &Option) -> Option { address.as_ref().map(|host| (**host).to_owned()) } diff --git a/crates/rpc/src/v1/helpers/nonce.rs b/crates/rpc/src/v1/helpers/nonce.rs index 47d9370294..a7ccbe401d 100644 --- a/crates/rpc/src/v1/helpers/nonce.rs +++ b/crates/rpc/src/v1/helpers/nonce.rs @@ -19,14 +19,13 @@ use std::{ collections::HashMap, mem, sync::{ - atomic, + Arc, atomic, atomic::{AtomicBool, AtomicUsize}, - Arc, }, }; use ethereum_types::{Address, U256}; -use futures::{future, future::Either, sync::oneshot, Async, Future, Poll}; +use futures::{Async, Future, Poll, future, future::Either, sync::oneshot}; use parity_runtime::Executor; /// Manages currently reserved and prospective nonces diff --git a/crates/rpc/src/v1/helpers/poll_filter.rs b/crates/rpc/src/v1/helpers/poll_filter.rs index 2c373399eb..5db6f128e3 100644 --- 
a/crates/rpc/src/v1/helpers/poll_filter.rs +++ b/crates/rpc/src/v1/helpers/poll_filter.rs @@ -16,14 +16,13 @@ //! Helper type with all filter state data. +use crate::{types::filter::Filter, v1::types::Log}; use ethereum_types::H256; use parking_lot::Mutex; use std::{ collections::{BTreeSet, HashSet, VecDeque}, sync::Arc, }; -use types::filter::Filter; -use v1::types::Log; pub type BlockNumber = u64; @@ -68,7 +67,7 @@ pub enum PollFilter { } impl PollFilter { - pub(in v1) const MAX_BLOCK_HISTORY_SIZE: usize = 32; + pub(in crate::v1) const MAX_BLOCK_HISTORY_SIZE: usize = 32; } /// Returns only last `n` logs diff --git a/crates/rpc/src/v1/helpers/poll_manager.rs b/crates/rpc/src/v1/helpers/poll_manager.rs index 0448440353..46983a9138 100644 --- a/crates/rpc/src/v1/helpers/poll_manager.rs +++ b/crates/rpc/src/v1/helpers/poll_manager.rs @@ -83,9 +83,9 @@ where #[cfg(test)] mod tests { + use crate::v1::helpers::PollManager; use std::cell::Cell; use transient_hashmap::Timer; - use v1::helpers::PollManager; struct TestTimer<'a> { time: &'a Cell, diff --git a/crates/rpc/src/v1/helpers/requests.rs b/crates/rpc/src/v1/helpers/requests.rs index e55644238b..ca84ae371e 100644 --- a/crates/rpc/src/v1/helpers/requests.rs +++ b/crates/rpc/src/v1/helpers/requests.rs @@ -15,9 +15,9 @@ // along with OpenEthereum. If not, see . 
use bytes::Bytes; -use ethereum_types::{Address, H256, U256, U64}; +use ethereum_types::{Address, H256, U64, U256}; -use v1::types::{AccessList, Origin, TransactionCondition}; +use crate::v1::types::{AccessList, Origin, TransactionCondition}; /// Transaction request coming from RPC #[derive(Debug, Clone, Default, Eq, PartialEq, Hash)] diff --git a/crates/rpc/src/v1/helpers/secretstore.rs b/crates/rpc/src/v1/helpers/secretstore.rs index 35df732473..f29171583b 100644 --- a/crates/rpc/src/v1/helpers/secretstore.rs +++ b/crates/rpc/src/v1/helpers/secretstore.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::v1::{helpers::errors, types::EncryptedDocumentKey}; use bytes::Bytes; -use crypto::publickey::{self, ec_math_utils, Generator, Public, Random, Secret}; +use crypto::publickey::{self, Generator, Public, Random, Secret, ec_math_utils}; use ethereum_types::{H256, H512}; use jsonrpc_core::Error; -use rand::{rngs::OsRng, RngCore}; +use rand::{RngCore, rngs::OsRng}; use std::collections::BTreeSet; use tiny_keccak::Keccak; -use v1::{helpers::errors, types::EncryptedDocumentKey}; /// Initialization vector length. const INIT_VEC_LEN: usize = 16; diff --git a/crates/rpc/src/v1/helpers/signature.rs b/crates/rpc/src/v1/helpers/signature.rs index c7844aac30..83fe5fa2bc 100644 --- a/crates/rpc/src/v1/helpers/signature.rs +++ b/crates/rpc/src/v1/helpers/signature.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use crypto::publickey::{public_to_address, recover, Signature}; -use ethereum_types::{H256, U64}; -use hash::keccak; -use jsonrpc_core::Result; -use v1::{ +use crate::v1::{ helpers::{dispatch::eth_data_hash, errors}, types::{Bytes, RecoveredAccount}, }; +use crypto::publickey::{Signature, public_to_address, recover}; +use ethereum_types::{H256, U64}; +use hash::keccak; +use jsonrpc_core::Result; /// helper method for parity_verifySignature pub fn verify_signature( diff --git a/crates/rpc/src/v1/helpers/subscribers.rs b/crates/rpc/src/v1/helpers/subscribers.rs index 2dac203a44..283b555041 100644 --- a/crates/rpc/src/v1/helpers/subscribers.rs +++ b/crates/rpc/src/v1/helpers/subscribers.rs @@ -18,8 +18,8 @@ use ethereum_types::H64; use jsonrpc_pubsub::{ - typed::{Sink, Subscriber}, SubscriptionId, + typed::{Sink, Subscriber}, }; use std::{collections::HashMap, ops, str}; @@ -54,9 +54,8 @@ mod random { #[cfg(test)] mod random { - extern crate rand_xorshift; - use self::rand_xorshift::XorShiftRng; use rand::SeedableRng; + use rand_xorshift::XorShiftRng; const RNG_SEED: [u8; 16] = [0u8; 16]; pub type Rng = XorShiftRng; pub fn new() -> Rng { diff --git a/crates/rpc/src/v1/helpers/subscription_manager.rs b/crates/rpc/src/v1/helpers/subscription_manager.rs index 5fa45ee7d6..88dc894cd9 100644 --- a/crates/rpc/src/v1/helpers/subscription_manager.rs +++ b/crates/rpc/src/v1/helpers/subscription_manager.rs @@ -18,22 +18,21 @@ use parking_lot::Mutex; use std::sync::{ - atomic::{self, AtomicBool}, Arc, + atomic::{self, AtomicBool}, }; use jsonrpc_core::{ - self as core, + self as core, MetaIoHandler, futures::{ + Future, Sink, future::{self, Either}, sync::mpsc, - Future, Sink, }, - MetaIoHandler, }; use jsonrpc_pubsub::SubscriptionId; -use v1::{helpers::Subscribers, metadata::Metadata}; +use crate::v1::{helpers::Subscribers, metadata::Metadata}; #[derive(Debug)] struct Subscription { @@ -160,8 +159,8 @@ mod tests { use http::tokio::runtime::Runtime; use jsonrpc_core::{ - 
futures::{Future, Stream}, MetaIoHandler, NoopMiddleware, Params, Value, + futures::{Future, Stream}, }; use jsonrpc_pubsub::SubscriptionId; diff --git a/crates/rpc/src/v1/helpers/work.rs b/crates/rpc/src/v1/helpers/work.rs index 9f9bb5df38..6a0eeeb96e 100644 --- a/crates/rpc/src/v1/helpers/work.rs +++ b/crates/rpc/src/v1/helpers/work.rs @@ -18,11 +18,11 @@ use std::sync::Arc; +use crate::v1::helpers::errors; use ethcore::miner::{BlockChainClient, MinerService}; -use ethereum_types::{H256, H64}; +use ethereum_types::{H64, H256}; use jsonrpc_core::Error; use rlp; -use v1::helpers::errors; // Submit a POW work and return the block's hash pub fn submit_work_detail( diff --git a/crates/rpc/src/v1/impls/debug.rs b/crates/rpc/src/v1/impls/debug.rs index d2998d6243..1425b26d9e 100644 --- a/crates/rpc/src/v1/impls/debug.rs +++ b/crates/rpc/src/v1/impls/debug.rs @@ -18,14 +18,14 @@ use std::sync::Arc; +use crate::types::{header::Header, transaction::LocalizedTransaction}; use ethcore::client::BlockChainClient; -use types::{header::Header, transaction::LocalizedTransaction}; -use jsonrpc_core::Result; -use v1::{ +use crate::v1::{ traits::Debug, types::{Block, BlockTransactions, Bytes, RichBlock, Transaction}, }; +use jsonrpc_core::Result; /// Debug rpc implementation. 
pub struct DebugClient { diff --git a/crates/rpc/src/v1/impls/eth.rs b/crates/rpc/src/v1/impls/eth.rs index 23c60636e0..7c219cbbcd 100644 --- a/crates/rpc/src/v1/impls/eth.rs +++ b/crates/rpc/src/v1/impls/eth.rs @@ -22,9 +22,18 @@ use std::{ time::{Duration, Instant, SystemTime, UNIX_EPOCH}, }; -use ethereum_types::{Address, BigEndianHash, H160, H256, H64, U256, U64}; +use ethereum_types::{Address, BigEndianHash, H64, H160, H256, U64, U256}; use parking_lot::Mutex; +use crate::{ + miner::external::ExternalMinerService, + types::{ + BlockNumber as EthBlockNumber, encoded, + filter::Filter as EthcoreFilter, + header::Header, + transaction::{LocalizedTransaction, SignedTransaction, TypedTransaction}, + }, +}; use ethash::{self, SeedHashCompute}; use ethcore::{ client::{ @@ -35,31 +44,23 @@ use ethcore::{ snapshot::SnapshotService, }; use hash::keccak; -use miner::external::ExternalMinerService; use sync::SyncProvider; -use types::{ - encoded, - filter::Filter as EthcoreFilter, - header::Header, - transaction::{LocalizedTransaction, SignedTransaction, TypedTransaction}, - BlockNumber as EthBlockNumber, -}; -use jsonrpc_core::{futures::future, BoxFuture, Result}; +use jsonrpc_core::{BoxFuture, Result, futures::future}; -use v1::{ +use crate::v1::{ helpers::{ self, block_import::is_major_importing, deprecated::{self, DeprecationNotice}, - dispatch::{default_gas_price, default_max_priority_fee_per_gas, FullDispatcher}, + dispatch::{FullDispatcher, default_gas_price, default_max_priority_fee_per_gas}, errors, fake_sign, limit_logs, }, traits::Eth, types::{ - block_number_to_id, Block, BlockNumber, BlockTransactions, Bytes, CallRequest, EthAccount, - EthFeeHistory, Filter, Index, Log, Receipt, RichBlock, StorageProof, SyncInfo, SyncStatus, - Transaction, Work, + Block, BlockNumber, BlockTransactions, Bytes, CallRequest, EthAccount, EthFeeHistory, + Filter, Index, Log, Receipt, RichBlock, StorageProof, SyncInfo, SyncStatus, Transaction, + Work, block_number_to_id, }, }; @@ 
-234,7 +235,9 @@ where ) } None => { - warn!("`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`"); + warn!( + "`Pending` is deprecated and may be removed in future versions. Falling back to `Latest`" + ); client_query(BlockId::Latest) } } @@ -562,7 +565,7 @@ fn check_known(client: &C, number: BlockNumber) -> Result<()> where C: BlockChainClient, { - use types::block_status::BlockStatus; + use crate::types::block_status::BlockStatus; let id = match number { BlockNumber::Pending => return Ok(()), @@ -1365,13 +1368,14 @@ where }; let state = try_bf!(self.client.state_at(id).ok_or_else(errors::state_pruned)); - let header = try_bf!(self - .client - .block_header(id) - .ok_or_else(errors::state_pruned) - .and_then(|h| h - .decode(self.client.engine().params().eip1559_transition) - .map_err(errors::decode))); + let header = try_bf!( + self.client + .block_header(id) + .ok_or_else(errors::state_pruned) + .and_then(|h| h + .decode(self.client.engine().params().eip1559_transition) + .map_err(errors::decode)) + ); (state, header) }; @@ -1408,13 +1412,14 @@ where }; let state = try_bf!(self.client.state_at(id).ok_or_else(errors::state_pruned)); - let header = try_bf!(self - .client - .block_header(id) - .ok_or_else(errors::state_pruned) - .and_then(|h| h - .decode(self.client.engine().params().eip1559_transition) - .map_err(errors::decode))); + let header = try_bf!( + self.client + .block_header(id) + .ok_or_else(errors::state_pruned) + .and_then(|h| h + .decode(self.client.engine().params().eip1559_transition) + .map_err(errors::decode)) + ); (state, header) }; diff --git a/crates/rpc/src/v1/impls/eth_filter.rs b/crates/rpc/src/v1/impls/eth_filter.rs index 417048a406..4f673b587a 100644 --- a/crates/rpc/src/v1/impls/eth_filter.rs +++ b/crates/rpc/src/v1/impls/eth_filter.rs @@ -21,24 +21,24 @@ use std::{ sync::Arc, }; +use crate::types::filter::Filter as EthcoreFilter; use ethcore::{ client::{BlockChainClient, BlockId}, miner::{self, 
MinerService}, }; use ethereum_types::{H256, U256}; use parking_lot::Mutex; -use types::filter::Filter as EthcoreFilter; -use jsonrpc_core::{ - futures::{future, future::Either, Future}, - BoxFuture, Result, -}; -use v1::{ - helpers::{errors, limit_logs, PollFilter, PollManager, SyncPollFilter}, +use crate::v1::{ + helpers::{PollFilter, PollManager, SyncPollFilter, errors, limit_logs}, impls::eth::pending_logs, traits::EthFilter, types::{BlockNumber, Filter, FilterChanges, Index, Log}, }; +use jsonrpc_core::{ + BoxFuture, Result, + futures::{Future, future, future::Either}, +}; /// Something which provides data that can be filtered over. pub trait Filterable { diff --git a/crates/rpc/src/v1/impls/eth_pubsub.rs b/crates/rpc/src/v1/impls/eth_pubsub.rs index 3e042d9331..6a32a56cfd 100644 --- a/crates/rpc/src/v1/impls/eth_pubsub.rs +++ b/crates/rpc/src/v1/impls/eth_pubsub.rs @@ -22,19 +22,19 @@ use std::{ }; use jsonrpc_core::{ - futures::{self, Future, IntoFuture}, Error, Result, + futures::{self, Future, IntoFuture}, }; use jsonrpc_pubsub::{ - typed::{Sink, Subscriber}, SubscriptionId, + typed::{Sink, Subscriber}, }; -use v1::{ - helpers::{errors, limit_logs, Subscribers}, +use crate::v1::{ + helpers::{Subscribers, errors, limit_logs}, metadata::Metadata, traits::EthPubSub, - types::{pubsub, Header, Log, RichHeader}, + types::{Header, Log, RichHeader, pubsub}, }; use ethcore::client::{ @@ -44,7 +44,7 @@ use ethereum_types::H256; use parity_runtime::Executor; use parking_lot::RwLock; -use types::{encoded, filter::Filter as EthFilter}; +use crate::types::{encoded, filter::Filter as EthFilter}; type Client = Sink; diff --git a/crates/rpc/src/v1/impls/net.rs b/crates/rpc/src/v1/impls/net.rs index 779c47773b..39e5ff3810 100644 --- a/crates/rpc/src/v1/impls/net.rs +++ b/crates/rpc/src/v1/impls/net.rs @@ -15,10 +15,10 @@ // along with OpenEthereum. If not, see . //! Net rpc implementation. 
+use crate::v1::traits::Net; use jsonrpc_core::Result; use std::sync::Arc; use sync::SyncProvider; -use v1::traits::Net; /// Net rpc implementation. pub struct NetClient { diff --git a/crates/rpc/src/v1/impls/parity.rs b/crates/rpc/src/v1/impls/parity.rs index f03270fef5..5f27ac1f3e 100644 --- a/crates/rpc/src/v1/impls/parity.rs +++ b/crates/rpc/src/v1/impls/parity.rs @@ -17,7 +17,26 @@ //! Parity-specific rpc implementation. use std::{collections::BTreeMap, str::FromStr, sync::Arc}; -use crypto::{publickey::ecies, DEFAULT_MAC}; +use crate::{ + Host, + types::ids::BlockId, + v1::{ + helpers::{ + self, NetworkSettings, + block_import::is_major_importing, + errors, + external_signer::{SignerService, SigningQueue}, + fake_sign, verify_signature, + }, + traits::Parity, + types::{ + BlockNumber, Bytes, CallRequest, ChainStatus, Header, Histogram, + LocalTransactionStatus, Peers, Receipt, RecoveredAccount, RichHeader, RpcSettings, + Transaction, TransactionStats, block_number_to_id, + }, + }, +}; +use crypto::{DEFAULT_MAC, publickey::ecies}; use ethcore::{ client::{BlockChainClient, Call, EngineInfo, StateClient}, miner::{self, MinerService, TransactionFilter}, @@ -25,30 +44,13 @@ use ethcore::{ state::StateInfo, }; use ethcore_logger::RotatingLogger; -use ethereum_types::{Address, H160, H256, H512, H64, U256, U64}; +use ethereum_types::{Address, H64, H160, H256, H512, U64, U256}; use ethkey::Brain; use ethstore::random_phrase; -use jsonrpc_core::{futures::future, BoxFuture, Result}; +use jsonrpc_core::{BoxFuture, Result, futures::future}; use stats::PrometheusMetrics; use sync::{ManageNetwork, SyncProvider}; -use types::ids::BlockId; -use v1::{ - helpers::{ - self, - block_import::is_major_importing, - errors, - external_signer::{SignerService, SigningQueue}, - fake_sign, verify_signature, NetworkSettings, - }, - traits::Parity, - types::{ - block_number_to_id, BlockNumber, Bytes, CallRequest, ChainStatus, Header, Histogram, - LocalTransactionStatus, Peers, Receipt, 
RecoveredAccount, RichHeader, RpcSettings, - Transaction, TransactionStats, - }, -}; use version::version_data; -use Host; /// Parity implementation. pub struct ParityClient @@ -352,8 +354,8 @@ where Ok(ChainStatus { block_gap: gap }) } - fn node_kind(&self) -> Result<::v1::types::NodeKind> { - use v1::types::{Availability, Capability, NodeKind}; + fn node_kind(&self) -> Result { + use crate::v1::types::{Availability, Capability, NodeKind}; Ok(NodeKind { availability: Availability::Personal, @@ -367,10 +369,11 @@ where let (header, extra) = if number == BlockNumber::Pending { let info = self.client.chain_info(); - let header = try_bf!(self - .miner - .pending_block_header(info.best_block_number) - .ok_or_else(errors::unknown_block)); + let header = try_bf!( + self.miner + .pending_block_header(info.best_block_number) + .ok_or_else(errors::unknown_block) + ); (header.encoded(), None) } else { @@ -382,10 +385,11 @@ where BlockNumber::Pending => unreachable!(), // Already covered }; - let header = try_bf!(self - .client - .block_header(id) - .ok_or_else(errors::unknown_block)); + let header = try_bf!( + self.client + .block_header(id) + .ok_or_else(errors::unknown_block) + ); let info = self.client.block_extra_info(id).expect(EXTRA_INFO_PROOF); (header, Some(info)) @@ -403,10 +407,11 @@ where let id = match number { BlockNumber::Pending => { let info = self.client.chain_info(); - let receipts = try_bf!(self - .miner - .pending_receipts(info.best_block_number) - .ok_or_else(errors::unknown_block)); + let receipts = try_bf!( + self.miner + .pending_receipts(info.best_block_number) + .ok_or_else(errors::unknown_block) + ); return Box::new(future::ok(receipts.into_iter().map(Into::into).collect())); } BlockNumber::Hash { hash, .. 
} => BlockId::Hash(hash), @@ -414,10 +419,11 @@ where BlockNumber::Earliest => BlockId::Earliest, BlockNumber::Latest => BlockId::Latest, }; - let receipts = try_bf!(self - .client - .localized_block_receipts(id) - .ok_or_else(errors::unknown_block)); + let receipts = try_bf!( + self.client + .localized_block_receipts(id) + .ok_or_else(errors::unknown_block) + ); Box::new(future::ok(receipts.into_iter().map(Into::into).collect())) } diff --git a/crates/rpc/src/v1/impls/parity_accounts.rs b/crates/rpc/src/v1/impls/parity_accounts.rs index 151c3f12a0..e9deefc4e8 100644 --- a/crates/rpc/src/v1/impls/parity_accounts.rs +++ b/crates/rpc/src/v1/impls/parity_accounts.rs @@ -17,19 +17,13 @@ //! Account management (personal) rpc implementation use std::{ collections::{ - btree_map::{BTreeMap, Entry}, HashSet, + btree_map::{BTreeMap, Entry}, }, sync::Arc, }; -use accounts::AccountProvider; -use crypto::publickey::Secret; -use ethereum_types::{Address, H160, H256, H520}; -use ethkey::{Brain, Password}; -use ethstore::KeyFile; -use jsonrpc_core::Result; -use v1::{ +use crate::v1::{ helpers::{ deprecated::{self, DeprecationNotice}, errors, @@ -37,6 +31,12 @@ use v1::{ traits::{ParityAccounts, ParityAccountsInfo}, types::{AccountInfo, Derive, DeriveHash, DeriveHierarchical, ExtAccountInfo}, }; +use accounts::AccountProvider; +use crypto::publickey::Secret; +use ethereum_types::{Address, H160, H256, H520}; +use ethkey::{Brain, Password}; +use ethstore::KeyFile; +use jsonrpc_core::Result; /// Account management (personal) rpc implementation. 
pub struct ParityAccountsClient { diff --git a/crates/rpc/src/v1/impls/parity_set.rs b/crates/rpc/src/v1/impls/parity_set.rs index 5e2b137ae5..083ef61bd1 100644 --- a/crates/rpc/src/v1/impls/parity_set.rs +++ b/crates/rpc/src/v1/impls/parity_set.rs @@ -27,20 +27,22 @@ use fetch::{self, Fetch}; use hash::keccak_buffer; use sync::ManageNetwork; -use jsonrpc_core::{futures::Future, BoxFuture, Result}; -use v1::{ +use crate::v1::{ helpers::errors, traits::ParitySet, types::{Bytes, Transaction}, }; +use jsonrpc_core::{BoxFuture, Result, futures::Future}; #[cfg(any(test, feature = "accounts"))] pub mod accounts { use super::*; - use accounts::AccountProvider; - use v1::{ - helpers::{deprecated::DeprecationNotice, engine_signer::EngineSigner}, - traits::ParitySetAccounts, + use crate::{ + accounts::AccountProvider, + v1::{ + helpers::{deprecated::DeprecationNotice, engine_signer::EngineSigner}, + traits::ParitySetAccounts, + }, }; /// Parity-specific account-touching RPC interfaces. diff --git a/crates/rpc/src/v1/impls/personal.rs b/crates/rpc/src/v1/impls/personal.rs index 813790333b..970adb9751 100644 --- a/crates/rpc/src/v1/impls/personal.rs +++ b/crates/rpc/src/v1/impls/personal.rs @@ -17,22 +17,17 @@ //! 
Account management (personal) rpc implementation use std::sync::Arc; +use crate::types::transaction::{PendingTransaction, SignedTransaction}; use accounts::AccountProvider; use bytes::Bytes; -use crypto::publickey::{public_to_address, recover, Signature}; -use eip_712::{hash_structured_data, EIP712}; +use crypto::publickey::{Signature, public_to_address, recover}; +use eip_712::{EIP712, hash_structured_data}; use ethereum_types::{Address, H160, H256, H520, U128}; -use types::transaction::{PendingTransaction, SignedTransaction}; -use jsonrpc_core::{ - futures::{future, Future}, - types::Value, - BoxFuture, Result, -}; -use v1::{ +use crate::v1::{ helpers::{ deprecated::{self, DeprecationNotice}, - dispatch::{self, eth_data_hash, Dispatcher, PostSign, SignWith, WithToken}, + dispatch::{self, Dispatcher, PostSign, SignWith, WithToken, eth_data_hash}, eip191, errors, }, metadata::Metadata, @@ -43,6 +38,11 @@ use v1::{ RichRawTransaction as RpcRichRawTransaction, TransactionRequest, }, }; +use jsonrpc_core::{ + BoxFuture, Result, + futures::{Future, future}, + types::Value, +}; /// Account management (personal) rpc implementation. 
pub struct PersonalClient { @@ -162,7 +162,7 @@ impl Personal for PersonalClient { return Err(errors::unsupported( "Time-unlocking is not supported when permanent unlock is disabled.", Some("Use personal_sendTransaction instead."), - )) + )); } }; match r { @@ -253,7 +253,7 @@ impl Personal for PersonalClient { let data = match hash_structured_data(typed_data) { Ok(d) => d, - Err(err) => return Box::new(future::err(errors::invalid_call_data(err.kind()))), + Err(err) => return Box::new(future::err(errors::invalid_call_data(err))), }; let dispatcher = self.dispatcher.clone(); let accounts = Arc::new(dispatch::Signer::new(self.accounts.clone())) as _; @@ -342,7 +342,9 @@ impl Personal for PersonalClient { "personal_signAndSendTransaction", Some("use personal_sendTransaction instead."), ); - warn!("Using deprecated personal_signAndSendTransaction, use personal_sendTransaction instead."); + warn!( + "Using deprecated personal_signAndSendTransaction, use personal_sendTransaction instead." + ); self.send_transaction(meta, request, password) } } diff --git a/crates/rpc/src/v1/impls/pubsub.rs b/crates/rpc/src/v1/impls/pubsub.rs index 7d39d1c14d..064bd382e7 100644 --- a/crates/rpc/src/v1/impls/pubsub.rs +++ b/crates/rpc/src/v1/impls/pubsub.rs @@ -20,15 +20,14 @@ use parking_lot::RwLock; use std::{sync::Arc, time::Duration}; use jsonrpc_core::{ - self as core, - futures::{future, Future, Sink, Stream}, - MetaIoHandler, Result, + self as core, MetaIoHandler, Result, + futures::{Future, Sink, Stream, future}, }; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpc_pubsub::{SubscriptionId, typed::Subscriber}; use tokio_timer; +use crate::v1::{helpers::GenericPollManager, metadata::Metadata, traits::PubSub}; use parity_runtime::Executor; -use v1::{helpers::GenericPollManager, metadata::Metadata, traits::PubSub}; /// Parity PubSub implementation. 
pub struct PubSubClient> { diff --git a/crates/rpc/src/v1/impls/rpc.rs b/crates/rpc/src/v1/impls/rpc.rs index 5218186208..2c2ffc1a42 100644 --- a/crates/rpc/src/v1/impls/rpc.rs +++ b/crates/rpc/src/v1/impls/rpc.rs @@ -15,9 +15,9 @@ // along with OpenEthereum. If not, see . //! RPC generic methods implementation. +use crate::v1::traits::Rpc; use jsonrpc_core::Result; use std::collections::BTreeMap; -use v1::traits::Rpc; /// RPC generic methods implementation. pub struct RpcClient { diff --git a/crates/rpc/src/v1/impls/secretstore.rs b/crates/rpc/src/v1/impls/secretstore.rs index 9c219e875a..fd57acf69d 100644 --- a/crates/rpc/src/v1/impls/secretstore.rs +++ b/crates/rpc/src/v1/impls/secretstore.rs @@ -19,12 +19,10 @@ use std::{collections::BTreeSet, sync::Arc}; use accounts::AccountProvider; -use crypto::{publickey::Secret, DEFAULT_MAC}; +use crypto::{DEFAULT_MAC, publickey::Secret}; use ethereum_types::{H160, H256, H512}; -use ethkey::Password; -use jsonrpc_core::Result; -use v1::{ +use crate::v1::{ helpers::{ errors, secretstore::{ @@ -35,6 +33,8 @@ use v1::{ traits::SecretStore, types::{Bytes, EncryptedDocumentKey}, }; +use ethkey::Password; +use jsonrpc_core::Result; /// Parity implementation. 
pub struct SecretStoreClient { diff --git a/crates/rpc/src/v1/impls/signer.rs b/crates/rpc/src/v1/impls/signer.rs index 8d785f8abf..b605f0a789 100644 --- a/crates/rpc/src/v1/impls/signer.rs +++ b/crates/rpc/src/v1/impls/signer.rs @@ -18,27 +18,19 @@ use std::sync::Arc; +use crate::types::transaction::{PendingTransaction, SignedTransaction, TypedTransaction}; use crypto::publickey; use ethereum_types::{H520, U256}; use parity_runtime::Executor; use parking_lot::Mutex; -use types::transaction::{PendingTransaction, SignedTransaction, TypedTransaction}; -use jsonrpc_core::{ - futures::{future, future::Either, Future, IntoFuture}, - BoxFuture, Error, Result, -}; -use jsonrpc_pubsub::{ - typed::{Sink, Subscriber}, - SubscriptionId, -}; -use v1::{ +use crate::v1::{ helpers::{ + ConfirmationPayload, FilledTransactionRequest, Subscribers, deprecated::{self, DeprecationNotice}, - dispatch::{self, eth_data_hash, Dispatcher, WithToken}, + dispatch::{self, Dispatcher, WithToken, eth_data_hash}, errors, external_signer::{SignerService, SigningQueue}, - ConfirmationPayload, FilledTransactionRequest, Subscribers, }, metadata::Metadata, traits::Signer, @@ -47,6 +39,14 @@ use v1::{ TransactionModification, }, }; +use jsonrpc_core::{ + BoxFuture, Error, Result, + futures::{Future, IntoFuture, future, future::Either}, +}; +use jsonrpc_pubsub::{ + SubscriptionId, + typed::{Sink, Subscriber}, +}; /// Transactions confirmation (personal) rpc implementation. 
pub struct SignerClient { diff --git a/crates/rpc/src/v1/impls/signing.rs b/crates/rpc/src/v1/impls/signing.rs index 3bb68910f1..b79bd6e1db 100644 --- a/crates/rpc/src/v1/impls/signing.rs +++ b/crates/rpc/src/v1/impls/signing.rs @@ -23,11 +23,11 @@ use transient_hashmap::TransientHashMap; use ethereum_types::{H160, H256, H520, U256}; use jsonrpc_core::{ - futures::{future, future::Either, Async, Future, Poll}, BoxFuture, Error, Result, + futures::{Async, Future, Poll, future, future::Either}, }; -use v1::{ +use crate::v1::{ helpers::{ deprecated::{self, DeprecationNotice}, dispatch::{self, Dispatcher}, diff --git a/crates/rpc/src/v1/impls/signing_unsafe.rs b/crates/rpc/src/v1/impls/signing_unsafe.rs index d515ef4518..a68704e4cd 100644 --- a/crates/rpc/src/v1/impls/signing_unsafe.rs +++ b/crates/rpc/src/v1/impls/signing_unsafe.rs @@ -18,12 +18,7 @@ use std::sync::Arc; -use ethereum_types::{Address, H160, H256, H520, U256}; -use jsonrpc_core::{ - futures::{future, Future}, - BoxFuture, Result, -}; -use v1::{ +use crate::v1::{ helpers::{ deprecated::{self, DeprecationNotice}, dispatch::{self, Dispatcher}, @@ -37,6 +32,11 @@ use v1::{ RichRawTransaction as RpcRichRawTransaction, TransactionRequest as RpcTransactionRequest, }, }; +use ethereum_types::{Address, H160, H256, H520, U256}; +use jsonrpc_core::{ + BoxFuture, Result, + futures::{Future, future}, +}; /// Implementation of functions that require signing when no trusted signer is used. 
pub struct SigningUnsafeClient { diff --git a/crates/rpc/src/v1/impls/traces.rs b/crates/rpc/src/v1/impls/traces.rs index c25ef7df08..f0e67cdfea 100644 --- a/crates/rpc/src/v1/impls/traces.rs +++ b/crates/rpc/src/v1/impls/traces.rs @@ -18,22 +18,22 @@ use std::sync::Arc; +use crate::types::transaction::{SignedTransaction, TypedTransaction}; use ethcore::client::{ BlockChainClient, BlockId, Call, CallAnalytics, EngineInfo, StateClient, StateInfo, TraceId, TransactionId, }; use ethereum_types::H256; -use types::transaction::{SignedTransaction, TypedTransaction}; -use jsonrpc_core::Result; -use v1::{ +use crate::v1::{ helpers::{errors, fake_sign}, traits::Traces, types::{ - block_number_to_id, BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, - TraceOptions, TraceResults, TraceResultsWithTransactionHash, + BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, TraceOptions, + TraceResults, TraceResultsWithTransactionHash, block_number_to_id, }, }; +use jsonrpc_core::Result; fn to_call_analytics(flags: TraceOptions) -> CallAnalytics { CallAnalytics { @@ -118,7 +118,7 @@ where return Err(errors::invalid_params( "`BlockNumber::Pending` is not supported", (), - )) + )); } }; @@ -167,7 +167,7 @@ where return Err(errors::invalid_params( "`BlockNumber::Pending` is not supported", (), - )) + )); } }; @@ -211,7 +211,7 @@ where return Err(errors::invalid_params( "`BlockNumber::Pending` is not supported", (), - )) + )); } }; @@ -263,7 +263,7 @@ where return Err(errors::invalid_params( "`BlockNumber::Pending` is not supported", (), - )) + )); } }; diff --git a/crates/rpc/src/v1/impls/web3.rs b/crates/rpc/src/v1/impls/web3.rs index 54d5059bce..eacd1a99e9 100644 --- a/crates/rpc/src/v1/impls/web3.rs +++ b/crates/rpc/src/v1/impls/web3.rs @@ -15,10 +15,10 @@ // along with OpenEthereum. If not, see . //! Web3 rpc implementation. 
+use crate::v1::{traits::Web3, types::Bytes}; use ethereum_types::H256; use hash::keccak; use jsonrpc_core::Result; -use v1::{traits::Web3, types::Bytes}; use version::version; /// Web3 rpc implementation. diff --git a/crates/rpc/src/v1/informant.rs b/crates/rpc/src/v1/informant.rs index 7e64dc7563..67175080bc 100644 --- a/crates/rpc/src/v1/informant.rs +++ b/crates/rpc/src/v1/informant.rs @@ -24,8 +24,8 @@ use parking_lot::RwLock; use std::{ fmt, sync::{ - atomic::{self, AtomicUsize}, Arc, + atomic::{self, AtomicUsize}, }, time, }; diff --git a/crates/rpc/src/v1/metadata.rs b/crates/rpc/src/v1/metadata.rs index 4e89a6c119..d969a1d393 100644 --- a/crates/rpc/src/v1/metadata.rs +++ b/crates/rpc/src/v1/metadata.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use jsonrpc_core; use jsonrpc_pubsub::{PubSubMetadata, Session}; -use v1::types::Origin; +use crate::v1::types::Origin; /// RPC methods metadata. #[derive(Clone, Default, Debug)] diff --git a/crates/rpc/src/v1/mod.rs b/crates/rpc/src/v1/mod.rs index fe4357c153..b4af8e1159 100644 --- a/crates/rpc/src/v1/mod.rs +++ b/crates/rpc/src/v1/mod.rs @@ -34,6 +34,7 @@ mod helpers; mod impls; #[cfg(test)] mod tests; + mod types; pub mod extractors; @@ -43,7 +44,7 @@ pub mod traits; pub use self::{ extractors::{RpcExtractor, WsDispatcher, WsExtractor, WsStats}, - helpers::{block_import, dispatch, NetworkSettings}, + helpers::{NetworkSettings, block_import, dispatch}, impls::*, metadata::Metadata, traits::{ diff --git a/crates/rpc/src/v1/tests/eth.rs b/crates/rpc/src/v1/tests/eth.rs index 0a5ae4d6cc..5bd05dd7f7 100644 --- a/crates/rpc/src/v1/tests/eth.rs +++ b/crates/rpc/src/v1/tests/eth.rs @@ -17,24 +17,22 @@ //! rpc integration tests. 
use std::{env, sync::Arc}; +use crate::{io::IoChannel, miner::external::ExternalMiner, types::ids::BlockId}; use accounts::AccountProvider; use ethcore::{ client::{BlockChainClient, ChainInfo, Client, ClientConfig, EvmTestClient, ImportBlock}, + exit::ShutdownManager, miner::Miner, spec::{Genesis, Spec}, test_helpers, - verification::{queue::kind::blocks::Unverified, VerifierType}, + verification::{VerifierType, queue::kind::blocks::Unverified}, }; use ethereum_types::{Address, H256, U256}; use ethjson::{blockchain::BlockChain, spec::ForkSpec}; -use io::IoChannel; -use miner::external::ExternalMiner; use parity_runtime::Runtime; use parking_lot::Mutex; -use types::ids::BlockId; -use jsonrpc_core::IoHandler; -use v1::{ +use crate::v1::{ helpers::{ dispatch::{self, FullDispatcher}, nonce, @@ -44,6 +42,7 @@ use v1::{ tests::helpers::{Config, TestSnapshotService, TestSyncProvider}, traits::{Eth, EthSigning}, }; +use jsonrpc_core::IoHandler; fn account_provider() -> Arc { Arc::new(AccountProvider::transient_provider()) @@ -130,6 +129,7 @@ impl EthTester { test_helpers::new_db(), miner_service.clone(), IoChannel::disconnected(), + Arc::new(ShutdownManager::null()), ) .unwrap(); let sync_provider = sync_provider(); @@ -224,12 +224,14 @@ fn eth_get_proof() { }"#; let res_latest = r#","address":"0xaaaf5374fce5edbc8e2a8697c15331677e6ebaaa","balance":"0x9","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","nonce":"0x0","storageHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","storageProof":[]},"id":1}"#.to_owned(); - assert!(tester - .handler - .handle_request_sync(req_latest) - .unwrap() - .to_string() - .ends_with(res_latest.as_str())); + assert!( + tester + .handler + .handle_request_sync(req_latest) + .unwrap() + .to_string() + .ends_with(res_latest.as_str()) + ); // non-existant account let req_new_acc = r#"{ @@ -240,12 +242,14 @@ fn eth_get_proof() { }"#; let res_new_acc = 
r#","address":"0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","balance":"0x0","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","nonce":"0x0","storageHash":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","storageProof":[]},"id":3}"#.to_owned(); - assert!(tester - .handler - .handle_request_sync(req_new_acc) - .unwrap() - .to_string() - .ends_with(res_new_acc.as_str())); + assert!( + tester + .handler + .handle_request_sync(req_new_acc) + .unwrap() + .to_string() + .ends_with(res_new_acc.as_str()) + ); } #[test] diff --git a/crates/rpc/src/v1/tests/helpers/miner_service.rs b/crates/rpc/src/v1/tests/helpers/miner_service.rs index 192e806f04..2d91648664 100644 --- a/crates/rpc/src/v1/tests/helpers/miner_service.rs +++ b/crates/rpc/src/v1/tests/helpers/miner_service.rs @@ -19,34 +19,37 @@ use std::{ collections::{BTreeMap, BTreeSet, HashMap}, sync::Arc, + time::Duration, }; +use crate::{ + miner::pool::{ + QueueStatus, VerifiedTransaction, local_transactions::Status as LocalTransactionStatus, + verifier, + }, + types::{ + BlockNumber, + block::Block, + header::Header, + ids::BlockId, + receipt::RichReceipt, + transaction::{self, PendingTransaction, SignedTransaction, UnverifiedTransaction}, + }, +}; use bytes::Bytes; use call_contract::CallContract; use ethcore::{ block::SealedBlock, client::{ - test_client::TestState, traits::ForceUpdateSealing, BlockChain, EngineInfo, Nonce, - PrepareOpenBlock, StateClient, + BlockChain, EngineInfo, Nonce, PrepareOpenBlock, StateClient, test_client::TestState, + traits::ForceUpdateSealing, }, - engines::{signer::EngineSigner, EthEngine}, + engines::{EthEngine, signer::EngineSigner}, error::Error, miner::{self, AuthoringParams, MinerService, TransactionFilter}, }; use ethereum_types::{Address, H256, U256}; -use miner::pool::{ - local_transactions::Status as LocalTransactionStatus, verifier, QueueStatus, - VerifiedTransaction, -}; use parking_lot::{Mutex, RwLock}; -use types::{ - 
block::Block, - header::Header, - ids::BlockId, - receipt::RichReceipt, - transaction::{self, PendingTransaction, SignedTransaction, UnverifiedTransaction}, - BlockNumber, -}; /// Test miner service. pub struct TestMinerService { @@ -252,6 +255,18 @@ impl MinerService for TestMinerService { .map(|tx| Arc::new(VerifiedTransaction::from_pending_block_transaction(tx))) } + fn transaction_if_readable( + &self, + hash: &H256, + max_duration: &Duration, + ) -> Option> { + self.pending_transactions + .try_lock_for(*max_duration)? + .get(hash) + .cloned() + .map(|tx| Arc::new(VerifiedTransaction::from_pending_block_transaction(tx))) + } + fn remove_transaction(&self, hash: &H256) -> Option> { self.pending_transactions .lock() @@ -271,6 +286,10 @@ impl MinerService for TestMinerService { .collect() } + fn local_transaction_status(&self, tx_hash: &H256) -> Option { + self.local_transactions.lock().get(tx_hash).cloned() + } + fn ready_transactions_filtered( &self, chain: &C, diff --git a/crates/rpc/src/v1/tests/mocked/debug.rs b/crates/rpc/src/v1/tests/mocked/debug.rs index ea2ef979ee..2f1726e994 100644 --- a/crates/rpc/src/v1/tests/mocked/debug.rs +++ b/crates/rpc/src/v1/tests/mocked/debug.rs @@ -18,8 +18,8 @@ use std::sync::Arc; use ethcore::client::TestBlockChainClient; +use crate::v1::{Debug, DebugClient}; use jsonrpc_core::IoHandler; -use v1::{Debug, DebugClient}; fn io() -> IoHandler { let client = Arc::new(TestBlockChainClient::new()); diff --git a/crates/rpc/src/v1/tests/mocked/eth.rs b/crates/rpc/src/v1/tests/mocked/eth.rs index f8d70097bc..737d73ce66 100644 --- a/crates/rpc/src/v1/tests/mocked/eth.rs +++ b/crates/rpc/src/v1/tests/mocked/eth.rs @@ -21,30 +21,32 @@ use std::{ time::{Duration, Instant, SystemTime, UNIX_EPOCH}, }; +use crate::{ + miner::external::ExternalMiner, + types::{ + ids::{BlockId, TransactionId}, + log_entry::{LocalizedLogEntry, LogEntry}, + receipt::{LocalizedReceipt, RichReceipt, TransactionOutcome}, + transaction::{Action, Transaction, 
TypedTransaction, TypedTxId}, + }, +}; use accounts::AccountProvider; use ethcore::{ client::{BlockChainClient, EachBlockWith, EvmTestClient, Executed, TestBlockChainClient}, miner::{self, MinerService}, }; use ethereum_types::{Address, Bloom, H160, H256, U256}; -use miner::external::ExternalMiner; use parity_runtime::Runtime; use parking_lot::Mutex; use rustc_hex::{FromHex, ToHex}; use sync::SyncState; -use types::{ - ids::{BlockId, TransactionId}, - log_entry::{LocalizedLogEntry, LogEntry}, - receipt::{LocalizedReceipt, RichReceipt, TransactionOutcome}, - transaction::{Action, Transaction, TypedTransaction, TypedTxId}, -}; -use jsonrpc_core::IoHandler; -use v1::{ +use crate::v1::{ + Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, metadata::Metadata, tests::helpers::{Config, TestMinerService, TestSnapshotService, TestSyncProvider}, - Eth, EthClient, EthClientOptions, EthFilter, EthFilterClient, }; +use jsonrpc_core::IoHandler; fn blockchain_client() -> Arc { let client = TestBlockChainClient::new(); @@ -77,6 +79,7 @@ fn snapshot_service() -> Arc { } struct EthTester { + #[allow(dead_code)] pub runtime: Runtime, pub client: Arc, pub sync: Arc, @@ -750,8 +753,8 @@ fn rpc_eth_transaction_count_by_number_pending() { #[test] fn rpc_eth_pending_transaction_by_hash() { + use crate::types::transaction::SignedTransaction; use ethereum_types::H256; - use types::transaction::SignedTransaction; let tester = EthTester::default(); { diff --git a/crates/rpc/src/v1/tests/mocked/eth_pubsub.rs b/crates/rpc/src/v1/tests/mocked/eth_pubsub.rs index e19768c27d..71585b1d70 100644 --- a/crates/rpc/src/v1/tests/mocked/eth_pubsub.rs +++ b/crates/rpc/src/v1/tests/mocked/eth_pubsub.rs @@ -17,14 +17,14 @@ use std::sync::Arc; use jsonrpc_core::{ - futures::{self, Future, Stream}, MetaIoHandler, + futures::{self, Future, Stream}, }; use jsonrpc_pubsub::Session; use std::time::Duration; -use v1::{EthPubSub, EthPubSubClient, Metadata}; +use crate::v1::{EthPubSub, EthPubSubClient, 
Metadata}; use ethcore::client::{ ChainNotify, ChainRoute, ChainRouteType, EachBlockWith, NewBlocks, TestBlockChainClient, @@ -115,11 +115,11 @@ fn should_subscribe_to_new_heads() { #[test] fn should_subscribe_to_logs() { - use ethcore::client::BlockInfo; - use types::{ + use crate::types::{ ids::BlockId, log_entry::{LocalizedLogEntry, LogEntry}, }; + use ethcore::client::BlockInfo; // given let el = Runtime::with_thread_count(1); diff --git a/crates/rpc/src/v1/tests/mocked/manage_network.rs b/crates/rpc/src/v1/tests/mocked/manage_network.rs index e032979ead..346d9e8ee3 100644 --- a/crates/rpc/src/v1/tests/mocked/manage_network.rs +++ b/crates/rpc/src/v1/tests/mocked/manage_network.rs @@ -21,7 +21,7 @@ use std::{ }; use sync::ManageNetwork; -extern crate ethcore_network; +use ethcore_network; pub struct TestManageNetwork; diff --git a/crates/rpc/src/v1/tests/mocked/net.rs b/crates/rpc/src/v1/tests/mocked/net.rs index 87450bd83e..e38f0cd2ed 100644 --- a/crates/rpc/src/v1/tests/mocked/net.rs +++ b/crates/rpc/src/v1/tests/mocked/net.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use jsonrpc_core::IoHandler; -use std::sync::Arc; -use v1::{ - tests::helpers::{Config, TestSyncProvider}, +use crate::v1::{ Net, NetClient, + tests::helpers::{Config, TestSyncProvider}, }; +use jsonrpc_core::IoHandler; +use std::sync::Arc; fn sync_provider() -> Arc { Arc::new(TestSyncProvider::new(Config { diff --git a/crates/rpc/src/v1/tests/mocked/parity.rs b/crates/rpc/src/v1/tests/mocked/parity.rs index 861c9828fc..203425ce39 100644 --- a/crates/rpc/src/v1/tests/mocked/parity.rs +++ b/crates/rpc/src/v1/tests/mocked/parity.rs @@ -14,27 +14,31 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::{ + miner::pool::local_transactions::Status as LocalTransactionStatus, + types::{ + receipt::{LocalizedReceipt, TransactionOutcome}, + transaction::TypedTxId, + }, +}; use crypto::publickey::{Generator, Random}; use ethcore::client::{Executed, TestBlockChainClient, TransactionId}; use ethcore_logger::RotatingLogger; use ethereum_types::{Address, BigEndianHash, Bloom, H256, U256}; -use miner::pool::local_transactions::Status as LocalTransactionStatus; use std::{str::FromStr, sync::Arc}; use sync::ManageNetwork; -use types::{ - receipt::{LocalizedReceipt, TransactionOutcome}, - transaction::TypedTxId, -}; use super::manage_network::TestManageNetwork; -use jsonrpc_core::IoHandler; -use v1::{ - helpers::{external_signer::SignerService, NetworkSettings}, - metadata::Metadata, - tests::helpers::{Config, TestMinerService, TestSyncProvider}, - Parity, ParityClient, +use crate::{ + Host, + v1::{ + Parity, ParityClient, + helpers::{NetworkSettings, external_signer::SignerService}, + metadata::Metadata, + tests::helpers::{Config, TestMinerService, TestSyncProvider}, + }, }; -use Host; +use jsonrpc_core::IoHandler; pub type TestParityClient = ParityClient; @@ -307,7 +311,7 @@ fn assert_txs_filtered(io: &IoHandler, filter: &str, expected: Vec #[test] fn rpc_parity_pending_transactions_with_filter() { - use types::transaction::{Action, Transaction, TypedTransaction}; + use crate::types::transaction::{Action, Transaction, TypedTransaction}; let deps = Dependencies::new(); let io = deps.default_client(); @@ -450,7 +454,7 @@ fn rpc_parity_transactions_stats() { #[test] fn rpc_parity_local_transactions() { - use types::transaction::{Transaction, TypedTransaction}; + use crate::types::transaction::{Transaction, TypedTransaction}; let deps = Dependencies::new(); let io = deps.default_client(); let tx = TypedTransaction::Legacy(Transaction { @@ -462,7 +466,7 @@ fn rpc_parity_local_transactions() { nonce: 0.into(), }) .fake_sign(Address::from_low_u64_be(3)); - let tx = 
Arc::new(::miner::pool::VerifiedTransaction::from_pending_block_transaction(tx)); + let tx = Arc::new(crate::miner::pool::VerifiedTransaction::from_pending_block_transaction(tx)); deps.miner.local_transactions.lock().insert( H256::from_low_u64_be(10), LocalTransactionStatus::Pending(tx.clone()), diff --git a/crates/rpc/src/v1/tests/mocked/parity_accounts.rs b/crates/rpc/src/v1/tests/mocked/parity_accounts.rs index c154a577b1..0cb55aa414 100644 --- a/crates/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/crates/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -18,11 +18,11 @@ use std::{str::FromStr, sync::Arc}; use accounts::{AccountProvider, AccountProviderSettings}; use ethereum_types::Address; -use ethstore::{accounts_dir::RootDiskDirectory, EthStore}; +use ethstore::{EthStore, accounts_dir::RootDiskDirectory}; use tempdir::TempDir; +use crate::v1::{ParityAccounts, ParityAccountsClient, ParityAccountsInfo}; use jsonrpc_core::IoHandler; -use v1::{ParityAccounts, ParityAccountsClient, ParityAccountsInfo}; struct ParityAccountsTester { accounts: Arc, @@ -154,7 +154,10 @@ fn should_be_able_to_get_account_info() { let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; let res = tester.io.handle_request_sync(request); - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); + let response = format!( + "{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", + address, uuid + ); assert_eq!(res, Some(response)); } @@ -188,7 +191,10 @@ fn should_be_able_to_set_name() { let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; let res = tester.io.handle_request_sync(request); - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", 
address, uuid); + let response = format!( + "{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", + address, uuid + ); assert_eq!(res, Some(response)); } @@ -222,7 +228,10 @@ fn should_be_able_to_set_meta() { let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#; let res = tester.io.handle_request_sync(request); - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid); + let response = format!( + "{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"\",\"uuid\":\"{}\"}}}},\"id\":1}}", + address, uuid + ); assert_eq!(res, Some(response)); } @@ -297,10 +306,12 @@ fn rpc_parity_new_vault() { Some(response.to_owned()) ); assert!(tester.accounts.close_vault("vault1").is_ok()); - assert!(tester - .accounts - .open_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .open_vault("vault1", &"password1".into()) + .is_ok() + ); } #[test] @@ -308,10 +319,12 @@ fn rpc_parity_open_vault() { let tempdir = TempDir::new("").unwrap(); let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); assert!(tester.accounts.close_vault("vault1").is_ok()); let request = r#"{"jsonrpc": "2.0", "method": "parity_openVault", "params":["vault1", "password1"], "id": 1}"#; @@ -328,10 +341,12 @@ fn rpc_parity_close_vault() { let tempdir = TempDir::new("").unwrap(); let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); let request = 
r#"{"jsonrpc": "2.0", "method": "parity_closeVault", "params":["vault1"], "id": 1}"#; @@ -348,10 +363,12 @@ fn rpc_parity_change_vault_password() { let tempdir = TempDir::new("").unwrap(); let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); let request = r#"{"jsonrpc": "2.0", "method": "parity_changeVaultPassword", "params":["vault1", "password2"], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; @@ -371,10 +388,12 @@ fn rpc_parity_change_vault() { .accounts .new_account_and_public(&"root_password".into()) .unwrap(); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); let request = format!( r#"{{"jsonrpc": "2.0", "method": "parity_changeVault", "params":["0x{:x}", "vault1"], "id": 1}}"#, @@ -403,10 +422,12 @@ fn rpc_parity_vault_adds_vault_field_to_acount_meta() { .unwrap() .uuid .unwrap(); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); assert!(tester.accounts.change_vault(address1, "vault1").is_ok()); let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#; @@ -440,14 +461,18 @@ fn rpc_parity_list_vaults() { let tempdir = TempDir::new("").unwrap(); let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); - assert!(tester - .accounts - .create_vault("vault2", &"password2".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); + assert!( + tester + .accounts + 
.create_vault("vault2", &"password2".into()) + .is_ok() + ); let request = r#"{"jsonrpc": "2.0", "method": "parity_listVaults", "params":[], "id": 1}"#; let response1 = r#"{"jsonrpc":"2.0","result":["vault1","vault2"],"id":1}"#; @@ -465,18 +490,24 @@ fn rpc_parity_list_opened_vaults() { let tempdir = TempDir::new("").unwrap(); let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); - assert!(tester - .accounts - .create_vault("vault2", &"password2".into()) - .is_ok()); - assert!(tester - .accounts - .create_vault("vault3", &"password3".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); + assert!( + tester + .accounts + .create_vault("vault2", &"password2".into()) + .is_ok() + ); + assert!( + tester + .accounts + .create_vault("vault3", &"password3".into()) + .is_ok() + ); assert!(tester.accounts.close_vault("vault2").is_ok()); let request = @@ -496,10 +527,12 @@ fn rpc_parity_get_set_vault_meta() { let tempdir = TempDir::new("").unwrap(); let tester = setup_with_vaults_support(tempdir.path().to_str().unwrap()); - assert!(tester - .accounts - .create_vault("vault1", &"password1".into()) - .is_ok()); + assert!( + tester + .accounts + .create_vault("vault1", &"password1".into()) + .is_ok() + ); // when no meta set let request = @@ -512,10 +545,12 @@ fn rpc_parity_get_set_vault_meta() { ); // when meta set - assert!(tester - .accounts - .set_vault_meta("vault1", "vault1_meta") - .is_ok()); + assert!( + tester + .accounts + .set_vault_meta("vault1", "vault1_meta") + .is_ok() + ); let request = r#"{"jsonrpc": "2.0", "method": "parity_getVaultMeta", "params":["vault1"], "id": 1}"#; diff --git a/crates/rpc/src/v1/tests/mocked/parity_set.rs b/crates/rpc/src/v1/tests/mocked/parity_set.rs index d382644ac7..a320ed334e 100644 --- a/crates/rpc/src/v1/tests/mocked/parity_set.rs +++ 
b/crates/rpc/src/v1/tests/mocked/parity_set.rs @@ -22,8 +22,8 @@ use ethcore::{client::TestBlockChainClient, miner::MinerService}; use sync::ManageNetwork; use super::manage_network::TestManageNetwork; +use crate::v1::{ParitySet, ParitySetClient, tests::helpers::TestMinerService}; use jsonrpc_core::IoHandler; -use v1::{tests::helpers::TestMinerService, ParitySet, ParitySetClient}; use fake_fetch::FakeFetch; @@ -178,7 +178,7 @@ fn rpc_parity_set_hash_content() { #[test] fn rpc_parity_remove_transaction() { - use types::transaction::{Action, Transaction, TypedTransaction}; + use crate::types::transaction::{Action, Transaction, TypedTransaction}; let miner = miner_service(); let client = client_service(); @@ -210,9 +210,9 @@ fn rpc_parity_remove_transaction() { #[test] fn rpc_parity_set_engine_signer() { + use crate::v1::{impls::ParitySetAccountsClient, traits::ParitySetAccounts}; use accounts::AccountProvider; use bytes::ToPretty; - use v1::{impls::ParitySetAccountsClient, traits::ParitySetAccounts}; let account_provider = Arc::new(AccountProvider::transient_provider()); account_provider @@ -239,5 +239,8 @@ fn rpc_parity_set_engine_signer() { .sign(::hash::keccak("x")) .unwrap() .to_vec(); - assert_eq!(&format!("{}", signature.pretty()), "6f46069ded2154af6e806706e4f7f6fd310ac45f3c6dccb85f11c0059ee20a09245df0a0008bb84a10882b1298284bc93058e7bc5938ea728e77620061687a6401"); + assert_eq!( + &format!("{}", signature.pretty()), + "6f46069ded2154af6e806706e4f7f6fd310ac45f3c6dccb85f11c0059ee20a09245df0a0008bb84a10882b1298284bc93058e7bc5938ea728e77620061687a6401" + ); } diff --git a/crates/rpc/src/v1/tests/mocked/personal.rs b/crates/rpc/src/v1/tests/mocked/personal.rs index eeaefed8f3..ec91f46092 100644 --- a/crates/rpc/src/v1/tests/mocked/personal.rs +++ b/crates/rpc/src/v1/tests/mocked/personal.rs @@ -16,6 +16,7 @@ use std::{str::FromStr, sync::Arc}; +use crate::types::transaction::{Action, Transaction, TypedTransaction}; use accounts::AccountProvider; use bytes::ToPretty; 
use crypto::publickey::Secret; @@ -25,18 +26,17 @@ use hash::keccak; use jsonrpc_core::IoHandler; use parity_runtime::Runtime; use parking_lot::Mutex; -use types::transaction::{Action, Transaction, TypedTransaction}; -use serde_json::to_value; -use v1::{ +use crate::v1::{ + Metadata, Personal, PersonalClient, helpers::{ - dispatch::{eth_data_hash, FullDispatcher}, + dispatch::{FullDispatcher, eth_data_hash}, eip191, nonce, }, tests::helpers::TestMinerService, types::{EIP191Version, PresignedTransaction}, - Metadata, Personal, PersonalClient, }; +use serde_json::to_value; struct PersonalTester { _runtime: Runtime, diff --git a/crates/rpc/src/v1/tests/mocked/pubsub.rs b/crates/rpc/src/v1/tests/mocked/pubsub.rs index 5e05c254f3..bde1b13b4e 100644 --- a/crates/rpc/src/v1/tests/mocked/pubsub.rs +++ b/crates/rpc/src/v1/tests/mocked/pubsub.rs @@ -14,17 +14,16 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use std::sync::{atomic, Arc}; +use std::sync::{Arc, atomic}; use jsonrpc_core::{ - self as core, + self as core, MetaIoHandler, futures::{self, Future, Stream}, - MetaIoHandler, }; use jsonrpc_pubsub::Session; +use crate::v1::{Metadata, PubSub, PubSubClient}; use parity_runtime::Runtime; -use v1::{Metadata, PubSub, PubSubClient}; fn rpc() -> MetaIoHandler { let mut io = MetaIoHandler::default(); diff --git a/crates/rpc/src/v1/tests/mocked/rpc.rs b/crates/rpc/src/v1/tests/mocked/rpc.rs index 6dc677c619..c609a2bf5c 100644 --- a/crates/rpc/src/v1/tests/mocked/rpc.rs +++ b/crates/rpc/src/v1/tests/mocked/rpc.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::v1::{Rpc, RpcClient}; use jsonrpc_core::IoHandler; use std::collections::BTreeMap; -use v1::{Rpc, RpcClient}; fn rpc_client() -> RpcClient { let mut modules = BTreeMap::new(); diff --git a/crates/rpc/src/v1/tests/mocked/secretstore.rs b/crates/rpc/src/v1/tests/mocked/secretstore.rs index 76dbbce500..f92b9e2f54 100644 --- a/crates/rpc/src/v1/tests/mocked/secretstore.rs +++ b/crates/rpc/src/v1/tests/mocked/secretstore.rs @@ -18,17 +18,17 @@ use std::sync::Arc; use accounts::AccountProvider; use crypto::{ - publickey::{verify_public, KeyPair, Signature}, DEFAULT_MAC, + publickey::{KeyPair, Signature, verify_public}, }; use ethereum_types::H256; +use crate::v1::{ + SecretStoreClient, helpers::secretstore::ordered_servers_keccak, metadata::Metadata, + traits::secretstore::SecretStore, types::EncryptedDocumentKey, +}; use jsonrpc_core::{IoHandler, Success}; use serde_json; -use v1::{ - helpers::secretstore::ordered_servers_keccak, metadata::Metadata, - traits::secretstore::SecretStore, types::EncryptedDocumentKey, SecretStoreClient, -}; struct Dependencies { pub accounts: Arc, @@ -195,13 +195,14 @@ fn rpc_secretstore_generate_document_key() { serde_json::from_str(&generation_response).unwrap(); // the only thing we can check is that 'encrypted_key' can be decrypted by passed account - assert!(deps - .accounts - .decrypt( - "00dfE63B22312ab4329aD0d28CaD8Af987A01932".parse().unwrap(), - Some("password".into()), - &DEFAULT_MAC, - &generation_response.encrypted_key.0 - ) - .is_ok()); + assert!( + deps.accounts + .decrypt( + "00dfE63B22312ab4329aD0d28CaD8Af987A01932".parse().unwrap(), + Some("password".into()), + &DEFAULT_MAC, + &generation_response.encrypted_key.0 + ) + .is_ok() + ); } diff --git a/crates/rpc/src/v1/tests/mocked/signer.rs b/crates/rpc/src/v1/tests/mocked/signer.rs index d35fdcde72..835d1c041b 100644 --- a/crates/rpc/src/v1/tests/mocked/signer.rs +++ b/crates/rpc/src/v1/tests/mocked/signer.rs @@ -18,25 +18,26 @@ use bytes::ToPretty; use 
ethereum_types::{Address, H520, U256}; use std::{str::FromStr, sync::Arc}; +use crate::types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}; use accounts::AccountProvider; use ethcore::client::TestBlockChainClient; use parity_runtime::Runtime; use parking_lot::Mutex; -use types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}; -use jsonrpc_core::IoHandler; -use serde_json; -use v1::{ +use crate::v1::{ + Origin, Signer, SignerClient, helpers::{ - dispatch::{self, eth_data_hash, FullDispatcher}, + ConfirmationPayload, FilledTransactionRequest, + dispatch::{self, FullDispatcher, eth_data_hash}, external_signer::{SignerService, SigningQueue}, - nonce, ConfirmationPayload, FilledTransactionRequest, + nonce, }, metadata::Metadata, tests::helpers::TestMinerService, types::Bytes as RpcBytes, - Origin, Signer, SignerClient, }; +use jsonrpc_core::IoHandler; +use serde_json; struct SignerTester { _runtime: Runtime, diff --git a/crates/rpc/src/v1/tests/mocked/signing.rs b/crates/rpc/src/v1/tests/mocked/signing.rs index a1a234605c..2ca444abc3 100644 --- a/crates/rpc/src/v1/tests/mocked/signing.rs +++ b/crates/rpc/src/v1/tests/mocked/signing.rs @@ -16,12 +16,11 @@ use std::{str::FromStr, sync::Arc, thread, time::Duration}; -use jsonrpc_core::{futures::Future, IoHandler, Success}; -use v1::{ +use crate::v1::{ helpers::{ dispatch, external_signer::{SignerService, SigningQueue}, - nonce, FullDispatcher, + nonce, }, impls::SigningQueueClient, metadata::Metadata, @@ -29,7 +28,12 @@ use v1::{ traits::{EthSigning, Parity, ParitySigning}, types::{ConfirmationResponse, RichRawTransaction}, }; +use jsonrpc_core::{IoHandler, Success, futures::Future}; +use crate::{ + dispatch::FullDispatcher, + types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}, +}; use accounts::AccountProvider; use bytes::ToPretty; use crypto::publickey::{Generator, Random, Secret}; @@ -38,11 +42,12 @@ use ethereum_types::{Address, H256, H520, 
U256}; use parity_runtime::{Executor, Runtime}; use parking_lot::Mutex; use serde_json; -use types::transaction::{Action, SignedTransaction, Transaction, TypedTransaction}; struct SigningTester { + #[allow(dead_code)] pub runtime: Runtime, pub signer: Arc, + #[allow(dead_code)] pub client: Arc, pub miner: Arc, pub accounts: Arc, @@ -151,17 +156,19 @@ fn should_add_sign_to_queue() { // the future must be polled at least once before request is queued. let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed( - sender, - Ok(ConfirmationResponse::Signature(H520::from_low_u64_be(0))), - ); - break; + ::std::thread::spawn(move || { + loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed( + sender, + Ok(ConfirmationResponse::Signature(H520::from_low_u64_be(0))), + ); + break; + } + ::std::thread::sleep(Duration::from_millis(100)) } - ::std::thread::sleep(Duration::from_millis(100)) }); let res = promise.wait().unwrap(); @@ -342,19 +349,21 @@ fn should_add_transaction_to_queue() { // the future must be polled at least once before request is queued. 
let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed( - sender, - Ok(ConfirmationResponse::SendTransaction( - H256::from_low_u64_be(0), - )), - ); - break; + ::std::thread::spawn(move || { + loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed( + sender, + Ok(ConfirmationResponse::SendTransaction( + H256::from_low_u64_be(0), + )), + ); + break; + } + ::std::thread::sleep(Duration::from_millis(100)) } - ::std::thread::sleep(Duration::from_millis(100)) }); let res = promise.wait().unwrap(); @@ -438,19 +447,21 @@ fn should_add_sign_transaction_to_the_queue() { // the future must be polled at least once before request is queued. let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed( - sender, - Ok(ConfirmationResponse::SignTransaction( - RichRawTransaction::from_signed(t.into()), - )), - ); - break; + ::std::thread::spawn(move || { + loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed( + sender, + Ok(ConfirmationResponse::SignTransaction( + RichRawTransaction::from_signed(t.into()), + )), + ); + break; + } + ::std::thread::sleep(Duration::from_millis(100)) } - ::std::thread::sleep(Duration::from_millis(100)) }); let res = promise.wait().unwrap(); @@ -579,17 +590,19 @@ fn should_add_decryption_to_the_queue() { // the future must be polled at least once before request is queued. 
let signer = tester.signer.clone(); - ::std::thread::spawn(move || loop { - if signer.requests().len() == 1 { - // respond - let sender = signer.take(&1.into()).unwrap(); - signer.request_confirmed( - sender, - Ok(ConfirmationResponse::Decrypt(vec![0x1, 0x2].into())), - ); - break; + ::std::thread::spawn(move || { + loop { + if signer.requests().len() == 1 { + // respond + let sender = signer.take(&1.into()).unwrap(); + signer.request_confirmed( + sender, + Ok(ConfirmationResponse::Decrypt(vec![0x1, 0x2].into())), + ); + break; + } + ::std::thread::sleep(Duration::from_millis(10)) } - ::std::thread::sleep(Duration::from_millis(10)) }); // check response: will deadlock if unsuccessful. diff --git a/crates/rpc/src/v1/tests/mocked/signing_unsafe.rs b/crates/rpc/src/v1/tests/mocked/signing_unsafe.rs index b8ddbc603a..02f9e24889 100644 --- a/crates/rpc/src/v1/tests/mocked/signing_unsafe.rs +++ b/crates/rpc/src/v1/tests/mocked/signing_unsafe.rs @@ -16,23 +16,23 @@ use std::{str::FromStr, sync::Arc}; +use crate::types::transaction::{Action, Transaction, TypedTransaction}; use accounts::AccountProvider; use ethcore::client::TestBlockChainClient; use ethereum_types::{Address, U256}; use parity_runtime::Runtime; use parking_lot::Mutex; -use types::transaction::{Action, Transaction, TypedTransaction}; -use jsonrpc_core::IoHandler; -use v1::{ +use crate::v1::{ + EthClientOptions, EthSigning, SigningUnsafeClient, helpers::{ dispatch::{self, FullDispatcher}, nonce, }, metadata::Metadata, tests::helpers::TestMinerService, - EthClientOptions, EthSigning, SigningUnsafeClient, }; +use jsonrpc_core::IoHandler; fn blockchain_client() -> Arc { let client = TestBlockChainClient::new(); @@ -48,7 +48,9 @@ fn miner_service() -> Arc { } struct EthTester { + #[allow(dead_code)] pub runtime: Runtime, + #[allow(dead_code)] pub client: Arc, pub accounts_provider: Arc, pub miner: Arc, diff --git a/crates/rpc/src/v1/tests/mocked/traces.rs b/crates/rpc/src/v1/tests/mocked/traces.rs index 
f9363da5be..e84628a90b 100644 --- a/crates/rpc/src/v1/tests/mocked/traces.rs +++ b/crates/rpc/src/v1/tests/mocked/traces.rs @@ -20,16 +20,16 @@ use ethcore::{ client::TestBlockChainClient, executed::{CallError, Executed}, trace::{ - trace::{Action, Call, Res}, LocalizedTrace, + trace::{Action, Call, Res}, }, }; use ethereum_types::{Address, H256}; use vm::CallType; +use crate::v1::{Metadata, Traces, TracesClient, tests::helpers::TestMinerService}; use jsonrpc_core::IoHandler; -use v1::{tests::helpers::TestMinerService, Metadata, Traces, TracesClient}; struct Tester { client: Arc, diff --git a/crates/rpc/src/v1/tests/mocked/web3.rs b/crates/rpc/src/v1/tests/mocked/web3.rs index 61158db3e4..0c448b5725 100644 --- a/crates/rpc/src/v1/tests/mocked/web3.rs +++ b/crates/rpc/src/v1/tests/mocked/web3.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::v1::{Web3, Web3Client}; use jsonrpc_core::IoHandler; -use v1::{Web3, Web3Client}; use version::version; #[test] diff --git a/crates/rpc/src/v1/traits/debug.rs b/crates/rpc/src/v1/traits/debug.rs index 98687c3d29..bfc7668270 100644 --- a/crates/rpc/src/v1/traits/debug.rs +++ b/crates/rpc/src/v1/traits/debug.rs @@ -19,7 +19,7 @@ use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use v1::types::RichBlock; +use crate::v1::types::RichBlock; /// Debug RPC interface. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/eth.rs b/crates/rpc/src/v1/traits/eth.rs index 8c699d6a2a..bf5515330c 100644 --- a/crates/rpc/src/v1/traits/eth.rs +++ b/crates/rpc/src/v1/traits/eth.rs @@ -15,11 +15,11 @@ // along with OpenEthereum. If not, see . //! Eth rpc interface. 
-use ethereum_types::{H160, H256, H64, U256, U64}; +use ethereum_types::{H64, H160, H256, U64, U256}; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use v1::types::{ +use crate::v1::types::{ BlockNumber, Bytes, CallRequest, EthAccount, EthFeeHistory, Filter, FilterChanges, Index, Log, Receipt, RichBlock, SyncStatus, Transaction, Work, }; @@ -64,7 +64,7 @@ pub trait Eth { /// Returns transaction fee history. #[rpc(name = "eth_feeHistory")] fn fee_history(&self, _: U256, _: BlockNumber, _: Option>) - -> BoxFuture; + -> BoxFuture; /// Returns accounts list. #[rpc(name = "eth_accounts")] diff --git a/crates/rpc/src/v1/traits/eth_pubsub.rs b/crates/rpc/src/v1/traits/eth_pubsub.rs index 06b9fa2794..bbb65ec722 100644 --- a/crates/rpc/src/v1/traits/eth_pubsub.rs +++ b/crates/rpc/src/v1/traits/eth_pubsub.rs @@ -18,9 +18,9 @@ use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed, SubscriptionId}; +use jsonrpc_pubsub::{SubscriptionId, typed}; -use v1::types::pubsub; +use crate::v1::types::pubsub; /// Eth PUB-SUB rpc interface. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/eth_signing.rs b/crates/rpc/src/v1/traits/eth_signing.rs index 1622fcb956..4c1a077f69 100644 --- a/crates/rpc/src/v1/traits/eth_signing.rs +++ b/crates/rpc/src/v1/traits/eth_signing.rs @@ -19,8 +19,8 @@ use jsonrpc_core::BoxFuture; use jsonrpc_derive::rpc; +use crate::v1::types::{Bytes, RichRawTransaction, TransactionRequest}; use ethereum_types::{H160, H256, H520}; -use v1::types::{Bytes, RichRawTransaction, TransactionRequest}; /// Signing methods implementation relying on unlocked accounts. 
#[rpc(server)] diff --git a/crates/rpc/src/v1/traits/parity.rs b/crates/rpc/src/v1/traits/parity.rs index 7199fc7a7d..3d895599d4 100644 --- a/crates/rpc/src/v1/traits/parity.rs +++ b/crates/rpc/src/v1/traits/parity.rs @@ -18,15 +18,15 @@ use std::collections::BTreeMap; -use ethereum_types::{H160, H256, H512, H64, U256, U64}; +use ethereum_types::{H64, H160, H256, H512, U64, U256}; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use ethcore::miner::TransactionFilter; -use v1::types::{ +use crate::v1::types::{ BlockNumber, Bytes, CallRequest, ChainStatus, Histogram, LocalTransactionStatus, Peers, Receipt, RecoveredAccount, RichHeader, RpcSettings, Transaction, TransactionStats, }; +use ethcore::miner::TransactionFilter; /// Parity-specific rpc interface. #[rpc(server)] @@ -137,6 +137,16 @@ pub trait Parity { _: Option, ) -> Result>; + /// Returns all pending transactions from transaction queue. + #[rpc(name = "eth_pendingTransactions")] + fn pending_transactions_eth( + &self, + size: Option, + filter: Option, + ) -> Result> { + self.pending_transactions(size, filter) + } + /// Returns all transactions from transaction queue. /// /// Some of them might not be ready to be included in a block yet. @@ -189,7 +199,7 @@ pub trait Parity { /// Get node kind info. #[rpc(name = "parity_nodeKind")] - fn node_kind(&self) -> Result<::v1::types::NodeKind>; + fn node_kind(&self) -> Result; /// Get block header. /// Same as `eth_getBlockByNumber` but without uncles and transactions. diff --git a/crates/rpc/src/v1/traits/parity_accounts.rs b/crates/rpc/src/v1/traits/parity_accounts.rs index d856f53e1c..5f4ebf3746 100644 --- a/crates/rpc/src/v1/traits/parity_accounts.rs +++ b/crates/rpc/src/v1/traits/parity_accounts.rs @@ -17,12 +17,12 @@ //! Parity Accounts-related rpc interface. 
use std::collections::BTreeMap; +use crate::v1::types::{AccountInfo, DeriveHash, DeriveHierarchical, ExtAccountInfo}; use ethereum_types::{H160, H256, H520}; use ethkey::Password; use ethstore::KeyFile; use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use v1::types::{AccountInfo, DeriveHash, DeriveHierarchical, ExtAccountInfo}; /// Parity-specific read-only accounts rpc interface. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/parity_set.rs b/crates/rpc/src/v1/traits/parity_set.rs index fbaacb0ca5..3f905b9f83 100644 --- a/crates/rpc/src/v1/traits/parity_set.rs +++ b/crates/rpc/src/v1/traits/parity_set.rs @@ -20,7 +20,7 @@ use ethereum_types::{H160, H256, U256}; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use v1::types::{Bytes, Transaction}; +use crate::v1::types::{Bytes, Transaction}; /// Parity-specific rpc interface for operations altering the account-related settings. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/parity_signing.rs b/crates/rpc/src/v1/traits/parity_signing.rs index 058c1536d9..7838bc8a85 100644 --- a/crates/rpc/src/v1/traits/parity_signing.rs +++ b/crates/rpc/src/v1/traits/parity_signing.rs @@ -18,8 +18,8 @@ use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; +use crate::v1::types::{Bytes, ConfirmationResponse, Either, TransactionRequest}; use ethereum_types::{H160, U256}; -use v1::types::{Bytes, ConfirmationResponse, Either, TransactionRequest}; /// Signing methods implementation. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/personal.rs b/crates/rpc/src/v1/traits/personal.rs index b3b61ec557..91795f8822 100644 --- a/crates/rpc/src/v1/traits/personal.rs +++ b/crates/rpc/src/v1/traits/personal.rs @@ -15,13 +15,13 @@ // along with OpenEthereum. If not, see . //! Personal rpc interface. 
+use crate::v1::types::{ + Bytes, EIP191Version, RichRawTransaction as RpcRichRawTransaction, TransactionRequest, +}; use eip_712::EIP712; use ethereum_types::{H160, H256, H520, U128}; -use jsonrpc_core::{types::Value, BoxFuture, Result}; +use jsonrpc_core::{BoxFuture, Result, types::Value}; use jsonrpc_derive::rpc; -use v1::types::{ - Bytes, EIP191Version, RichRawTransaction as RpcRichRawTransaction, TransactionRequest, -}; /// Personal rpc interface. Safe (read-only) functions. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/pubsub.rs b/crates/rpc/src/v1/traits/pubsub.rs index 429b11565d..8d0356ab1f 100644 --- a/crates/rpc/src/v1/traits/pubsub.rs +++ b/crates/rpc/src/v1/traits/pubsub.rs @@ -18,7 +18,7 @@ use jsonrpc_core::{Params, Result, Value}; use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpc_pubsub::{SubscriptionId, typed::Subscriber}; /// Parity-specific PUB-SUB rpc interface. #[rpc(server)] diff --git a/crates/rpc/src/v1/traits/secretstore.rs b/crates/rpc/src/v1/traits/secretstore.rs index b65efa0a1a..f6e0401fe7 100644 --- a/crates/rpc/src/v1/traits/secretstore.rs +++ b/crates/rpc/src/v1/traits/secretstore.rs @@ -18,11 +18,11 @@ use std::collections::BTreeSet; +use crate::v1::types::{Bytes, EncryptedDocumentKey}; use ethereum_types::{H160, H256, H512}; use ethkey::Password; use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use v1::types::{Bytes, EncryptedDocumentKey}; /// Parity-specific rpc interface. 
#[rpc(server)] diff --git a/crates/rpc/src/v1/traits/signer.rs b/crates/rpc/src/v1/traits/signer.rs index d679c34130..b3dbdf99cb 100644 --- a/crates/rpc/src/v1/traits/signer.rs +++ b/crates/rpc/src/v1/traits/signer.rs @@ -19,9 +19,9 @@ use ethereum_types::U256; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpc_pubsub::{SubscriptionId, typed::Subscriber}; -use v1::types::{ +use crate::v1::types::{ Bytes, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, TransactionModification, }; diff --git a/crates/rpc/src/v1/traits/traces.rs b/crates/rpc/src/v1/traits/traces.rs index db8dfa1987..67b8053c43 100644 --- a/crates/rpc/src/v1/traits/traces.rs +++ b/crates/rpc/src/v1/traits/traces.rs @@ -16,13 +16,13 @@ //! Traces specific rpc interface. -use ethereum_types::H256; -use jsonrpc_core::Result; -use jsonrpc_derive::rpc; -use v1::types::{ +use crate::v1::types::{ BlockNumber, Bytes, CallRequest, Index, LocalizedTrace, TraceFilter, TraceOptions, TraceResults, TraceResultsWithTransactionHash, }; +use ethereum_types::H256; +use jsonrpc_core::Result; +use jsonrpc_derive::rpc; /// Traces specific rpc interface. #[rpc(server)] @@ -46,7 +46,7 @@ pub trait Traces { /// Executes the given call and returns a number of possible traces for it. #[rpc(name = "trace_call")] fn call(&self, _: CallRequest, _: TraceOptions, _: Option) - -> Result; + -> Result; /// Executes all given calls and returns a number of possible traces for each of it. #[rpc(name = "trace_callMany")] diff --git a/crates/rpc/src/v1/traits/web3.rs b/crates/rpc/src/v1/traits/web3.rs index a2323a26b4..01d8329b4b 100644 --- a/crates/rpc/src/v1/traits/web3.rs +++ b/crates/rpc/src/v1/traits/web3.rs @@ -19,7 +19,7 @@ use ethereum_types::H256; use jsonrpc_core::Result; use jsonrpc_derive::rpc; -use v1::types::Bytes; +use crate::v1::types::Bytes; /// Web3 rpc interface. 
#[rpc(server)] diff --git a/crates/rpc/src/v1/types/account_info.rs b/crates/rpc/src/v1/types/account_info.rs index dd1eb612c6..c8a8d6cb5b 100644 --- a/crates/rpc/src/v1/types/account_info.rs +++ b/crates/rpc/src/v1/types/account_info.rs @@ -16,8 +16,8 @@ //! Return types for RPC calls -use ethereum_types::{Address, Public, H160, H256, U256}; -use v1::types::Bytes; +use crate::v1::types::Bytes; +use ethereum_types::{Address, H160, H256, Public, U256}; /// Account information. #[derive(Debug, Default, Clone, PartialEq, Serialize)] diff --git a/crates/rpc/src/v1/types/block.rs b/crates/rpc/src/v1/types/block.rs index 0286e5272e..a84b2e8eed 100644 --- a/crates/rpc/src/v1/types/block.rs +++ b/crates/rpc/src/v1/types/block.rs @@ -16,10 +16,12 @@ use std::{collections::BTreeMap, ops::Deref}; +use crate::{ + types::{BlockNumber, encoded::Header as EthHeader}, + v1::types::{Bytes, Transaction}, +}; use ethereum_types::{Bloom as H2048, H160, H256, U256}; -use serde::{ser::Error, Serialize, Serializer}; -use types::{encoded::Header as EthHeader, BlockNumber}; -use v1::types::{Bytes, Transaction}; +use serde::{Serialize, Serializer, ser::Error}; /// Block Transactions #[derive(Debug)] @@ -199,7 +201,7 @@ impl Serialize for Rich { where S: Serializer, { - use serde_json::{to_value, Value}; + use serde_json::{Value, to_value}; let serialized = (to_value(&self.inner), to_value(&self.extra_info)); if let (Ok(Value::Object(mut value)), Ok(Value::Object(extras))) = serialized { @@ -218,10 +220,10 @@ impl Serialize for Rich { #[cfg(test)] mod tests { use super::{Block, BlockTransactions, Header, RichBlock, RichHeader}; - use ethereum_types::{Bloom as H2048, H160, H256, H64, U256}; + use crate::v1::types::{Bytes, Transaction}; + use ethereum_types::{Bloom as H2048, H64, H160, H256, U256}; use serde_json; use std::collections::BTreeMap; - use v1::types::{Bytes, Transaction}; #[test] fn test_serialize_block_transactions() { diff --git a/crates/rpc/src/v1/types/block_number.rs 
b/crates/rpc/src/v1/types/block_number.rs index 6ac3afb576..a18445935e 100644 --- a/crates/rpc/src/v1/types/block_number.rs +++ b/crates/rpc/src/v1/types/block_number.rs @@ -17,8 +17,8 @@ use ethcore::client::BlockId; use hash::H256; use serde::{ - de::{Error, MapAccess, Visitor}, Deserialize, Deserializer, Serialize, Serializer, + de::{Error, MapAccess, Visitor}, }; use std::fmt; diff --git a/crates/rpc/src/v1/types/call_request.rs b/crates/rpc/src/v1/types/call_request.rs index 296210642d..498c9b6b12 100644 --- a/crates/rpc/src/v1/types/call_request.rs +++ b/crates/rpc/src/v1/types/call_request.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use ethereum_types::{H160, U256, U64}; -use v1::{ +use crate::v1::{ helpers::CallRequest as Request, types::{AccessList, Bytes}, }; +use ethereum_types::{H160, U64, U256}; /// Call request #[derive(Debug, Default, PartialEq, Deserialize)] diff --git a/crates/rpc/src/v1/types/confirmations.rs b/crates/rpc/src/v1/types/confirmations.rs index 5471d8cc50..96904763e6 100644 --- a/crates/rpc/src/v1/types/confirmations.rs +++ b/crates/rpc/src/v1/types/confirmations.rs @@ -21,12 +21,12 @@ use bytes::ToPretty; use serde::{Serialize, Serializer}; use std::fmt; -use ethereum_types::{H160, H256, H520, U256}; -use ethkey::Password; -use v1::{ +use crate::v1::{ helpers, types::{Bytes, Origin, RichRawTransaction, TransactionCondition, TransactionRequest}, }; +use ethereum_types::{H160, H256, H520, U256}; +use ethkey::Password; /// Confirmation waiting in a queue #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] @@ -302,10 +302,10 @@ where #[cfg(test)] mod tests { use super::*; + use crate::v1::{helpers, types::TransactionCondition}; use ethereum_types::{Address, H256, U256}; use serde_json; use std::str::FromStr; - use v1::{helpers, types::TransactionCondition}; #[test] fn should_serialize_sign_confirmation() { diff --git 
a/crates/rpc/src/v1/types/derivation.rs b/crates/rpc/src/v1/types/derivation.rs index c1f6340402..c89f3ab46f 100644 --- a/crates/rpc/src/v1/types/derivation.rs +++ b/crates/rpc/src/v1/types/derivation.rs @@ -15,8 +15,8 @@ // along with OpenEthereum. If not, see . use serde::{ - de::{Error, Visitor}, Deserialize, Deserializer, + de::{Error, Visitor}, }; use std::fmt; @@ -75,6 +75,7 @@ impl From for Derive { #[cfg(any(test, feature = "accounts"))] #[derive(Debug)] pub enum ConvertError { + #[allow(dead_code)] IndexOverlfow(u64), } diff --git a/crates/rpc/src/v1/types/eip191.rs b/crates/rpc/src/v1/types/eip191.rs index 6733cc778d..cfa9b60f98 100644 --- a/crates/rpc/src/v1/types/eip191.rs +++ b/crates/rpc/src/v1/types/eip191.rs @@ -16,9 +16,9 @@ //! EIP-191 specific types +use crate::v1::types::Bytes; use ethereum_types::H160; -use serde::{de, Deserialize, Deserializer}; -use v1::types::Bytes; +use serde::{Deserialize, Deserializer, de}; /// EIP-191 version specifier #[derive(Debug)] @@ -55,7 +55,7 @@ impl<'de> Deserialize<'de> for EIP191Version { return Err(de::Error::custom(format!( "Invalid byte version '{}'", other - ))) + ))); } }; Ok(byte_version) diff --git a/crates/rpc/src/v1/types/fee_history.rs b/crates/rpc/src/v1/types/fee_history.rs index 6c7eaf8653..4e4db27b39 100644 --- a/crates/rpc/src/v1/types/fee_history.rs +++ b/crates/rpc/src/v1/types/fee_history.rs @@ -16,8 +16,8 @@ //! Return types for RPC calls +use crate::v1::types::BlockNumber; use ethereum_types::U256; -use v1::types::BlockNumber; /// Account information. #[derive(Debug, Default, Clone, PartialEq, Serialize)] diff --git a/crates/rpc/src/v1/types/filter.rs b/crates/rpc/src/v1/types/filter.rs index 7e610e1849..fa77e86102 100644 --- a/crates/rpc/src/v1/types/filter.rs +++ b/crates/rpc/src/v1/types/filter.rs @@ -14,16 +14,16 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::types::{filter::Filter as EthFilter, ids::BlockId}; use ethereum_types::{H160, H256}; use jsonrpc_core::Error as RpcError; use serde::{ - de::{DeserializeOwned, Error}, Deserialize, Deserializer, Serialize, Serializer, + de::{DeserializeOwned, Error}, }; -use serde_json::{from_value, Value}; -use types::{filter::Filter as EthFilter, ids::BlockId}; +use serde_json::{Value, from_value}; -use v1::{ +use crate::v1::{ helpers::errors::invalid_params, types::{BlockNumber, Log}, }; @@ -174,11 +174,13 @@ impl Serialize for FilterChanges { #[cfg(test)] mod tests { use super::{Filter, Topic, VariadicValue}; + use crate::{ + types::{filter::Filter as EthFilter, ids::BlockId}, + v1::types::BlockNumber, + }; use ethereum_types::H256; use serde_json; use std::str::FromStr; - use types::{filter::Filter as EthFilter, ids::BlockId}; - use v1::types::BlockNumber; #[test] fn topic_deserialization() { @@ -257,10 +259,12 @@ mod tests { address: Some(vec![]), topics: vec![ None, - Some(vec![H256::from_str( - "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b" - ) - .unwrap()]), + Some(vec![ + H256::from_str( + "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b" + ) + .unwrap() + ]), None, None, ], diff --git a/crates/rpc/src/v1/types/index.rs b/crates/rpc/src/v1/types/index.rs index 9c25b7b61a..2a890a8cb6 100644 --- a/crates/rpc/src/v1/types/index.rs +++ b/crates/rpc/src/v1/types/index.rs @@ -15,8 +15,8 @@ // along with OpenEthereum. If not, see . use serde::{ - de::{Error, Visitor}, Deserialize, Deserializer, + de::{Error, Visitor}, }; use std::fmt; diff --git a/crates/rpc/src/v1/types/log.rs b/crates/rpc/src/v1/types/log.rs index 72278ca5d2..3ebc5ef9bb 100644 --- a/crates/rpc/src/v1/types/log.rs +++ b/crates/rpc/src/v1/types/log.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::{ + types::log_entry::{LocalizedLogEntry, LogEntry}, + v1::types::Bytes, +}; use ethereum_types::{H160, H256, U256}; -use types::log_entry::{LocalizedLogEntry, LogEntry}; -use v1::types::Bytes; /// Log #[derive(Debug, Serialize, PartialEq, Eq, Hash, Clone)] @@ -86,10 +88,10 @@ impl From for Log { #[cfg(test)] mod tests { + use crate::v1::types::Log; use ethereum_types::{H160, H256, U256}; use serde_json; use std::str::FromStr; - use v1::types::Log; #[test] fn log_serialization() { diff --git a/crates/rpc/src/v1/types/mod.rs b/crates/rpc/src/v1/types/mod.rs index df1e7d535c..7be2e544de 100644 --- a/crates/rpc/src/v1/types/mod.rs +++ b/crates/rpc/src/v1/types/mod.rs @@ -20,8 +20,8 @@ pub use rpc_common::Bytes; pub use self::{ account_info::{AccountInfo, EthAccount, ExtAccountInfo, RecoveredAccount, StorageProof}, - block::{Block, BlockTransactions, Header, Rich, RichBlock, RichHeader}, - block_number::{block_number_to_id, BlockNumber}, + block::{Block, BlockTransactions, Header, RichBlock, RichHeader}, + block_number::{BlockNumber, block_number_to_id}, call_request::CallRequest, confirmations::{ ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, @@ -40,14 +40,11 @@ pub use self::{ receipt::Receipt, rpc_settings::RpcSettings, secretstore::EncryptedDocumentKey, - sync::{ - ChainStatus, EthProtocolInfo, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, Peers, - SyncInfo, SyncStatus, TransactionStats, - }, + sync::{ChainStatus, Peers, SyncInfo, SyncStatus, TransactionStats}, trace::{LocalizedTrace, TraceResults, TraceResultsWithTransactionHash}, trace_filter::TraceFilter, transaction::{LocalTransactionStatus, RichRawTransaction, Transaction}, - transaction_access_list::{AccessList, AccessListItem}, + transaction_access_list::AccessList, transaction_condition::TransactionCondition, transaction_request::TransactionRequest, work::Work, diff --git a/crates/rpc/src/v1/types/pubsub.rs b/crates/rpc/src/v1/types/pubsub.rs index 17fa3b8785..aa403d5a5a 100644 
--- a/crates/rpc/src/v1/types/pubsub.rs +++ b/crates/rpc/src/v1/types/pubsub.rs @@ -16,10 +16,10 @@ //! Pub-Sub types. +use crate::v1::types::{Filter, Log, RichHeader}; use ethereum_types::H256; -use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer}; -use serde_json::{from_value, Value}; -use v1::types::{Filter, Log, RichHeader}; +use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; +use serde_json::{Value, from_value}; /// Subscription result. #[derive(Debug, Clone, PartialEq, Eq)] @@ -95,8 +95,8 @@ impl<'a> Deserialize<'a> for Params { #[cfg(test)] mod tests { use super::{Kind, Params, Result}; + use crate::v1::types::{Filter, Header, RichHeader, filter::VariadicValue}; use serde_json; - use v1::types::{filter::VariadicValue, Filter, Header, RichHeader}; #[test] fn should_deserialize_kind() { diff --git a/crates/rpc/src/v1/types/receipt.rs b/crates/rpc/src/v1/types/receipt.rs index b17f4fca74..1afeb1e95a 100644 --- a/crates/rpc/src/v1/types/receipt.rs +++ b/crates/rpc/src/v1/types/receipt.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use ethereum_types::{Bloom as H2048, H160, H256, U256, U64}; -use types::receipt::{LocalizedReceipt, RichReceipt, TransactionOutcome, TypedReceipt}; -use v1::types::Log; +use crate::{ + types::receipt::{LocalizedReceipt, RichReceipt, TransactionOutcome, TypedReceipt}, + v1::types::Log, +}; +use ethereum_types::{Bloom as H2048, H160, H256, U64, U256}; /// Receipt #[derive(Debug, Serialize)] @@ -145,10 +147,12 @@ impl From for Receipt { #[cfg(test)] mod tests { + use crate::{ + types::transaction::TypedTxId, + v1::types::{Log, Receipt}, + }; use ethereum_types::{Bloom, H256}; use serde_json; - use types::transaction::TypedTxId; - use v1::types::{Log, Receipt}; #[test] fn receipt_serialization() { diff --git a/crates/rpc/src/v1/types/secretstore.rs b/crates/rpc/src/v1/types/secretstore.rs index bf77ca0a00..a5a7385377 100644 --- a/crates/rpc/src/v1/types/secretstore.rs +++ b/crates/rpc/src/v1/types/secretstore.rs @@ -14,8 +14,8 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::v1::types::Bytes; use ethereum_types::H512; -use v1::types::Bytes; /// Encrypted document key. 
#[derive(Default, Debug, Serialize, PartialEq)] diff --git a/crates/rpc/src/v1/types/trace.rs b/crates/rpc/src/v1/types/trace.rs index 7e055c5423..38a131aabf 100644 --- a/crates/rpc/src/v1/types/trace.rs +++ b/crates/rpc/src/v1/types/trace.rs @@ -16,17 +16,17 @@ use std::collections::BTreeMap; +use crate::types::{account_diff, state_diff}; use ethcore::{ client::Executed, trace as et, - trace::{trace, FlatTrace, LocalizedTrace as EthLocalizedTrace, TraceError}, + trace::{FlatTrace, LocalizedTrace as EthLocalizedTrace, TraceError, trace}, }; use ethereum_types::{H160, H256, U256}; -use serde::{ser::SerializeStruct, Serialize, Serializer}; -use types::{account_diff, state_diff}; +use serde::{Serialize, Serializer, ser::SerializeStruct}; use vm; -use v1::types::Bytes; +use crate::v1::types::Bytes; #[derive(Debug, Serialize)] /// A diff of some chunk of memory. @@ -678,11 +678,11 @@ impl From<(H256, Executed)> for TraceResultsWithTransactionHash { #[cfg(test)] mod tests { use super::*; + use crate::v1::types::Bytes; use ethcore::trace::TraceError; use ethereum_types::{Address, H256}; use serde_json; use std::collections::BTreeMap; - use v1::types::Bytes; #[test] fn should_serialize_trace_results() { diff --git a/crates/rpc/src/v1/types/trace_filter.rs b/crates/rpc/src/v1/types/trace_filter.rs index 7c61c78a3d..4c8699d170 100644 --- a/crates/rpc/src/v1/types/trace_filter.rs +++ b/crates/rpc/src/v1/types/trace_filter.rs @@ -16,9 +16,9 @@ //! Trace filter deserialization. +use crate::v1::types::BlockNumber; use ethcore::{client, client::BlockId}; use ethereum_types::H160; -use v1::types::BlockNumber; /// Trace filter #[derive(Debug, PartialEq, Deserialize)] @@ -47,7 +47,9 @@ impl Into for TraceFilter { BlockNumber::Earliest => BlockId::Earliest, BlockNumber::Latest => BlockId::Latest, BlockNumber::Pending => { - warn!("Pending traces are not supported and might be removed in future versions. 
Falling back to Latest"); + warn!( + "Pending traces are not supported and might be removed in future versions. Falling back to Latest" + ); BlockId::Latest } }; @@ -69,9 +71,9 @@ impl Into for TraceFilter { #[cfg(test)] mod tests { + use crate::v1::types::{BlockNumber, TraceFilter}; use ethereum_types::Address; use serde_json; - use v1::types::{BlockNumber, TraceFilter}; #[test] fn test_empty_trace_filter_deserialize() { diff --git a/crates/rpc/src/v1/types/transaction.rs b/crates/rpc/src/v1/types/transaction.rs index 220a701a90..ca4a983d76 100644 --- a/crates/rpc/src/v1/types/transaction.rs +++ b/crates/rpc/src/v1/types/transaction.rs @@ -16,15 +16,17 @@ use std::sync::Arc; -use ethcore::{contract_address, CreateContractAddress}; -use ethereum_types::{H160, H256, H512, U256, U64}; -use miner; -use serde::{ser::SerializeStruct, Serialize, Serializer}; -use types::transaction::{ - Action, LocalizedTransaction, PendingTransaction, SignedTransaction, TypedTransaction, - TypedTxId, +use crate::{ + miner, + types::transaction::{ + Action, LocalizedTransaction, PendingTransaction, SignedTransaction, TypedTransaction, + TypedTxId, + }, + v1::types::{AccessList, Bytes, TransactionCondition}, }; -use v1::types::{AccessList, Bytes, TransactionCondition}; +use ethcore::{CreateContractAddress, contract_address}; +use ethereum_types::{H160, H256, H512, U64, U256}; +use serde::{Serialize, Serializer, ser::SerializeStruct}; /// Transaction #[derive(Debug, Default, Clone, PartialEq, Serialize)] @@ -340,7 +342,7 @@ impl LocalTransactionStatus { let convert = |tx: Arc| { Transaction::from_signed(tx.signed().clone()) }; - use miner::pool::local_transactions::Status::*; + use crate::miner::pool::local_transactions::Status::*; match s { Pending(_) => LocalTransactionStatus::Pending, Mined(tx) => LocalTransactionStatus::Mined(convert(tx)), @@ -360,11 +362,12 @@ impl LocalTransactionStatus { #[cfg(test)] mod tests { + use crate::v1::types::transaction_access_list::AccessListItem; + use 
super::{LocalTransactionStatus, Transaction}; + use crate::types::transaction::TypedTxId; use ethereum_types::H256; use serde_json; - use types::transaction::TypedTxId; - use v1::types::AccessListItem; #[test] fn test_transaction_serialize() { diff --git a/crates/rpc/src/v1/types/transaction_access_list.rs b/crates/rpc/src/v1/types/transaction_access_list.rs index 810537e694..0746888a82 100644 --- a/crates/rpc/src/v1/types/transaction_access_list.rs +++ b/crates/rpc/src/v1/types/transaction_access_list.rs @@ -1,7 +1,7 @@ +use crate::types::transaction::AccessListItem as InnerAccessListItem; use ethereum_types::{H160, H256}; use serde::Serialize; use std::vec::Vec; -use types::transaction::AccessListItem as InnerAccessListItem; pub type AccessList = Vec; #[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize)] diff --git a/crates/rpc/src/v1/types/transaction_condition.rs b/crates/rpc/src/v1/types/transaction_condition.rs index 55b1d613ce..bf0c8759d9 100644 --- a/crates/rpc/src/v1/types/transaction_condition.rs +++ b/crates/rpc/src/v1/types/transaction_condition.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use types::transaction; +use crate::types::transaction; /// Represents condition on minimum block number or block timestamp. #[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] diff --git a/crates/rpc/src/v1/types/transaction_request.rs b/crates/rpc/src/v1/types/transaction_request.rs index 1f24934055..96df20b5fd 100644 --- a/crates/rpc/src/v1/types/transaction_request.rs +++ b/crates/rpc/src/v1/types/transaction_request.rs @@ -16,12 +16,12 @@ //! 
`TransactionRequest` type -use ansi_term::Colour; -use ethereum_types::{H160, U256, U64}; -use v1::{ +use crate::v1::{ helpers, types::{AccessList, Bytes, TransactionCondition}, }; +use ansi_term::Colour; +use ethereum_types::{H160, U64, U256}; use std::fmt; @@ -167,11 +167,11 @@ impl Into for TransactionRequest { #[cfg(test)] mod tests { use super::*; + use crate::v1::types::TransactionCondition; use ethereum_types::{H160, U256}; use rustc_hex::FromHex; use serde_json; use std::str::FromStr; - use v1::types::TransactionCondition; #[test] fn transaction_request_deserialize() { diff --git a/crates/runtime/io/Cargo.toml b/crates/runtime/io/Cargo.toml index 730625796d..537316828f 100644 --- a/crates/runtime/io/Cargo.toml +++ b/crates/runtime/io/Cargo.toml @@ -1,19 +1,19 @@ [package] description = "Ethcore IO library" -homepage = "https://github.com/openethereum/openethereum" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "ethcore-io" version = "1.12.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] fnv = "1.0" mio = { version = "0.6.8", optional = true } -crossbeam-deque = "0.6" -parking_lot = "0.11.1" +crossbeam-deque = "0.7.4" +parking_lot = "0.12" log = "0.4" slab = "0.4" -num_cpus = "1.8" timer = "0.2" time = "0.1" tokio = "0.1" diff --git a/crates/runtime/io/src/lib.rs b/crates/runtime/io/src/lib.rs index 369b3b164f..0c155b2d0e 100644 --- a/crates/runtime/io/src/lib.rs +++ b/crates/runtime/io/src/lib.rs @@ -46,7 +46,7 @@ //! } //! //! fn main () { -//! let mut service = IoService::::start("name").expect("Error creating network service"); +//! let mut service = IoService::::start("name", 4).expect("Error creating network service"); //! service.register_handler(Arc::new(MyHandler)).unwrap(); //! //! 
// Wait for quit condition @@ -76,7 +76,6 @@ extern crate log as rlog; extern crate crossbeam_deque as deque; extern crate fnv; extern crate futures; -extern crate num_cpus; extern crate parking_lot; extern crate slab; extern crate time; @@ -90,10 +89,10 @@ mod service_non_mio; #[cfg(feature = "mio")] mod worker; -#[cfg(feature = "mio")] -use mio::deprecated::{EventLoop, NotifyError}; #[cfg(feature = "mio")] use mio::Token; +#[cfg(feature = "mio")] +use mio::deprecated::{EventLoop, NotifyError}; use std::{cell::Cell, error, fmt}; thread_local! { @@ -142,10 +141,10 @@ impl From>> for IoError where Message: Send, { - fn from(_err: NotifyError>) -> IoError { + fn from(err: NotifyError>) -> IoError { IoError::Mio(::std::io::Error::new( ::std::io::ErrorKind::ConnectionAborted, - "Network IO notification error", + format!("Network IO notification error {}", err), )) } } @@ -202,16 +201,16 @@ where #[cfg(feature = "mio")] pub use service_mio::{ - IoChannel, IoContext, IoManager, IoService, StreamToken, TimerToken, TOKENS_PER_HANDLER, + IoChannel, IoContext, IoManager, IoService, StreamToken, TOKENS_PER_HANDLER, TimerToken, }; #[cfg(not(feature = "mio"))] -pub use service_non_mio::{IoChannel, IoContext, IoService, TimerToken, TOKENS_PER_HANDLER}; +pub use service_non_mio::{IoChannel, IoContext, IoService, TOKENS_PER_HANDLER, TimerToken}; #[cfg(test)] mod tests { use super::*; use std::{ - sync::{atomic, Arc}, + sync::{Arc, atomic}, thread, time::Duration, }; @@ -240,7 +239,7 @@ mod tests { let handler = Arc::new(MyHandler(atomic::AtomicBool::new(false))); let service = - IoService::::start("Test").expect("Error creating network service"); + IoService::::start("Test", 4).expect("Error creating network service"); service.register_handler(handler.clone()).unwrap(); service.send_message(MyMessage { data: 5 }).unwrap(); @@ -254,9 +253,7 @@ mod tests { struct MyHandler(atomic::AtomicBool); #[derive(Clone)] - struct MyMessage { - data: u32, - } + struct MyMessage {} impl 
IoHandler for MyHandler { fn initialize(&self, io: &IoContext) { @@ -273,7 +270,7 @@ mod tests { let handler = Arc::new(MyHandler(atomic::AtomicBool::new(false))); let service = - IoService::::start("Test").expect("Error creating network service"); + IoService::::start("Test", 4).expect("Error creating network service"); service.register_handler(handler.clone()).unwrap(); thread::sleep(Duration::from_secs(2)); @@ -285,9 +282,7 @@ mod tests { struct MyHandler(atomic::AtomicUsize); #[derive(Clone)] - struct MyMessage { - data: u32, - } + struct MyMessage {} impl IoHandler for MyHandler { fn initialize(&self, io: &IoContext) { @@ -303,7 +298,7 @@ mod tests { let handler = Arc::new(MyHandler(atomic::AtomicUsize::new(0))); let service = - IoService::::start("Test").expect("Error creating network service"); + IoService::::start("Test", 4).expect("Error creating network service"); service.register_handler(handler.clone()).unwrap(); thread::sleep(Duration::from_secs(2)); diff --git a/crates/runtime/io/src/service_mio.rs b/crates/runtime/io/src/service_mio.rs index 41cd7b19eb..5b8d8d1cca 100644 --- a/crates/runtime/io/src/service_mio.rs +++ b/crates/runtime/io/src/service_mio.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::{ + IoError, IoHandler, + worker::{Work, WorkType, Worker}, +}; use deque; use mio::{ deprecated::{EventLoop, EventLoopBuilder, Handler, Sender}, @@ -28,9 +32,6 @@ use std::{ thread::{self, JoinHandle}, time::Duration, }; -use worker::{Work, WorkType, Worker}; -use IoError; -use IoHandler; /// Timer ID pub type TimerToken = usize; @@ -213,9 +214,10 @@ where symbolic_name: &str, event_loop: &mut EventLoop>, handlers: Arc>>>>, + num_workers: i32, ) -> Result<(), IoError> { - let (worker, stealer) = deque::fifo(); - let num_workers = 4; + let worker = deque::Worker::new_fifo(); + let stealer = worker.stealer(); let work_ready_mutex = Arc::new(Mutex::new(())); let work_ready = Arc::new(Condvar::new()); let workers = (0..num_workers) @@ -303,7 +305,11 @@ where handler_id: handler_index, }); self.work_ready.notify_all(); + } else { + debug!(target: "io", "No timer available for token {}. handler_index {handler_index}, subtoken {token_id}", token.0); } + } else { + debug!(target: "io", "No handler for token {} registered. 
handler_index {handler_index}, subtoken {token_id}", token.0); } } @@ -344,6 +350,7 @@ where delay, once, } => { + trace!(target: "io", "Registering timer: handler_id={}, token={}, delay={:?}, once={}", handler_id, token, delay, once); let timer_id = token + handler_id * TOKENS_PER_HANDLER; let timeout = event_loop .timeout(Token(timer_id), delay) @@ -535,7 +542,10 @@ where Message: Send + Sync + 'static, { /// Starts IO event loop - pub fn start(symbolic_name: &'static str) -> Result, IoError> { + pub fn start( + symbolic_name: &'static str, + num_workers: i32, + ) -> Result, IoError> { let mut config = EventLoopBuilder::new(); config.messages_per_tick(1024); let mut event_loop = config.build().expect("Error creating event loop"); @@ -543,7 +553,7 @@ where let handlers = Arc::new(RwLock::new(Slab::with_capacity(MAX_HANDLERS))); let h = handlers.clone(); let thread = thread::spawn(move || { - IoManager::::start(symbolic_name, &mut event_loop, h) + IoManager::::start(symbolic_name, &mut event_loop, h, num_workers) .expect("Error starting IO service"); }); Ok(IoService { diff --git a/crates/runtime/io/src/service_non_mio.rs b/crates/runtime/io/src/service_non_mio.rs index 376abb9d96..c61b79fdea 100644 --- a/crates/runtime/io/src/service_non_mio.rs +++ b/crates/runtime/io/src/service_non_mio.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
+use crate::{IoError, IoHandler}; use deque; use fnv::FnvHashMap; -use num_cpus; use parking_lot::{Mutex, RwLock}; use slab::Slab; use std::{ @@ -26,8 +26,6 @@ use std::{ }; use time::Duration as TimeDuration; use timer::{Guard as TimerGuard, Timer}; -use IoError; -use IoHandler; /// Timer ID pub type TimerToken = usize; @@ -267,10 +265,14 @@ where Message: Send + Sync + 'static, { /// Starts IO event loop - pub fn start(_symbolic_name: &'static str) -> Result, IoError> { + pub fn start( + _symbolic_name: &'static str, + num_threads: i32, + ) -> Result, IoError> { // This minimal implementation of IoService does have named Workers // like the mio-dependent one does, so _symbolic_name is ignored. - let (tx, rx) = deque::fifo(); + let tx = deque::Worker::new_fifo(); + let rx = tx.stealer(); let shared = Arc::new(Shared { handlers: RwLock::new(Slab::with_capacity(MAX_HANDLERS)), @@ -280,7 +282,7 @@ where channel: Mutex::new(Some(tx)), }); - let thread_joins = (0..num_cpus::get()) + let thread_joins = (0..num_threads) .map(|_| { let rx = rx.clone(); let shared = shared.clone(); @@ -370,8 +372,8 @@ where match rx.steal() { deque::Steal::Retry => continue, deque::Steal::Empty => thread::park(), - deque::Steal::Data(WorkTask::Shutdown) => break, - deque::Steal::Data(WorkTask::UserMessage(message)) => { + deque::Steal::Success(WorkTask::Shutdown) => break, + deque::Steal::Success(WorkTask::UserMessage(message)) => { for id in 0..MAX_HANDLERS { if let Some(handler) = shared.handlers.read().get(id) { let ctxt = IoContext { @@ -382,7 +384,7 @@ where } } } - deque::Steal::Data(WorkTask::TimerTrigger { handler_id, token }) => { + deque::Steal::Success(WorkTask::TimerTrigger { handler_id, token }) => { if let Some(handler) = shared.handlers.read().get(handler_id) { let ctxt = IoContext { handler: handler_id, diff --git a/crates/runtime/io/src/worker.rs b/crates/runtime/io/src/worker.rs index 7d875121c6..99f176b3a0 100644 --- a/crates/runtime/io/src/worker.rs +++ 
b/crates/runtime/io/src/worker.rs @@ -14,19 +14,20 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . +use crate::{ + IoHandler, LOCAL_STACK_SIZE, + service_mio::{HandlerId, IoChannel, IoContext}, +}; use deque; use futures::future::{self, Loop}; -use service_mio::{HandlerId, IoChannel, IoContext}; use std::{ sync::{ - atomic::{AtomicBool, Ordering as AtomicOrdering}, Arc, + atomic::{AtomicBool, Ordering as AtomicOrdering}, }, thread::{self, JoinHandle}, }; use tokio::{self}; -use IoHandler; -use LOCAL_STACK_SIZE; use parking_lot::{Condvar, Mutex}; @@ -94,7 +95,7 @@ impl Worker { while !deleting.load(AtomicOrdering::SeqCst) { match stealer.steal() { - deque::Steal::Data(work) => { + deque::Steal::Success(work) => { Worker::do_work(work, channel.clone()) } deque::Steal::Retry => {} diff --git a/crates/runtime/runtime/Cargo.toml b/crates/runtime/runtime/Cargo.toml index 74c30b3cee..56f7991e5d 100644 --- a/crates/runtime/runtime/Cargo.toml +++ b/crates/runtime/runtime/Cargo.toml @@ -1,10 +1,11 @@ [package] -description = "OpenEthereum Runtime" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node Runtime" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "parity-runtime" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] futures = "0.1" diff --git a/crates/runtime/runtime/src/lib.rs b/crates/runtime/runtime/src/lib.rs index e8ea5ec3ba..04f0981b05 100644 --- a/crates/runtime/runtime/src/lib.rs +++ b/crates/runtime/runtime/src/lib.rs @@ -19,7 +19,7 @@ pub extern crate futures; pub extern crate tokio; -use futures::{future, Future, IntoFuture}; +use futures::{Future, IntoFuture, future}; use std::{ fmt, sync::mpsc, diff --git a/crates/transaction-pool/src/tests/mod.rs b/crates/transaction-pool/src/tests/mod.rs index 3e1d87c6a9..87b6f9c444 100644 --- a/crates/transaction-pool/src/tests/mod.rs +++ 
b/crates/transaction-pool/src/tests/mod.rs @@ -60,6 +60,7 @@ pub type SharedTransaction = Arc; type TestPool = Pool; impl TestPool { + /// Creates a new instance with a specified maximum count limit. pub fn with_limit(max_count: usize) -> Self { Self::with_options(Options { max_count, diff --git a/crates/util/EIP-152/Cargo.toml b/crates/util/EIP-152/Cargo.toml index ac46304d62..a867f42734 100644 --- a/crates/util/EIP-152/Cargo.toml +++ b/crates/util/EIP-152/Cargo.toml @@ -2,7 +2,7 @@ name = "eip-152" version = "0.1.0" authors = ["Parity Technologies "] -repository = "https://github.com/openethereum/openethereum" +repository = "https://github.com/dmdcoin/diamond-node" documentation = "https://docs.rs/eip-152" readme = "README.md" description = "eip-512 blake2 F compression function" diff --git a/crates/util/EIP-712/Cargo.lock b/crates/util/EIP-712/Cargo.lock index aeb4860cbf..8833269a3c 100644 --- a/crates/util/EIP-712/Cargo.lock +++ b/crates/util/EIP-712/Cargo.lock @@ -105,7 +105,6 @@ version = "0.1.0" dependencies = [ "ethabi", "ethereum-types", - "failure", "indexmap", "itertools", "keccak-hash", @@ -167,28 +166,6 @@ dependencies = [ "uint", ] -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2 1.0.27", - "quote 1.0.9", - "syn 1.0.73", - "synstructure", -] - [[package]] name = "fixed-hash" version = "0.6.1" diff --git a/crates/util/EIP-712/Cargo.toml b/crates/util/EIP-712/Cargo.toml index 2a2d0f8ae6..3da8f0ab35 100644 --- a/crates/util/EIP-712/Cargo.toml +++ b/crates/util/EIP-712/Cargo.toml @@ -2,7 +2,7 @@ name = "eip-712" version = 
"0.1.0" authors = ["Parity Technologies "] -repository = "https://github.com/openethereum/openethereum" +repository = "https://github.com/dmdcoin/diamond-node" documentation = "https://docs.rs/eip-712" readme = "README.md" description = "eip-712 encoding" @@ -18,7 +18,7 @@ ethabi = "12.0.0" keccak-hash = "0.5.0" ethereum-types = "0.9.2" logos = "0.12.0" -failure = "0.1.7" +thiserror = "1.0" itertools = "0.7" lazy_static = "1.1" regex = "1.0" diff --git a/crates/util/EIP-712/src/encode.rs b/crates/util/EIP-712/src/encode.rs index a63f8300cf..e09410687d 100644 --- a/crates/util/EIP-712/src/encode.rs +++ b/crates/util/EIP-712/src/encode.rs @@ -438,7 +438,7 @@ mod tests { let typed_data = from_str::(TEST).expect("alas error!"); assert_eq!( - hash_structured_data(typed_data).unwrap_err().kind(), + hash_structured_data(typed_data).unwrap_err(), ErrorKind::UnequalArrayItems(2, "Person[2]".into(), 1) ) } diff --git a/crates/util/EIP-712/src/error.rs b/crates/util/EIP-712/src/error.rs index 6ce839eb41..78861cd191 100644 --- a/crates/util/EIP-712/src/error.rs +++ b/crates/util/EIP-712/src/error.rs @@ -14,56 +14,32 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use failure::{Backtrace, Context, Fail}; -use std::fmt::{self, Display}; +use thiserror::Error; use validator::{ValidationErrors, ValidationErrorsKind}; -pub(crate) type Result = ::std::result::Result; -/// Error type -#[derive(Debug)] -pub struct Error { - inner: Context, -} -/// Possible errors encountered while hashing/encoding an EIP-712 compliant data structure -#[derive(Clone, Fail, Debug, PartialEq)] +pub(crate) type Result = ::std::result::Result; + +#[derive(Error, Debug, PartialEq)] pub enum ErrorKind { - /// if we fail to deserialize from a serde::Value as a type specified in message types - /// fail with this error. 
- #[fail(display = "Expected type '{}' for field '{}'", _0, _1)] + #[error("Expected type '{0}' for field '{1}'")] UnexpectedType(String, String), - /// the primary type supplied doesn't exist in the MessageTypes - #[fail(display = "The given primaryType wasn't found in the types field")] + #[error("The given primaryType wasn't found in the types field")] NonExistentType, - /// an invalid address was encountered during encoding - #[fail( - display = "Address string should be a 0x-prefixed 40 character string, got '{}'", - _0 - )] + #[error("Address string should be a 0x-prefixed 40 character string, got '{0}'")] InvalidAddressLength(usize), - /// a hex parse error occured - #[fail(display = "Failed to parse hex '{}'", _0)] + #[error("Failed to parse hex '{0}'")] HexParseError(String), - /// the field was declared with a unknown type - #[fail(display = "The field '{}' has an unknown type '{}'", _0, _1)] + #[error("The field '{0}' has an unknown type '{1}'")] UnknownType(String, String), - /// Unexpected token - #[fail(display = "Unexpected token '{}' while parsing typename '{}'", _0, _1)] + #[error("Unexpected token '{0}' while parsing typename '{1}'")] UnexpectedToken(String, String), - /// the user has attempted to define a typed array with a depth > 10 - #[fail(display = "Maximum depth for nested arrays is 10")] + #[error("Maximum depth for nested arrays is 10")] UnsupportedArrayDepth, - /// FieldType validation error - #[fail(display = "{}", _0)] + #[error("{0}")] ValidationError(String), - /// the typed array defined in message types was declared with a fixed length - /// that is of unequal length with the items to be encoded - #[fail( - display = "Expected {} items for array type {}, got {} items", - _0, _1, _2 - )] + #[error("Expected {0} items for array type {1}, got {2} items")] UnequalArrayItems(u64, String, u64), - /// Typed array length doesn't fit into a u64 - #[fail(display = "Attempted to declare fixed size with length {}", _0)] + #[error("Attempted 
to declare fixed size with length {0}")] InvalidArraySize(String), } @@ -71,44 +47,7 @@ pub(crate) fn serde_error(expected: &str, field: Option<&str>) -> ErrorKind { ErrorKind::UnexpectedType(expected.to_owned(), field.unwrap_or("").to_owned()) } -impl Fail for Error { - fn cause(&self) -> Option<&dyn Fail> { - self.inner.cause() - } - - fn backtrace(&self) -> Option<&Backtrace> { - self.inner.backtrace() - } -} - -impl Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.inner, f) - } -} - -impl Error { - /// extract the error kind - pub fn kind(&self) -> ErrorKind { - self.inner.get_context().clone() - } -} - -impl From for Error { - fn from(kind: ErrorKind) -> Error { - Error { - inner: Context::new(kind), - } - } -} - -impl From> for Error { - fn from(inner: Context) -> Error { - Error { inner } - } -} - -impl From for Error { +impl From for ErrorKind { fn from(error: ValidationErrors) -> Self { let mut string: String = "".into(); for (field_name, error_kind) in error.errors() { @@ -128,6 +67,6 @@ impl From for Error { ), } } - ErrorKind::ValidationError(string).into() + ErrorKind::ValidationError(string) } } diff --git a/crates/util/EIP-712/src/lib.rs b/crates/util/EIP-712/src/lib.rs index 21eafc3a47..ed7670e24d 100644 --- a/crates/util/EIP-712/src/lib.rs +++ b/crates/util/EIP-712/src/lib.rs @@ -172,5 +172,3 @@ mod parser; pub use crate::eip712::EIP712; /// the EIP-712 encoding function pub use crate::encode::hash_structured_data; -/// encoding Error types -pub use crate::error::{Error, ErrorKind}; diff --git a/crates/util/cli-signer/Cargo.toml b/crates/util/cli-signer/Cargo.toml index 83fe9c0c2f..653cf81a46 100644 --- a/crates/util/cli-signer/Cargo.toml +++ b/crates/util/cli-signer/Cargo.toml @@ -1,10 +1,11 @@ [package] -description = "OpenEthereum CLI Signer Tool" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node CLI Signer Tool" +homepage = 
"https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "cli-signer" version = "1.4.0" authors = ["Parity "] +edition = "2024" [dependencies] ethereum-types = "0.9.2" diff --git a/crates/util/cli-signer/rpc-client/Cargo.toml b/crates/util/cli-signer/rpc-client/Cargo.toml index a689d37ec5..504d34fdf5 100644 --- a/crates/util/cli-signer/rpc-client/Cargo.toml +++ b/crates/util/cli-signer/rpc-client/Cargo.toml @@ -1,10 +1,11 @@ [package] -description = "OpenEthereum RPC Client" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node RPC Client" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "parity-rpc-client" version = "1.4.0" authors = ["Parity "] +edition = "2024" [dependencies] ethereum-types = "0.9.2" @@ -14,7 +15,7 @@ serde = "1.0" serde_json = "1.0" url = "2" matches = "0.1" -parking_lot = "0.11.1" +parking_lot = "0.12" jsonrpc-core = "15.0.0" jsonrpc-ws-server = "15.0.0" parity-rpc = { path = "../../../rpc" } diff --git a/crates/util/cli-signer/rpc-client/src/client.rs b/crates/util/cli-signer/rpc-client/src/client.rs index d278620365..9465cee774 100644 --- a/crates/util/cli-signer/rpc-client/src/client.rs +++ b/crates/util/cli-signer/rpc-client/src/client.rs @@ -19,8 +19,8 @@ use std::{ fmt::{Debug, Error as FmtError, Formatter}, io::{BufRead, BufReader}, sync::{ - atomic::{AtomicUsize, Ordering}, Arc, + atomic::{AtomicUsize, Ordering}, }, thread, time, }; @@ -30,7 +30,7 @@ use parking_lot::Mutex; use std::{fs::File, path::PathBuf}; use url::Url; -use ws::ws::{ +use crate::ws::ws::{ self, Error as WsError, ErrorKind as WsErrorKind, Handler, Handshake, Message, Request, Result as WsResult, Sender, }; @@ -38,15 +38,15 @@ use ws::ws::{ use serde::de::DeserializeOwned; use serde_json::{self as json, Error as JsonError, Value as JsonValue}; -use futures::{done, oneshot, Canceled, Complete, Future}; +use futures::{Canceled, Complete, Future, done, oneshot}; use jsonrpc_core::{ + Error as 
JsonRpcError, Id, Params, Version, request::MethodCall, response::{Failure, Output, Success}, - Error as JsonRpcError, Id, Params, Version, }; -use BoxFuture; +use crate::BoxFuture; /// The actual websocket connection handler, passed into the /// event loop of ws-rs diff --git a/crates/util/cli-signer/rpc-client/src/lib.rs b/crates/util/cli-signer/rpc-client/src/lib.rs index 3f4b4d2d6d..8cb66112c2 100644 --- a/crates/util/cli-signer/rpc-client/src/lib.rs +++ b/crates/util/cli-signer/rpc-client/src/lib.rs @@ -17,7 +17,6 @@ pub mod client; pub mod signer_client; -extern crate ethereum_types; extern crate futures; extern crate jsonrpc_core; extern crate jsonrpc_ws_server as ws; @@ -41,7 +40,7 @@ pub type BoxFuture = Box + Send>; #[cfg(test)] mod tests { - use client::{Rpc, RpcError}; + use crate::client::{Rpc, RpcError}; use futures::Future; use rpc; use std::path::PathBuf; diff --git a/crates/util/cli-signer/rpc-client/src/signer_client.rs b/crates/util/cli-signer/rpc-client/src/signer_client.rs index 6283948696..4dca446b72 100644 --- a/crates/util/cli-signer/rpc-client/src/signer_client.rs +++ b/crates/util/cli-signer/rpc-client/src/signer_client.rs @@ -14,14 +14,16 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use client::{Rpc, RpcError}; +use crate::{ + BoxFuture, + client::{Rpc, RpcError}, +}; use ethereum_types::U256; use futures::Canceled; use rpc::signer::{ConfirmationRequest, TransactionCondition, TransactionModification}; use serde; -use serde_json::{to_value, Value as JsonValue}; +use serde_json::{Value as JsonValue, to_value}; use std::path::PathBuf; -use BoxFuture; pub struct SignerRpc { rpc: Rpc, diff --git a/crates/util/cli-signer/src/lib.rs b/crates/util/cli-signer/src/lib.rs index fbd1b62cbb..ad909d6605 100644 --- a/crates/util/cli-signer/src/lib.rs +++ b/crates/util/cli-signer/src/lib.rs @@ -14,19 +14,19 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -extern crate ethereum_types; +use ethereum_types; extern crate futures; extern crate rpassword; extern crate parity_rpc as rpc; extern crate parity_rpc_client as client; -use client::signer_client::SignerRpc; +use crate::client::signer_client::SignerRpc; use ethereum_types::U256; use rpc::signer::ConfirmationRequest; use std::{ fs::File, - io::{stdin, stdout, BufRead, BufReader, Write}, + io::{BufRead, BufReader, Write, stdin, stdout}, path::PathBuf, }; diff --git a/crates/util/dir/Cargo.toml b/crates/util/dir/Cargo.toml index bfa27f23e6..1743143868 100644 --- a/crates/util/dir/Cargo.toml +++ b/crates/util/dir/Cargo.toml @@ -3,9 +3,10 @@ name = "dir" version = "0.1.2" authors = ["Parity Technologies "] license = "GPL3" +edition = "2024" [dependencies] ethereum-types = "0.9.2" journaldb = { path = "../../db/journaldb" } -app_dirs = { git = "https://github.com/openethereum/app-dirs-rs" } +app_dirs = { git = "https://github.com/dmdcoin/app-dirs-rs" } home = "0.3" diff --git a/crates/util/dir/src/helpers.rs b/crates/util/dir/src/helpers.rs index 10f1b49f75..71abe9710f 100644 --- a/crates/util/dir/src/helpers.rs +++ b/crates/util/dir/src/helpers.rs @@ -15,7 +15,7 @@ // along with OpenEthereum. If not, see . //! 
Directory helper functions -use home_dir; +use crate::home_dir; /// Replaces `$HOME` str with home directory path. pub fn replace_home(base: &str, arg: &str) -> String { diff --git a/crates/util/dir/src/lib.rs b/crates/util/dir/src/lib.rs index f105014b4f..247fc365ac 100644 --- a/crates/util/dir/src/lib.rs +++ b/crates/util/dir/src/lib.rs @@ -38,14 +38,14 @@ /// Unix: $BASE/openethereum/ /// extern crate app_dirs; -extern crate ethereum_types; +use ethereum_types; extern crate home; extern crate journaldb; pub mod helpers; -use app_dirs::{data_root, get_app_root, AppDataType, AppInfo}; -use ethereum_types::{H256, H64}; -use helpers::{replace_home, replace_home_and_local}; +use crate::helpers::{replace_home, replace_home_and_local}; +use app_dirs::{AppDataType, AppInfo, data_root, get_app_root}; +use ethereum_types::{H64, H256}; use journaldb::Algorithm; use std::{ fs, @@ -335,7 +335,7 @@ mod platform { #[cfg(test)] mod tests { use super::Directories; - use helpers::{replace_home, replace_home_and_local}; + use crate::helpers::{replace_home, replace_home_and_local}; #[test] fn test_default_directories() { diff --git a/crates/util/fastmap/Cargo.toml b/crates/util/fastmap/Cargo.toml index 6ca397d773..bfad656493 100644 --- a/crates/util/fastmap/Cargo.toml +++ b/crates/util/fastmap/Cargo.toml @@ -4,7 +4,9 @@ version = "0.1.0" authors = ["Parity Technologies "] description = "Specialized version of `HashMap` with H256 keys and fast hashing function." license = "GPL-3.0" +edition = "2024" [dependencies] ethereum-types = "0.9.2" plain_hasher = "0.2" +lru = "0.13.0" diff --git a/crates/util/fastmap/src/lib.rs b/crates/util/fastmap/src/lib.rs index cc39591e17..76e394a474 100644 --- a/crates/util/fastmap/src/lib.rs +++ b/crates/util/fastmap/src/lib.rs @@ -16,14 +16,17 @@ //! Provides a `H256FastMap` type with H256 keys and fast hashing function. 
-extern crate ethereum_types; +use ethereum_types; +extern crate lru; extern crate plain_hasher; +use self::lru::LruCache; use ethereum_types::H256; use plain_hasher::PlainHasher; use std::{ collections::{HashMap, HashSet}, hash, + num::NonZeroUsize, }; /// Specialized version of `HashMap` with H256 keys and fast hashing function. @@ -31,6 +34,12 @@ pub type H256FastMap = HashMap /// Specialized version of HashSet with H256 values and fast hashing function. pub type H256FastSet = HashSet>; +pub type H256FastLruMap = LruCache>; + +pub fn new_h256_fast_lru_map(cap: NonZeroUsize) -> H256FastLruMap { + LruCache::with_hasher(cap, hash::BuildHasherDefault::::default()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/util/keccak-hasher/Cargo.toml b/crates/util/keccak-hasher/Cargo.toml index ba3af0d3d2..3558e56342 100644 --- a/crates/util/keccak-hasher/Cargo.toml +++ b/crates/util/keccak-hasher/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.1" authors = ["Parity Technologies "] description = "Keccak-256 implementation of the Hasher trait" license = "GPL-3.0" +edition = "2024" [dependencies] ethereum-types = "0.9.2" diff --git a/crates/util/keccak-hasher/src/lib.rs b/crates/util/keccak-hasher/src/lib.rs index 727181b357..d9a0bb1989 100644 --- a/crates/util/keccak-hasher/src/lib.rs +++ b/crates/util/keccak-hasher/src/lib.rs @@ -15,7 +15,7 @@ // along with OpenEthereum. If not, see . //! 
Hasher implementation for the Keccak-256 hash -extern crate ethereum_types; +use ethereum_types; extern crate hash_db; extern crate plain_hasher; extern crate tiny_keccak; diff --git a/crates/util/len-caching-lock/Cargo.toml b/crates/util/len-caching-lock/Cargo.toml index 4d471000ef..fb838a7283 100644 --- a/crates/util/len-caching-lock/Cargo.toml +++ b/crates/util/len-caching-lock/Cargo.toml @@ -1,10 +1,11 @@ [package] description = "Atomically cached len(), for use with collections contained in parking_lot Mutex and RwLock" -homepage = "https://github.com/openethereum/openethereum" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "len-caching-lock" version = "0.1.1" authors = ["Parity Technologies "] +edition = "2024" [dependencies] -parking_lot = "0.11.1" +parking_lot = "0.12" diff --git a/crates/util/len-caching-lock/src/mutex.rs b/crates/util/len-caching-lock/src/mutex.rs index 7aec807391..67290573fa 100644 --- a/crates/util/len-caching-lock/src/mutex.rs +++ b/crates/util/len-caching-lock/src/mutex.rs @@ -21,7 +21,7 @@ use std::{ use parking_lot::{Mutex, MutexGuard}; -use Len; +use crate::Len; /// Can be used in place of a [`Mutex`](../../lock_api/struct.Mutex.html) where reading `T`'s `len()` without /// needing to lock, is advantageous. diff --git a/crates/util/len-caching-lock/src/rwlock.rs b/crates/util/len-caching-lock/src/rwlock.rs index 03551a26b4..a88bf3d268 100644 --- a/crates/util/len-caching-lock/src/rwlock.rs +++ b/crates/util/len-caching-lock/src/rwlock.rs @@ -21,7 +21,7 @@ use std::{ use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; -use Len; +use crate::Len; /// Can be used in place of a [`RwLock`](../../lock_api/struct.RwLock.html) where /// reading `T`'s `len()` without needing to lock, is advantageous. 
diff --git a/crates/util/macros/Cargo.toml b/crates/util/macros/Cargo.toml index fd6a130f37..202f782086 100644 --- a/crates/util/macros/Cargo.toml +++ b/crates/util/macros/Cargo.toml @@ -2,3 +2,4 @@ name = "macros" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" \ No newline at end of file diff --git a/crates/util/memory-cache/Cargo.toml b/crates/util/memory-cache/Cargo.toml index 213772857f..5594fe2104 100644 --- a/crates/util/memory-cache/Cargo.toml +++ b/crates/util/memory-cache/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] description = "An LRU-cache which operates on memory used" license = "GPL3" +edition = "2024" [dependencies] parity-util-mem = "0.7" diff --git a/crates/util/memzero/Cargo.toml b/crates/util/memzero/Cargo.toml index 9eb6f731c4..3829403b34 100644 --- a/crates/util/memzero/Cargo.toml +++ b/crates/util/memzero/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "A wrapper for zero-ing out memory when dropped" license = "GPL-3.0" homepage = "https://parity.io" -repository = "https://github.com/openethereum/openethereum" +repository = "https://github.com/dmdcoin/diamond-node" documentation = "https://docs.rs/crate/memzero" authors = ["Parity Technologies "] edition = "2018" diff --git a/crates/util/panic-hook/Cargo.toml b/crates/util/panic-hook/Cargo.toml index eac3d806a6..21525477d6 100644 --- a/crates/util/panic-hook/Cargo.toml +++ b/crates/util/panic-hook/Cargo.toml @@ -1,10 +1,11 @@ [package] -description = "OpenEthereum custom panic hook" -homepage = "https://github.com/openethereum/openethereum" +description = "diamond-node custom panic hook" +homepage = "https://github.com/dmdcoin/diamond-node" license = "GPL-3.0" name = "panic_hook" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] backtrace = "0.3" \ No newline at end of file diff --git a/crates/util/panic-hook/src/lib.rs b/crates/util/panic-hook/src/lib.rs index f30f9ac992..1d1ad414b0 
100644 --- a/crates/util/panic-hook/src/lib.rs +++ b/crates/util/panic-hook/src/lib.rs @@ -20,7 +20,7 @@ extern crate backtrace; use backtrace::Backtrace; use std::{ - panic::{self, PanicInfo}, + panic::{self, PanicHookInfo}, process, thread, }; @@ -51,10 +51,10 @@ where static ABOUT_PANIC: &str = " This is a bug. Please report it at: - https://github.com/openethereum/openethereum/issues/new + https://github.com/dmdcoin/diamond-node/issues/new "; -fn gen_panic_msg(info: &PanicInfo) -> String { +fn gen_panic_msg(info: &PanicHookInfo) -> String { let location = info.location(); let file = location.as_ref().map(|l| l.file()).unwrap_or(""); let line = location.as_ref().map(|l| l.line()).unwrap_or(0); diff --git a/crates/util/rlp-compress/Cargo.toml b/crates/util/rlp-compress/Cargo.toml index e16c5b3de2..70c401bead 100644 --- a/crates/util/rlp-compress/Cargo.toml +++ b/crates/util/rlp-compress/Cargo.toml @@ -2,6 +2,7 @@ name = "rlp_compress" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] rlp = { version = "0.4.6" } diff --git a/crates/util/rlp-compress/src/common.rs b/crates/util/rlp-compress/src/common.rs index 846b66010d..1aafb3f50b 100644 --- a/crates/util/rlp-compress/src/common.rs +++ b/crates/util/rlp-compress/src/common.rs @@ -8,7 +8,7 @@ //! Contains RLPs used for compression. -use Swapper; +use crate::Swapper; lazy_static! { /// Swapper for snapshot compression. 
diff --git a/crates/util/rlp-compress/tests/compress.rs b/crates/util/rlp-compress/tests/compress.rs index 9f901a8e8d..44f126f303 100644 --- a/crates/util/rlp-compress/tests/compress.rs +++ b/crates/util/rlp-compress/tests/compress.rs @@ -17,7 +17,7 @@ extern crate rlp_compress; use rlp_compress::{ - blocks_swapper, compress, decompress, snapshot_swapper, Compressor, Decompressor, Swapper, + Compressor, Decompressor, Swapper, blocks_swapper, compress, decompress, snapshot_swapper, }; #[test] diff --git a/crates/util/rlp-derive/Cargo.toml b/crates/util/rlp-derive/Cargo.toml index d843d75a52..953c83feee 100644 --- a/crates/util/rlp-derive/Cargo.toml +++ b/crates/util/rlp-derive/Cargo.toml @@ -8,9 +8,9 @@ name = "rlp_derive" proc-macro = true [dependencies] -syn = "0.15" -quote = "0.6" -proc-macro2 = "0.4" +syn = "2.0" +quote = "1.0.40" +proc-macro2 = "1.0.40" [dev-dependencies] rlp = { version = "0.4.6" } diff --git a/crates/util/rlp-derive/src/de.rs b/crates/util/rlp-derive/src/de.rs index 484eb1d521..26678d4167 100644 --- a/crates/util/rlp-derive/src/de.rs +++ b/crates/util/rlp-derive/src/de.rs @@ -146,7 +146,6 @@ fn decodable_field(index: usize, field: &syn::Field, quotes: ParseQuotes) -> Tok .segments .first() .expect("there must be at least 1 segment") - .value() .ident; if &ident.to_string() == "Vec" { if quotes.takes_index { diff --git a/crates/util/rlp-derive/src/en.rs b/crates/util/rlp-derive/src/en.rs index dd61a5ae9a..b302c4f64a 100644 --- a/crates/util/rlp-derive/src/en.rs +++ b/crates/util/rlp-derive/src/en.rs @@ -116,22 +116,21 @@ fn encodable_field(index: usize, field: &syn::Field) -> TokenStream { .segments .first() .expect("there must be at least 1 segment"); - let ident = &top_segment.value().ident; + let ident = &top_segment.ident; if &ident.to_string() == "Vec" { - let inner_ident = match top_segment.value().arguments { + let inner_ident = match top_segment.arguments { syn::PathArguments::AngleBracketed(ref angle) => { let ty = angle .args 
.first() .expect("Vec has only one angle bracketed type; qed"); - match **ty.value() { + match ty { syn::GenericArgument::Type(syn::Type::Path(ref path)) => { &path .path .segments .first() .expect("there must be at least 1 segment") - .value() .ident } _ => panic!("rlp_derive not supported"), diff --git a/crates/util/stats/Cargo.toml b/crates/util/stats/Cargo.toml index 09fdeb8a3c..462302e3d3 100644 --- a/crates/util/stats/Cargo.toml +++ b/crates/util/stats/Cargo.toml @@ -2,7 +2,9 @@ name = "stats" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2018" [dependencies] log = "0.4" -prometheus = "0.13.0" +prometheus = "0.14" +vergen = "0.1" \ No newline at end of file diff --git a/crates/util/stats/src/lib.rs b/crates/util/stats/src/lib.rs index dbbaf2a7c6..33b40dd6c6 100644 --- a/crates/util/stats/src/lib.rs +++ b/crates/util/stats/src/lib.rs @@ -70,19 +70,34 @@ impl PrometheusRegistry { .expect("prometheus identifiers must be are unique"); } - /// Adds a new prometheus gauge with a label - pub fn register_gauge_with_label(&mut self, name: &str, help: &str, label: &str, value: i64) { - //let label_formated = format!("{}", label); - let name_formatted = format!("{}{}", self.prefix, name); - let mut opts = prometheus::Opts::new(name_formatted, help); + /// Adds a new prometheus gauge with a "other_node" label. + /// Designed for tracking communication partner values. + pub fn register_gauge_with_other_node_label( + &mut self, + name: &str, + help: &str, + other_node: &str, + value: i64, + ) { + self.register_gauge_with_label(name, help, "other_node", other_node, value); + } + /// Adds a new prometheus gauge with a label + pub fn register_gauge_with_label( + &mut self, + name: &str, + help: &str, + label: &str, + label_value: &str, + value: i64, + ) { + let opts = prometheus::Opts::new(name, help).const_label(label, label_value); // add labels here . 
- opts.variable_labels.push(label.to_string()); + //opts.variable_labels.push(label.to_string()); match prometheus::IntGauge::with_opts(opts) { Ok(g) => { g.set(value); - self.registry .register(Box::new(g)) .expect("prometheus identifiers must be are unique"); @@ -108,6 +123,27 @@ impl PrometheusRegistry { ); t } + + pub fn register_version(&mut self) { + let sha3 = vergen::SHORT_SHA; + let version = sha3.bits(); + self.register_gauge("version_sha3_bits", "version_sha3", version as i64); + self.register_gauge( + "version_semver_bits", + "Sementic Versioning bits", + vergen::SEMVER.bits() as i64, + ); + self.register_gauge( + "version_commit_date", + "commit date", + vergen::COMMIT_DATE.bits() as i64, + ); + self.register_gauge( + "version_vergen_target", + "vergen targets", + vergen::TARGET.bits() as i64, + ); + } } /// Implements a prometheus metrics collector diff --git a/crates/util/time-utils/src/lib.rs b/crates/util/time-utils/src/lib.rs index c413df7eeb..6dc15b906c 100644 --- a/crates/util/time-utils/src/lib.rs +++ b/crates/util/time-utils/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; /// Temporary trait for `checked operations` on SystemTime until these are available in the standard library pub trait CheckedSystemTime { @@ -49,6 +49,53 @@ impl CheckedSystemTime for SystemTime { } } +/// a DeadlineStopwatch helps to handle deadlines in a more convenient way. 
+pub struct DeadlineStopwatch { + started: Instant, + max_duration: Duration, +} + +impl DeadlineStopwatch { + pub fn new(max_duration: Duration) -> Self { + Self { + started: Instant::now(), + max_duration, + } + } + + pub fn elapsed(&self) -> Duration { + self.started.elapsed() + } + + pub fn started(&self) -> &Instant { + &self.started + } + + pub fn end_time(&self) -> Instant { + self.started + self.max_duration + } + + pub fn should_continue(&self) -> bool { + self.elapsed() < self.max_duration + } + + pub fn time_left(&self) -> Duration { + let elapsed = self.elapsed(); + + if elapsed >= self.max_duration { + Duration::from_secs(0) + } else if let Some(time_left) = self.max_duration.checked_sub(elapsed) { + time_left + } else { + Duration::from_secs(0) + } + } + + pub fn is_expired(&self) -> bool { + self.elapsed() >= self.max_duration + } +} + #[cfg(test)] mod tests { #[test] diff --git a/crates/util/triehash-ethereum/Cargo.toml b/crates/util/triehash-ethereum/Cargo.toml index 929baf1d2b..020a3c1a2f 100644 --- a/crates/util/triehash-ethereum/Cargo.toml +++ b/crates/util/triehash-ethereum/Cargo.toml @@ -4,6 +4,7 @@ version = "0.2.0" authors = ["Parity Technologies "] description = "Trie-root helpers, ethereum style" license = "GPL-3.0" +edition = "2024" [dependencies] triehash = { version = "0.5.0" } diff --git a/crates/util/triehash-ethereum/src/lib.rs b/crates/util/triehash-ethereum/src/lib.rs index 029f59f796..31a42cc53b 100644 --- a/crates/util/triehash-ethereum/src/lib.rs +++ b/crates/util/triehash-ethereum/src/lib.rs @@ -16,7 +16,7 @@ //! Generates Keccak-flavoured trie roots. 
-extern crate ethereum_types; +use ethereum_types; extern crate keccak_hasher; extern crate triehash; diff --git a/crates/util/unexpected/Cargo.toml b/crates/util/unexpected/Cargo.toml index d5caeadeed..10ee84ead6 100644 --- a/crates/util/unexpected/Cargo.toml +++ b/crates/util/unexpected/Cargo.toml @@ -2,3 +2,4 @@ name = "unexpected" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" \ No newline at end of file diff --git a/crates/util/version/Cargo.toml b/crates/util/version/Cargo.toml index 745282b492..868888bc2c 100644 --- a/crates/util/version/Cargo.toml +++ b/crates/util/version/Cargo.toml @@ -1,9 +1,14 @@ [package] name = "parity-version" # NOTE: this value is used for OpenEthereum version string (via env CARGO_PKG_VERSION) -version = "3.3.5-hbbft-0.8.9" -authors = ["Parity Technologies "] +version = "4.0.0" +authors = [ + "bit.diamonds developers", + "OpenEthereum developers", + "Parity Technologies " +] build = "build.rs" +edition = "2024" [package.metadata] diff --git a/crates/util/version/build.rs b/crates/util/version/build.rs index 91beb954f1..832d84a9ea 100644 --- a/crates/util/version/build.rs +++ b/crates/util/version/build.rs @@ -19,7 +19,7 @@ extern crate toml; extern crate vergen; use std::{env, fs::File, io::Write, path::Path}; -use vergen::{vergen, OutputFns}; +use vergen::{OutputFns, vergen}; const ERROR_MSG: &'static str = "Failed to generate metadata files"; diff --git a/crates/util/version/src/lib.rs b/crates/util/version/src/lib.rs index beca730df2..d3f9a68644 100644 --- a/crates/util/version/src/lib.rs +++ b/crates/util/version/src/lib.rs @@ -24,6 +24,9 @@ use bytes::Bytes; use rlp::RlpStream; use target_info::Target; +/// The name of the node software. 
+pub const NODE_SOFTWARE_NAME: &str = "diamond-node"; + mod vergen { #![allow(unused)] include!(concat!(env!("OUT_DIR"), "/version.rs")); @@ -55,7 +58,8 @@ pub fn version() -> String { let commit_date = vergen::commit_date().replace("-", ""); let date_dash = if commit_date.is_empty() { "" } else { "-" }; format!( - "OpenEthereum/v{}-{}{}{}{}{}/{}/rustc{}", + "{}/v{}-{}{}{}{}{}/{}/rustc{}", + NODE_SOFTWARE_NAME, env!("CARGO_PKG_VERSION"), THIS_TRACK, sha3_dash, @@ -82,7 +86,7 @@ pub fn version_data() -> Bytes { .parse::() .expect("Environment variables are known to be valid; qed"); s.append(&v); - s.append(&"OpenEthereum"); + s.append(&"diamond-node"); s.append(&generated::rustc_version()); s.append(&&Target::os()[0..2]); s.out() diff --git a/crates/vm/builtin/Cargo.toml b/crates/vm/builtin/Cargo.toml index 45167eb969..45098bb032 100644 --- a/crates/vm/builtin/Cargo.toml +++ b/crates/vm/builtin/Cargo.toml @@ -15,7 +15,7 @@ ethkey = { path = "../../accounts/ethkey" } keccak-hash = "0.5.0" log = "0.4" macros = { path = "../../util/macros" } -num = { version = "0.1", default-features = false, features = ["bigint"] } +num-bigint = { version = "0.4" } parity-bytes = "0.1" parity-crypto = { version = "0.6.2", features = [ "publickey" ] } eth_pairings = { git = "https://github.com/matter-labs/eip1962.git", default-features = false, features = ["eip_2537"], rev = "ece6cbabc41948db4200e41f0bfdab7ab94c7af8" } diff --git a/crates/vm/builtin/src/lib.rs b/crates/vm/builtin/src/lib.rs index c54d910f82..f0982dc0e4 100644 --- a/crates/vm/builtin/src/lib.rs +++ b/crates/vm/builtin/src/lib.rs @@ -37,7 +37,7 @@ use ethereum_types::{H256, U256}; use ethjson; use keccak_hash::keccak; use log::{trace, warn}; -use num::{BigUint, One, Zero}; +use num_bigint::BigUint; use parity_bytes::BytesRef; use parity_crypto::{ digest, @@ -924,8 +924,8 @@ fn modexp(mut base: BigUint, exp: Vec, modulus: BigUint) -> BigUint { const BITS_PER_DIGIT: usize = 8; // n^m % 0 || n^m % 1 - if modulus <= 
BigUint::one() { - return BigUint::zero(); + if modulus <= BigUint::from(1 as usize) { + return BigUint::from(0 as usize); } // normalize exponent @@ -933,24 +933,24 @@ fn modexp(mut base: BigUint, exp: Vec, modulus: BigUint) -> BigUint { // n^0 % m if exp.peek().is_none() { - return BigUint::one(); + return BigUint::from(1 as usize); } // 0^n % m, n > 0 - if base.is_zero() { - return BigUint::zero(); + if base.eq(&BigUint::from(0 as usize)) { + return BigUint::from(0 as usize); } base %= &modulus; // Fast path for base divisible by modulus. - if base.is_zero() { - return BigUint::zero(); + if base.eq(&BigUint::from(0 as usize)) { + return BigUint::from(0 as usize); } // Left-to-right binary exponentiation (Handbook of Applied Cryptography - Algorithm 14.79). // http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf - let mut result = BigUint::one(); + let mut result = BigUint::from(1 as usize); for digit in exp { let mut mask = 1 << (BITS_PER_DIGIT - 1); @@ -992,7 +992,7 @@ impl Implementation for Modexp { // Gas formula allows arbitrary large exp_len when base and modulus are empty, so we need to handle empty base first. let r = if base_len == 0 && mod_len == 0 { - BigUint::zero() + BigUint::from(0 as usize) } else { // read the numbers themselves. 
let mut buf = vec![0; max(mod_len, max(base_len, exp_len))]; @@ -1381,7 +1381,7 @@ mod tests { use hex_literal::hex; use macros::map; use maplit::btreemap; - use num::{BigUint, One, Zero}; + use num_bigint::BigUint; use parity_bytes::BytesRef; use rustc_hex::FromHex; use std::convert::TryFrom; @@ -1525,28 +1525,41 @@ mod tests { #[test] fn modexp_func() { // n^0 % m == 1 + let mut base = BigUint::parse_bytes(b"12345", 10).unwrap(); - let mut exp = BigUint::zero(); + let mut exp = BigUint::from(0 as usize); let mut modulus = BigUint::parse_bytes(b"789", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::one()); + assert_eq!( + me(base, exp.to_bytes_be(), modulus), + BigUint::from(1 as usize) + ); // 0^n % m == 0 - base = BigUint::zero(); + base = BigUint::from(0 as usize); exp = BigUint::parse_bytes(b"12345", 10).unwrap(); modulus = BigUint::parse_bytes(b"789", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); + assert_eq!( + me(base, exp.to_bytes_be(), modulus), + BigUint::from(0 as usize) + ); // n^m % 1 == 0 base = BigUint::parse_bytes(b"12345", 10).unwrap(); exp = BigUint::parse_bytes(b"789", 10).unwrap(); - modulus = BigUint::one(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); + modulus = BigUint::from(1 as usize); + assert_eq!( + me(base, exp.to_bytes_be(), modulus), + BigUint::from(0 as usize) + ); // if n % d == 0, then n^m % d == 0 base = BigUint::parse_bytes(b"12345", 10).unwrap(); exp = BigUint::parse_bytes(b"789", 10).unwrap(); modulus = BigUint::parse_bytes(b"15", 10).unwrap(); - assert_eq!(me(base, exp.to_bytes_be(), modulus), BigUint::zero()); + assert_eq!( + me(base, exp.to_bytes_be(), modulus), + BigUint::from(0 as usize) + ); // others base = BigUint::parse_bytes(b"12345", 10).unwrap(); diff --git a/crates/vm/evm/Cargo.toml b/crates/vm/evm/Cargo.toml index c492a9ab5c..baaa0cf4b3 100644 --- a/crates/vm/evm/Cargo.toml +++ b/crates/vm/evm/Cargo.toml @@ -3,6 +3,7 @@ 
description = "Parity Ethereum Virtual Machine (EVM) Rust Implementation" name = "evm" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2018" [dependencies] bit-set = "0.4" @@ -13,10 +14,10 @@ log = "0.4" vm = { path = "../vm" } keccak-hash = "0.5.0" parity-util-mem = "0.7" -parking_lot = "0.11.1" +parking_lot = "0.12" memory-cache = { path = "../../util/memory-cache" } ethcore-builtin = { path = "../builtin" } -num-bigint = "0.2" +num-bigint = "0.4" [dev-dependencies] rustc-hex = "1.0" diff --git a/crates/vm/evm/benches/basic.rs b/crates/vm/evm/benches/basic.rs index d6659f608a..0d7cac880b 100644 --- a/crates/vm/evm/benches/basic.rs +++ b/crates/vm/evm/benches/basic.rs @@ -19,14 +19,14 @@ #[macro_use] extern crate criterion; extern crate bit_set; -extern crate ethereum_types; -extern crate evm; +use ethereum_types; +use evm; extern crate keccak_hash as hash; extern crate memory_cache; extern crate parity_bytes as bytes; extern crate parking_lot; extern crate rustc_hex; -extern crate vm; +use vm; use bytes::Bytes; use criterion::{black_box, Bencher, Criterion}; diff --git a/crates/vm/evm/src/factory.rs b/crates/vm/evm/src/factory.rs index 9fa1e70825..fc353a317f 100644 --- a/crates/vm/evm/src/factory.rs +++ b/crates/vm/evm/src/factory.rs @@ -105,7 +105,6 @@ macro_rules! 
evm_test_ignore( ($name_test: ident: $name_int: ident) => { #[test] #[ignore] - #[cfg(feature = "ignored-tests")] fn $name_int() { $name_test(Factory::new(VMType::Interpreter, 1024 * 32)); } diff --git a/crates/vm/evm/src/interpreter/gasometer.rs b/crates/vm/evm/src/interpreter/gasometer.rs index aec51b4ecf..cfdb60cc40 100644 --- a/crates/vm/evm/src/interpreter/gasometer.rs +++ b/crates/vm/evm/src/interpreter/gasometer.rs @@ -19,9 +19,11 @@ use ethereum_types::{Address, BigEndianHash, U256}; use std::cmp; use super::stack::VecStack; -use evm; -use instructions::{self, Instruction, InstructionInfo}; -use interpreter::stack::Stack; +use crate::{ + evm, + instructions::{self, Instruction, InstructionInfo}, + interpreter::stack::Stack, +}; use vm::{self, Schedule}; macro_rules! overflowing { @@ -34,7 +36,7 @@ macro_rules! overflowing { }}; } -enum Request { +enum Request { Gas(Cost), GasMem(Cost, Cost), GasMemProvide(Cost, Cost, Option), diff --git a/crates/vm/evm/src/interpreter/informant.rs b/crates/vm/evm/src/interpreter/informant.rs index 03ae369515..5724afd044 100644 --- a/crates/vm/evm/src/interpreter/informant.rs +++ b/crates/vm/evm/src/interpreter/informant.rs @@ -43,7 +43,7 @@ mod inner { use ethereum_types::U256; - use instructions::{Instruction, InstructionInfo}; + use crate::instructions::{Instruction, InstructionInfo}; use interpreter::stack::Stack; use CostType; diff --git a/crates/vm/evm/src/interpreter/mod.rs b/crates/vm/evm/src/interpreter/mod.rs index 827393ac73..c9b43c9ff1 100644 --- a/crates/vm/evm/src/interpreter/mod.rs +++ b/crates/vm/evm/src/interpreter/mod.rs @@ -34,8 +34,10 @@ use vm::{ GasLeft, MessageCallResult, ParamsType, ReturnData, Schedule, TrapError, TrapKind, }; -use evm::CostType; -use instructions::{self, Instruction, InstructionInfo}; +use crate::{ + evm::CostType, + instructions::{self, Instruction, InstructionInfo}, +}; pub use self::shared_cache::SharedCache; use self::{ @@ -1543,8 +1545,8 @@ fn address_to_u256(value: Address) -> 
U256 { #[cfg(test)] mod tests { + use crate::{factory::Factory, vmtype::VMType}; use ethereum_types::Address; - use factory::Factory; use rustc_hex::FromHex; use std::sync::Arc; use vm::{ @@ -1552,7 +1554,6 @@ mod tests { tests::{test_finalize, FakeExt}, ActionParams, ActionValue, Exec, }; - use vmtype::VMType; fn interpreter(params: ActionParams, ext: &dyn vm::Ext) -> Box { Factory::new(VMType::Interpreter, 1).create(params, ext.schedule(), ext.depth()) diff --git a/crates/vm/evm/src/interpreter/shared_cache.rs b/crates/vm/evm/src/interpreter/shared_cache.rs index cc25ff464f..20cd808abc 100644 --- a/crates/vm/evm/src/interpreter/shared_cache.rs +++ b/crates/vm/evm/src/interpreter/shared_cache.rs @@ -61,7 +61,7 @@ impl SharedCache { code_hash: &Option, code: &[u8], ) -> (Arc, Arc) { - if let Some(ref code_hash) = code_hash { + if let Some(code_hash) = code_hash { if code_hash == &KECCAK_EMPTY { let cache_item = Self::find_jump_and_sub_destinations(code); return (cache_item.jump_destination.0, cache_item.sub_entrypoint.0); @@ -74,7 +74,7 @@ impl SharedCache { let d = Self::find_jump_and_sub_destinations(code); - if let Some(ref code_hash) = code_hash { + if let Some(code_hash) = code_hash { self.jump_destinations.lock().insert(*code_hash, d.clone()); } diff --git a/crates/vm/evm/src/interpreter/stack.rs b/crates/vm/evm/src/interpreter/stack.rs index 86c8908635..ee23c5a31a 100644 --- a/crates/vm/evm/src/interpreter/stack.rs +++ b/crates/vm/evm/src/interpreter/stack.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with OpenEthereum. If not, see . 
-use instructions; +use crate::instructions; use std::fmt; /// Stack trait with VM-friendly API diff --git a/crates/vm/evm/src/lib.rs b/crates/vm/evm/src/lib.rs index 66431fa3dc..0d0dc2f83b 100644 --- a/crates/vm/evm/src/lib.rs +++ b/crates/vm/evm/src/lib.rs @@ -18,14 +18,13 @@ extern crate bit_set; extern crate ethcore_builtin as builtin; -extern crate ethereum_types; extern crate keccak_hash as hash; extern crate memory_cache; extern crate num_bigint; extern crate parity_bytes as bytes; extern crate parity_util_mem; extern crate parking_lot; -extern crate vm; +use vm; #[macro_use] extern crate lazy_static; diff --git a/crates/vm/evm/src/tests.rs b/crates/vm/evm/src/tests.rs index b97294ea0c..c27f758894 100644 --- a/crates/vm/evm/src/tests.rs +++ b/crates/vm/evm/src/tests.rs @@ -15,8 +15,8 @@ // along with OpenEthereum. If not, see . use super::interpreter::MAX_SUB_STACK_SIZE; +use crate::{factory::Factory, vmtype::VMType}; use ethereum_types::{Address, H256, U256}; -use factory::Factory; use hex_literal::hex; use rustc_hex::FromHex; use std::{ @@ -31,7 +31,6 @@ use vm::{ tests::{test_finalize, FakeCall, FakeCallType, FakeExt}, ActionParams, ActionValue, Ext, }; -use vmtype::VMType; evm_test! {test_add: test_add_int} fn test_add(factory: super::Factory) { diff --git a/crates/vm/vm/Cargo.toml b/crates/vm/vm/Cargo.toml index ece0197c49..6d39146c2e 100644 --- a/crates/vm/vm/Cargo.toml +++ b/crates/vm/vm/Cargo.toml @@ -3,6 +3,7 @@ description = "Virtual Machines (VM) Support Library" name = "vm" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] parity-bytes = "0.1" diff --git a/crates/vm/vm/src/action_params.rs b/crates/vm/vm/src/action_params.rs index f2076e6e04..3b408070e0 100644 --- a/crates/vm/vm/src/action_params.rs +++ b/crates/vm/vm/src/action_params.rs @@ -16,11 +16,11 @@ //! Evm input params. 
use super::access_list::AccessList; +use crate::call_type::CallType; use bytes::Bytes; -use call_type::CallType; use ethereum_types::{Address, H256, U256}; use ethjson; -use hash::{keccak, KECCAK_EMPTY}; +use hash::{KECCAK_EMPTY, keccak}; use std::sync::Arc; diff --git a/crates/vm/vm/src/error.rs b/crates/vm/vm/src/error.rs index bb1b8355fa..50b0bee8b3 100644 --- a/crates/vm/vm/src/error.rs +++ b/crates/vm/vm/src/error.rs @@ -16,12 +16,10 @@ //! VM errors module -use action_params::ActionParams; +use crate::{ResumeCall, ResumeCreate, action_params::ActionParams}; use ethereum_types::Address; use ethtrie; use std::fmt; -use ResumeCall; -use ResumeCreate; #[derive(Debug)] pub enum TrapKind { diff --git a/crates/vm/vm/src/ext.rs b/crates/vm/vm/src/ext.rs index fccb42e78c..25a6da7ca7 100644 --- a/crates/vm/vm/src/ext.rs +++ b/crates/vm/vm/src/ext.rs @@ -16,13 +16,15 @@ //! Interface for Evm externalities. +use crate::{ + call_type::CallType, + env_info::EnvInfo, + error::{Result, TrapKind}, + return_data::ReturnData, + schedule::Schedule, +}; use bytes::Bytes; -use call_type::CallType; -use env_info::EnvInfo; -use error::{Result, TrapKind}; use ethereum_types::{Address, H256, U256}; -use return_data::ReturnData; -use schedule::Schedule; use std::sync::Arc; #[derive(Debug)] diff --git a/crates/vm/vm/src/lib.rs b/crates/vm/vm/src/lib.rs index f662921c7f..5a17f647c6 100644 --- a/crates/vm/vm/src/lib.rs +++ b/crates/vm/vm/src/lib.rs @@ -16,8 +16,6 @@ //! 
Virtual machines support library -extern crate ethereum_types; -extern crate ethjson; extern crate keccak_hash as hash; extern crate parity_bytes as bytes; extern crate patricia_trie_ethereum as ethtrie; @@ -34,11 +32,13 @@ pub mod schedule; pub mod tests; +pub use crate::error::{ + Error, ExecTrapError, ExecTrapResult, Result, TrapError, TrapKind, TrapResult, +}; pub use access_list::AccessList; pub use action_params::{ActionParams, ActionValue, ParamsType}; pub use call_type::CallType; pub use env_info::{EnvInfo, LastHashes}; -pub use error::{Error, ExecTrapError, ExecTrapResult, Result, TrapError, TrapKind, TrapResult}; pub use ext::{ContractCreateResult, CreateContractAddress, Ext, MessageCallResult}; pub use return_data::{GasLeft, ReturnData}; pub use schedule::{CleanDustMode, Schedule, WasmCosts}; diff --git a/crates/vm/vm/src/tests.rs b/crates/vm/vm/src/tests.rs index 34336da135..4b224c097d 100644 --- a/crates/vm/vm/src/tests.rs +++ b/crates/vm/vm/src/tests.rs @@ -19,21 +19,13 @@ use std::{ sync::Arc, }; -use crate::access_list::AccessList; +use crate::{ + CallType, ContractCreateResult, CreateContractAddress, EnvInfo, Ext, GasLeft, + MessageCallResult, Result, ReturnData, Schedule, access_list::AccessList, error::TrapKind, +}; use bytes::Bytes; -use error::TrapKind; use ethereum_types::{Address, H256, U256}; use hash::keccak; -use CallType; -use ContractCreateResult; -use CreateContractAddress; -use EnvInfo; -use Ext; -use GasLeft; -use MessageCallResult; -use Result; -use ReturnData; -use Schedule; pub struct FakeLogEntry { pub topics: Vec, diff --git a/crates/vm/wasm/Cargo.toml b/crates/vm/wasm/Cargo.toml index 2b1bafca22..075491dd50 100644 --- a/crates/vm/wasm/Cargo.toml +++ b/crates/vm/wasm/Cargo.toml @@ -3,6 +3,7 @@ description = "WASM Interpreter" name = "wasm" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2024" [dependencies] byteorder = "1.0" diff --git a/crates/vm/wasm/src/env.rs b/crates/vm/wasm/src/env.rs index 
ba51305c5d..b009f118ad 100644 --- a/crates/vm/wasm/src/env.rs +++ b/crates/vm/wasm/src/env.rs @@ -19,8 +19,8 @@ use std::cell::RefCell; use vm::WasmCosts; use wasmi::{ - self, memory_units, Error, FuncInstance, FuncRef, MemoryDescriptor, MemoryInstance, MemoryRef, - Signature, + self, Error, FuncInstance, FuncRef, MemoryDescriptor, MemoryInstance, MemoryRef, Signature, + memory_units, }; /// Internal ids all functions runtime supports. This is just a glue for wasmi interpreter @@ -210,7 +210,7 @@ impl wasmi::ModuleImportResolver for ImportResolver { return Err(wasmi::Error::Instantiation(format!( "Export {} not found", field_name - ))) + ))); } }; diff --git a/crates/vm/wasm/src/lib.rs b/crates/vm/wasm/src/lib.rs index 9601d53cdd..a0770db824 100644 --- a/crates/vm/wasm/src/lib.rs +++ b/crates/vm/wasm/src/lib.rs @@ -17,13 +17,13 @@ //! Wasm Interpreter extern crate byteorder; -extern crate ethereum_types; +use ethereum_types; #[macro_use] extern crate log; extern crate libc; extern crate parity_wasm; extern crate pwasm_utils as wasm_utils; -extern crate vm; +use vm; extern crate wasmi; #[cfg(test)] @@ -132,7 +132,6 @@ impl WasmInterpreter { address: self.params.address, sender: self.params.sender, origin: self.params.origin, - code_address: self.params.code_address, value: self.params.value.value(), }, ); diff --git a/crates/vm/wasm/src/runtime.rs b/crates/vm/wasm/src/runtime.rs index b4de3f77c6..01b684ad08 100644 --- a/crates/vm/wasm/src/runtime.rs +++ b/crates/vm/wasm/src/runtime.rs @@ -26,7 +26,6 @@ pub struct RuntimeContext { pub address: Address, pub sender: Address, pub origin: Address, - pub code_address: Address, pub value: U256, } @@ -806,7 +805,7 @@ impl<'a> Runtime<'a> { mod ext_impl { - use env::ids::*; + use crate::env::ids::*; use wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; macro_rules! void { diff --git a/default.nix b/default.nix new file mode 100644 index 0000000000..c30ae919c3 --- /dev/null +++ b/default.nix @@ -0,0 +1,7 @@ +{ pkgs ? 
import {} }: + pkgs.mkShell { + # nativeBuildInputs is usually what you want -- tools you need to run + nativeBuildInputs = with pkgs.buildPackages; [ + gcc + ]; +} \ No newline at end of file diff --git a/scripts/actions/build-linux.sh b/scripts/actions/build-linux.sh index dcfc2e9ae7..7df17df355 100755 --- a/scripts/actions/build-linux.sh +++ b/scripts/actions/build-linux.sh @@ -16,7 +16,7 @@ echo "_____ Post-processing binaries _____" rm -rf artifacts/* mkdir -p artifacts/ -cp -v target/release/openethereum artifacts/openethereum +cp -v target/release/diamond-node artifacts/diamond-node cp -v target/release/openethereum-evm artifacts/openethereum-evm cp -v target/release/ethstore artifacts/ethstore cp -v target/release/ethkey artifacts/ethkey diff --git a/scripts/actions/build-windows.sh b/scripts/actions/build-windows.sh deleted file mode 100755 index 948e7f85e5..0000000000 --- a/scripts/actions/build-windows.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -set -e # fail on any error -set -u # treat unset variables as error -# NOTE: Enables the aes-ni instructions for RustCrypto dependency. -# If you change this please remember to also update .cargo/config -export RUSTFLAGS=" -Ctarget-feature=+aes,+sse2,+ssse3 -Ctarget-feature=+crt-static -Clink-arg=-s" - -echo "_____ Build Parity and tools _____" -time cargo build --verbose --release --features final -time cargo build --verbose --release -p evmbin -time cargo build --verbose --release -p ethstore-cli -time cargo build --verbose --release -p ethkey-cli - -echo "_____ Post-processing binaries _____" -rm -rf artifacts -mkdir -p artifacts - -cp --verbose target/release/openethereum.exe artifacts/openethereum.exe -cp --verbose target/release/openethereum-evm.exe artifacts/openethereum-evm.exe -cp --verbose target/release/ethstore.exe artifacts/ethstore.exe -cp --verbose target/release/ethkey.exe artifacts/ethkey.exe