diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 72b9791eadb..3d82e9f34ae 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -63,7 +63,7 @@ RUN set -ex; \ rm /tmp/cargo-binstall; \ cargo binstall -V -RUN cargo binstall wasm-bindgen-cli@0.2.100 --locked \ +RUN cargo binstall wasm-bindgen-cli@0.2.103 --locked \ --no-discover-github-token \ --disable-telemetry \ --no-track \ diff --git a/.github/actions/local-network/action.yaml b/.github/actions/local-network/action.yaml index c2c6060ba6f..58e12f4c8a7 100644 --- a/.github/actions/local-network/action.yaml +++ b/.github/actions/local-network/action.yaml @@ -82,8 +82,8 @@ runs: docker pull ${{ inputs.image_org }}/dashmate-helper:$SHA_TAG docker tag ${{ inputs.image_org }}/dashmate-helper:$SHA_TAG dashpay/dashmate-helper:$VERSION - # Replace DAPI and Drive images with new org and tag in dashmate config - sed -i -E "s/dashpay\/(drive|dapi):[^\"]+/${{ inputs.image_org }}\/\1:${SHA_TAG}/g" ${{ env.HOME }}/.dashmate/config.json + # Replace Drive, DAPI, and RS-DAPI images with new org and tag in dashmate config + sed -i -E "s/dashpay\/(drive|dapi|rs-dapi):[^\"]+/${{ inputs.image_org }}\/\1:${SHA_TAG}/g" ${{ env.HOME }}/.dashmate/config.json cat ${{ env.HOME }}/.dashmate/config.json diff --git a/.github/package-filters/rs-packages.yml b/.github/package-filters/rs-packages.yml index 78129be673c..0e327bf773d 100644 --- a/.github/package-filters/rs-packages.yml +++ b/.github/package-filters/rs-packages.yml @@ -69,6 +69,12 @@ rs-dapi-client: &dapi_client - packages/rs-dapi-client/** - *dapi_grpc +rs-dapi: + - .github/workflows/tests* + - packages/rs-dapi/** + - *dapi_grpc + - *dpp + dash-sdk: &sdk - .github/workflows/tests* - packages/rs-drive-proof-verifier/** diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index d677ccb3d7e..6137602f882 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -34,6 +34,7 @@ jobs: scopes: | sdk dapi + rs-dapi platform 
drive drive-abci diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 476c11bd53f..a9d1b82fe02 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -70,7 +70,7 @@ jobs: if: ${{ steps.check-artifact.outputs.exists != 'true' }} - name: Install wasm-bindgen-cli - run: cargo binstall wasm-bindgen-cli@0.2.100 + run: cargo binstall wasm-bindgen-cli@0.2.103 if: ${{ steps.check-artifact.outputs.exists != 'true' }} - name: Install wasm-pack @@ -191,6 +191,18 @@ jobs: target: dapi tag: ${{ inputs.tag || github.event.release.tag_name }} + release-rs-dapi-image: + name: Release RS-DAPI image + if: ${{ !inputs.only_drive }} + secrets: inherit + uses: ./.github/workflows/release-docker-image.yml + with: + name: RS-DAPI + image_org: dashpay + image_name: rs-dapi + target: rs-dapi + tag: ${{ inputs.tag || github.event.release.tag_name }} + release-test-suite-image: name: Release Test Suite image if: ${{ !inputs.only_drive }} diff --git a/.github/workflows/tests-build-js.yml b/.github/workflows/tests-build-js.yml index f7e9104792d..c70274ecb81 100644 --- a/.github/workflows/tests-build-js.yml +++ b/.github/workflows/tests-build-js.yml @@ -54,7 +54,7 @@ jobs: if: ${{ steps.check-artifact.outputs.exists != 'true' }} - name: Install wasm-bindgen-cli - run: cargo binstall wasm-bindgen-cli@0.2.100 + run: cargo binstall wasm-bindgen-cli@0.2.103 if: ${{ steps.check-artifact.outputs.exists != 'true' }} - name: Install wasm-pack diff --git a/.github/workflows/tests-dashmate.yml b/.github/workflows/tests-dashmate.yml index 170006c22a6..576287ed166 100644 --- a/.github/workflows/tests-dashmate.yml +++ b/.github/workflows/tests-dashmate.yml @@ -67,6 +67,11 @@ jobs: docker pull $DOCKER_HUB_ORG/dapi:$SHA_TAG docker tag $DOCKER_HUB_ORG/dapi:$SHA_TAG $DAPI_IMAGE_AND_VERSION + # RS-DAPI + RS_DAPI_IMAGE_AND_VERSION=$(yarn dashmate config get --config=local platform.dapi.rsDapi.docker.image) + docker pull $DOCKER_HUB_ORG/rs-dapi:$SHA_TAG + 
docker tag $DOCKER_HUB_ORG/rs-dapi:$SHA_TAG $RS_DAPI_IMAGE_AND_VERSION + # Dashmate helper image is hardcoded so we replace it with the built one VERSION=$(cat package.json | jq -r '.version') docker pull $DOCKER_HUB_ORG/dashmate-helper:$SHA_TAG diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 353cc0b7ae5..88f8da3453b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -6,11 +6,11 @@ on: types: [opened, synchronize, reopened, ready_for_review] branches: - master - - 'v*-dev' + - "v*-dev" push: branches: - master - - 'v*-dev' + - "v*-dev" schedule: - cron: "30 4 * * *" @@ -81,6 +81,9 @@ jobs: - name: DAPI image_name: dapi target: dapi + - name: RS-DAPI + image_name: rs-dapi + target: rs-dapi - name: Dashmate helper image_name: dashmate-helper target: dashmate-helper @@ -102,7 +105,7 @@ jobs: uses: ./.github/workflows/tests-rs-package.yml with: package: ${{ matrix.rs-package }} - check-each-feature: ${{ contains(fromJSON('["dash-sdk","rs-dapi-client","dapi-grpc","dpp","drive-abci"]'), matrix.rs-package) }} + check-each-feature: ${{ contains(fromJSON('["dash-sdk","rs-dapi-client","rs-dapi","dapi-grpc","dpp","drive-abci"]'), matrix.rs-package) }} direct-packages: ${{ needs.changes.outputs.rs-packages-direct }} rs-crates-security: diff --git a/.yarn/cache/fsevents-patch-19706e7e35-10.zip b/.yarn/cache/fsevents-patch-19706e7e35-10.zip deleted file mode 100644 index aff1ab12ce5..00000000000 Binary files a/.yarn/cache/fsevents-patch-19706e7e35-10.zip and /dev/null differ diff --git a/CHANGELOG.md b/CHANGELOG.md index fa959bc380f..a8360437107 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,608 @@ +## [2.1.0-pr.2716.1](https://github.com/dashpay/platform/compare/v2.1.0-dev.8...v2.1.0-pr.2716.1) (2025-10-10) + + +### ⚠ BREAKING CHANGES + +* **sdk:** provide all getStatus info (#2729) + +### Features + +* access log +* add tests for new token transitions +* bloom filters +* cache metrics +* config 
dapi_platform_cache_bytes, dapi_core_cache_bytes +* core client block cache +* core get_block get_blockchain_status get_masternode_status get_estimated_transaction_fee +* core get_transaction +* core transaction broadcast +* **dapi-cli:** dapi_cli core masternode-status +* **dapi-cli:** identity cmd +* dashmate rs-dapi metrics support +* **dashmate:** deprecate old dapi +* **dashmate:** rs dapi log level configurable +* **drive-abci:** event bus +* event bus in rs-dapi +* **event_bus:** impl no_unsubscribe_on_drop +* forward core chain locks in tx stream +* get_best_block_height +* historical streaming for `subscribeToBlockHeadersWithChainLocks` +* masternode list diff +* **platform-test-suite:** disable peer lookup when DAPI_ADDRESSES is provided +* publish finalized transaction +* request metrics +* rs-dapi only forwards subscribe platform events req/resp +* **rs-dapi:** rest and jsonrpc translator +* **rs-dapi:** rest get glock by hash/height +* rs-drive-abci subscribe endpoint +* **sdk:** provide all getStatus info ([#2729](https://github.com/dashpay/platform/issues/2729)) +* subscribe to all transactions +* tenderdash client example +* transaction stream gate auto open after timeout +* **wasm-sdk:** implement four missing token transitions + + +### Bug Fixes + +* mempool uses shared stream state +* cache merged for no good reason +* core client needs timeouts +* dapi-grpc service capitalization - clippy warning +* **dapi-grpc:** invalid routers +* dash-serialized-consensus-error-bin +* dashmate reset access denied when deleting logs +* **dashmate:** support dapi deprecated in schema +* deprecated mode not working +* docker build fails +* dockerfile overwrites log level +* duplicates in subs +* empty data scenario +* error handling, continued +* get_blockchain_status fails +* getBlockHash height parsing +* instant lock deserialize +* invalid serialization of consensus params +* jsonrpc request id should be unique +* mempool should not block normal txs sending 
+* mn diff using wrong method +* **sdk:** fix generate docs ([#2730](https://github.com/dashpay/platform/issues/2730)) +* streaming fails on error +* tenderdash client +* **test:** fix env tests that are unsafe in edition=2024 +* transaction filter update +* transaction stream: merkle block checks already delivered txs +* **wallet-lib:** resume sync from last synced height +* **wallet-lib:** stream restarted every now and then due to addressesGenerated = []; +* **wasm-drive-verify:** simple_benchmars.rs fail +* wasm-sdk does not build +* **wasm-sdk:** enable proofs for getContestedResourceVotersForIdentity ([#2732](https://github.com/dashpay/platform/issues/2732)) +* **wasm-sdk:** resolve test failures and optimize CI workflow ([#2735](https://github.com/dashpay/platform/issues/2735)) +* **wasm-sdk:** use identity contract nonce for data contract updates ([#2738](https://github.com/dashpay/platform/issues/2738)) +* zmq tx event +* zmq_listernet should listen on rawtxlock and rawchainlock + + +### Tests + +* add debug info - to revert +* bloom filtering +* bloom filters +* increase tests of bloom filtering +* more debug, to revert +* platform events ping test (fails now) +* **sdk:** expand wasm-sdk page UI testing ([#2720](https://github.com/dashpay/platform/issues/2720)) +* test event bus +* **wallet-lib:** add txids to merke block info +* **wallet-lib:** add txids to merke block info, continued + + +### Build System + +* bump tenderdash-abci to v1.5.0-dev.2 +* **deps:** bump wasm-bindgen to 0.2.103 +* **deps:** dashcore v0.40-dev in wasm-sdk +* **deps:** rust 1.89 +* **deps:** update dashcore to latest dev +* Dockerfile rocksdb 10.4.2 +* fix build +* **rs-dapi:** bump tonic +* update Cargo.lock +* update rust-dashcore + + +### Code Refactoring + +* cache sync +* cache using serde +* cache with memory usage limits +* dapi-cli +* divide block_header_stream.rs into parts. 
+* error handling +* less Arcs in DapiServer +* move bloom implementation to separate mod +* move some code around +* remove mocks +* remove REST gateway +* remove rs-dapi/dapi_cli +* rename rs-dash-notify to rs-dash-event-bus +* **rs-dapi:** move mux to platformserviceimpl +* **sdk:** wasm-sdk doc generation refactor ([#2726](https://github.com/dashpay/platform/issues/2726)) +* td client uses req/resp structs, not plain json +* trace spawned threaads +* transaction_stream refactor async handling +* use bloom logic from dash spv +* use bounded streams in subscriptions + + +### Miscellaneous Chores + +* Remove panic on ZMQ startup +* add InstantLock and chainLock msgs to bloom filter and CoreAllTxs +* add merke block to the tx stream +* add rs-dapi todo +* add some logs +* add some tracing +* add subscribePlatformEvents to envoy config +* add timeout when proxying +* add wasm-sdk as scope for pr linting ([#2731](https://github.com/dashpay/platform/issues/2731)) +* always create websocket client +* better debug +* block header stream - add chainlocks +* block_header_stream adjust errors +* block_header_stream async +* block_header_stream history adjust +* block_header_stream input validation +* bloom filters using dashcore +* broadcast tx error handling - cbor walk fix +* cache fixes +* cache invalidation +* cache layer, initial impl +* cargo fmt +* change how bloom filter locking works +* clippy +* comment +* consensus error print on silly level +* dapi-cli protocol upgrade info +* dashmate config location +* dashmate config migrations fix +* debug +* debug consensus error again +* debug improvements +* debug log in js-dapi-client +* disable deduplication +* DriveErrorDataBin +* drop events on full receiver +* drop invalid instant locks +* envoy tune keepalives for rs-dapi +* error handling +* error mapping +* errors continued +* event bus in rs-sdk +* fallback to contains +* fix access log +* fix build +* fix build +* fix build in rs-dash-notify +* fix cache and 
core client +* fix connected check +* fix core config +* fix debug msg +* fix error status +* fix example build +* fix notifications +* fix path parsing +* fix tests +* fix timeout in wait_for_state_transition_result_impl +* fix wallet-lib +* fixes of error handling +* fmt +* fmt and others +* furher implementation of erorrs in broadcast +* grpc producer ready flag +* grpc tuning +* healthcheck improvements +* improve debug +* improve error handling +* improve error mapping +* improve grpc logging +* improve subscriptions +* improve tests +* inc envoy timeouts +* instant lock delivered +* json rpc translator params bool parsing +* mempool processing gets separate TransactionsStreamState +* merkle block +* metrics and cache updates +* minor refactor +* minor review fixes +* mn list sync logging +* more debug +* more debug +* more debug +* more debug +* more debug ion subscriptions +* more logging +* more logs in subscriptions +* optimized core client with rate limits +* platform_events example improved +* refactor +* refactor logging +* refactoring broadcast errors +* remove logrotate.conf +* remove subscriptions from drive-abci and rs-sdk - moved to separate pr +* remove unused deps in rs-dash-event-bus +* remove uuid +* remove worksers from LruResponseCache +* rename filters +* revert DAPI_ADDRESSES env var support in platform-test-suite +* review +* rewrite rs-dash-notify +* rs-dapi, continued +* rs-sdk events WIP +* same wasm-bindgen version and related libs +* self review +* self review +* self review and tests updated +* speed up historical queries +* subscribe_platform_events +* sync docs +* sync event bus with packages/rs-dash-event-bus +* tracing middleware for reqwest in td client +* transaction_stream deliver merkle block even if no matching txs +* try to fix core client block fetch +* try to fix error handling +* trying to fix Dockerfile +* trying to fix error handling +* tx stream +* tx stream order +* tx stream refactor +* update wait for state 
transition result logic +* use EventBus instead of subscriber_manager +* use unconfirmed_tx to check if tx is in mempool +* wallet-lib logs tx hashes and block hashes - to revert +* watch channel instead of notify + +## [2.1.0-dev.3](https://github.com/dashpay/platform/compare/v2.1.0-dev.2...v2.1.0-dev.3) (2025-08-07) + + +### Miscellaneous Chores + +* fix wasm-sdk build +* getrandom downgrade continued +* getrandom downgrade, continued +* **release:** update changelog and version to 2.1.0-dev.3 +* trying to build +* update some deps +* wasm-sdk deps update + +## [2.1.0-dev.2](https://github.com/dashpay/platform/compare/v2.1.0-dev.1...v2.1.0-dev.2) (2025-08-06) + + +### Features + +* access logging + + +### Build System + +* **deps:** update getrandom to v0.3 + + +### Continuous Integration + +* rs-dapi workflows + + +### Miscellaneous Chores + +* at least compiles +* better logging +* cargo.lock version +* cargo.toml reorder packages +* cleanup deps +* clippy +* copy rs-dapi +* dashmate impl +* DESIGN - logging described +* disable access log (doesn't work anyway) +* example apps +* fix env var name +* identity create green +* improve logging +* minor fixes +* move old dapi to /deprecated prefix +* progress, tenderdash to do +* refactor of td client and websockets +* **release:** update changelog and version to 2.1.0-dev.2 +* replace sync zmq with async zeromq +* rs-dapi verbose entrypoint +* rs-dapi, wip +* some logs +* tracing logging +* try to fix logging +* wip +* wip +* wip +* zeromq improvements +* zmq +* zmq details +* zmq reconnecting +* zmq to test + +## [2.1.0-dev.1](https://github.com/dashpay/platform/compare/v2.0.1...v2.1.0-dev.1) (2025-07-11) + + +### Miscellaneous Chores + +* **release:** update changelog and version to 2.1.0-dev.1 + +## [2.1.0-dev.8](https://github.com/dashpay/platform/compare/v2.1.0-dev.7...v2.1.0-dev.8) (2025-10-03) + + +### ⚠ BREAKING CHANGES + +* **platform:** creator id and improved verification of document uniqueness before 
insertion (#2790) + +### Features + +* **platform:** creator id and improved verification of document uniqueness before insertion ([#2790](https://github.com/dashpay/platform/issues/2790)) +* **sdk:** expose data contract from json ([#2791](https://github.com/dashpay/platform/issues/2791)) + + +### Bug Fixes + +* **dashmate:** consensus params in dashmate different than on testnet ([#2682](https://github.com/dashpay/platform/issues/2682)) +* **sdk:** wasm is not initialized for some methods ([#2792](https://github.com/dashpay/platform/issues/2792)) + + +### Miscellaneous Chores + +* **release:** update changelog and bump version to 2.1.0-dev.8 ([#2797](https://github.com/dashpay/platform/issues/2797)) +* script to backup and restore state + +### [2.0.1](https://github.com/dashpay/platform/compare/v2.0.0...v2.0.1) (2025-07-10) + + +### ⚠ BREAKING CHANGES + +* **platform:** update keyword search contract ID and owner ID bytes (#2693) + +### Bug Fixes + +* **platform:** update keyword search contract ID and owner ID bytes ([#2693](https://github.com/dashpay/platform/issues/2693)) + + +### Miscellaneous Chores + +* release version 2.0.1 ([#2695](https://github.com/dashpay/platform/issues/2695)) + +## [2.1.0-dev.7](https://github.com/dashpay/platform/compare/v2.1.0-dev.6...v2.1.0-dev.7) (2025-09-29) + + +### Bug Fixes + +* **sdk:** wasm sdk is not initialized for static methods ([#2788](https://github.com/dashpay/platform/issues/2788)) + + +### Miscellaneous Chores + +* **release:** update changelog and bump version to 2.1.0-dev.7 ([#2789](https://github.com/dashpay/platform/issues/2789)) + +## [2.1.0-dev.6](https://github.com/dashpay/platform/compare/v2.1.0-dev.5...v2.1.0-dev.6) (2025-09-24) + + +### Features + +* **drive:** document filter for state transition subscriptions part 2 ([#2781](https://github.com/dashpay/platform/issues/2781)) +* **sdk:** add more SDK methods ([#2784](https://github.com/dashpay/platform/issues/2784)) + + +### Bug Fixes + +* **dashmate:** 
incompatible tenderdash version ([#2786](https://github.com/dashpay/platform/issues/2786)) + + +### Performance Improvements + +* **rs-sdk:** optimize wasm-sdk bundle size ([#2783](https://github.com/dashpay/platform/issues/2783)) + + +### Miscellaneous Chores + +* **release:** update changelog and bump version to 2.1.0-dev.6 ([#2785](https://github.com/dashpay/platform/issues/2785)) + +## [2.1.0-dev.5](https://github.com/dashpay/platform/compare/v2.1.0-dev.4...v2.1.0-dev.5) (2025-09-19) + + +### Features + +* **drive:** document filter for state transition subscriptions part 1 ([#2761](https://github.com/dashpay/platform/issues/2761)) + + +### Build System + +* fix sdk npm packaging ([#2780](https://github.com/dashpay/platform/issues/2780)) + + +### Miscellaneous Chores + +* **release:** update changelog and bump version to 2.1.0-dev.5 ([#2782](https://github.com/dashpay/platform/issues/2782)) + +## [2.1.0-dev.4](https://github.com/dashpay/platform/compare/v2.1.0-dev.3...v2.1.0-dev.4) (2025-09-19) + + +### ⚠ BREAKING CHANGES + +* **wasm-sdk:** handle identity create transition signing for all types of keys (#2754) +* **wasm-sdk:** remove unused key_id parameters from state transitions (#2759) +* **sdk:** provide all getStatus info (#2729) + +### Features + +* add tests for new token transitions +* evo sdk ([#2771](https://github.com/dashpay/platform/issues/2771)) +* **sdk:** epic: rs-sdk-ffi and ios support ([#2756](https://github.com/dashpay/platform/issues/2756)) +* **sdk:** provide all getStatus info ([#2729](https://github.com/dashpay/platform/issues/2729)) +* **wasm-sdk:** implement four missing token transitions +* **wasm-sdk:** remove unused key_id parameters from state transitions ([#2759](https://github.com/dashpay/platform/issues/2759)) + + +### Bug Fixes + +* **sdk:** fix generate docs ([#2730](https://github.com/dashpay/platform/issues/2730)) +* **sdk:** js sdk audit warnings by adding crypto-related dependencies to package.json 
([#2757](https://github.com/dashpay/platform/issues/2757)) +* **wasm-sdk:** handle identity create transition signing for all types of keys ([#2754](https://github.com/dashpay/platform/issues/2754)) +* **wasm-sdk:** address compiler warnings ([#2734](https://github.com/dashpay/platform/issues/2734)) +* **wasm-sdk:** connect where and orderBy clause functionality for getDocuments ([#2753](https://github.com/dashpay/platform/issues/2753)) +* **wasm-sdk:** enable proofs for getContestedResourceVotersForIdentity ([#2732](https://github.com/dashpay/platform/issues/2732)) +* **wasm-sdk:** fix nft transitions ([#2751](https://github.com/dashpay/platform/issues/2751)) +* **wasm-sdk:** resolve CI test failures and build issues ([#2765](https://github.com/dashpay/platform/issues/2765)) +* **wasm-sdk:** resolve test failures and optimize CI workflow ([#2735](https://github.com/dashpay/platform/issues/2735)) +* **wasm-sdk:** use identity contract nonce for data contract updates ([#2738](https://github.com/dashpay/platform/issues/2738)) + + +### Tests + +* **sdk:** expand wasm-sdk page UI testing ([#2720](https://github.com/dashpay/platform/issues/2720)) +* **wasm-sdk:** add ui tests for almost all state transitions ([#2739](https://github.com/dashpay/platform/issues/2739)) + + +### Build System + +* bump tenderdash-abci to v1.5.0-dev.2 ([#2770](https://github.com/dashpay/platform/issues/2770)) +* update rust to 1.89 ([#2755](https://github.com/dashpay/platform/issues/2755)) + + +### Code Refactoring + +* **sdk:** wasm-sdk doc generation refactor ([#2726](https://github.com/dashpay/platform/issues/2726)) +* swift sdk fixes ([#2772](https://github.com/dashpay/platform/issues/2772)) +* **wasm-sdk:** improve documentation generation maintainability ([#2773](https://github.com/dashpay/platform/issues/2773)) + + +### Continuous Integration + +* dont do CI when it's not needed ([#2774](https://github.com/dashpay/platform/issues/2774)) +* swift CI fixes 
([#2775](https://github.com/dashpay/platform/issues/2775)) +* Use self hosted mac runner ([#2776](https://github.com/dashpay/platform/issues/2776)) + + +### Miscellaneous Chores + +* add wasm-sdk as scope for pr linting ([#2731](https://github.com/dashpay/platform/issues/2731)) +* clean dpp clippy ([#2764](https://github.com/dashpay/platform/issues/2764)) +* **drive:** fix drive linting ([#2763](https://github.com/dashpay/platform/issues/2763)) +* **release:** update changelog and bump version to 2.1.0-dev.4 ([#2779](https://github.com/dashpay/platform/issues/2779)) +* sdk clippy issues ([#2767](https://github.com/dashpay/platform/issues/2767)) +* update yarn cache with new dependencies ([#2758](https://github.com/dashpay/platform/issues/2758)) +* **wasm-sdk:** apply cargo fmt and cleanup ([#2766](https://github.com/dashpay/platform/issues/2766)) + +## [2.1.0-dev.3](https://github.com/dashpay/platform/compare/v2.1.0-dev.2...v2.1.0-dev.3) (2025-08-07) + + +### Miscellaneous Chores + +* fix wasm-sdk build +* getrandom downgrade continued +* getrandom downgrade, continued +* **release:** update changelog and version to 2.1.0-dev.3 +* trying to build +* update some deps +* wasm-sdk deps update + +## [2.1.0-dev.2](https://github.com/dashpay/platform/compare/v2.1.0-dev.1...v2.1.0-dev.2) (2025-08-06) + + +### Features + +* access logging +* add wasm bindings for Drive verification functions ([#2660](https://github.com/dashpay/platform/issues/2660)) +* balance checker app ([#2688](https://github.com/dashpay/platform/issues/2688)) +* **dashmate:** allow configuring zmq using dashmate ([#2697](https://github.com/dashpay/platform/issues/2697)) +* **sdk:** add request settings in wasm sdk ([#2707](https://github.com/dashpay/platform/issues/2707)) +* **sdk:** add username search example in evo-sdk ([#2706](https://github.com/dashpay/platform/issues/2706)) +* **sdk:** adding a trusted context provider package ([#2687](https://github.com/dashpay/platform/issues/2687)) +* **sdk:** 
dpns sdk improvements ([#2692](https://github.com/dashpay/platform/issues/2692)) +* **sdk:** enable proof support for most queries ([#2718](https://github.com/dashpay/platform/issues/2718)) +* **sdk:** identity creation in wasm ([#2711](https://github.com/dashpay/platform/issues/2711)) +* **sdk:** make wasm sdk complete for all state transitions and most queries ([#2690](https://github.com/dashpay/platform/issues/2690)) +* **sdk:** wasm docs and fixes ([#2700](https://github.com/dashpay/platform/issues/2700)) +* **sdk:** wasm drive verify optimization ([#2683](https://github.com/dashpay/platform/issues/2683)) +* **sdk:** wasm sdk core and test suite ([#2709](https://github.com/dashpay/platform/issues/2709)) + + +### Bug Fixes + +* **sdk:** fix documentation examples ([#2710](https://github.com/dashpay/platform/issues/2710)) +* **sdk:** install wasm-opt from Github instead of apt ([#2701](https://github.com/dashpay/platform/issues/2701)) +* **sdk:** modifications to get wasm-sdk working again ([#2689](https://github.com/dashpay/platform/issues/2689)) + + +### Tests + +* **sdk:** automate wasm-sdk page UI testing (partial) ([#2715](https://github.com/dashpay/platform/issues/2715)) + + +### Build System + +* **deps:** update getrandom to v0.3 + + +### Continuous Integration + +* rs-dapi workflows + + +### Miscellaneous Chores + +* at least compiles +* better logging +* cargo.lock version +* cargo.toml reorder packages +* cleanup deps +* clippy +* copy rs-dapi +* dashmate impl +* DESIGN - logging described +* disable access log (doesn't work anyway) +* example apps +* fix env var name +* identity create green +* improve logging +* minor fixes +* move old dapi to /deprecated prefix +* **platform:** add protocol version 10 support ([#2686](https://github.com/dashpay/platform/issues/2686)) +* progress, tenderdash to do +* refactor of td client and websockets +* **release:** update changelog and version to 2.1.0-dev.2 +* replace sync zmq with async zeromq +* rs-dapi 
verbose entrypoint +* rs-dapi, wip +* **sdk:** use correct port for evo-sdk mainnet ([#2699](https://github.com/dashpay/platform/issues/2699)) +* some logs +* tracing logging +* try to fix logging +* wip +* wip +* wip +* zeromq improvements +* zmq +* zmq details +* zmq reconnecting +* zmq to test + +## [2.1.0-dev.1](https://github.com/dashpay/platform/compare/v2.0.1...v2.1.0-dev.1) (2025-07-11) + + +### Bug Fixes + +* **dashmate:** consensus params in dashmate different than on testnet ([#2682](https://github.com/dashpay/platform/issues/2682)) + + +### Miscellaneous Chores + +* **release:** update changelog and version to 2.1.0-dev.1 + ## [2.1.0-dev.8](https://github.com/dashpay/platform/compare/v2.1.0-dev.7...v2.1.0-dev.8) (2025-10-03) @@ -127,17 +732,6 @@ * bump tenderdash-abci to v1.5.0-dev.2 ([#2770](https://github.com/dashpay/platform/issues/2770)) * update rust to 1.89 ([#2755](https://github.com/dashpay/platform/issues/2755)) -### [2.0.1](https://github.com/dashpay/platform/compare/v2.0.0...v2.0.1) (2025-07-10) - - -### ⚠ BREAKING CHANGES - -* **platform:** update keyword search contract ID and owner ID bytes (#2693) - -### Bug Fixes - -* **platform:** update keyword search contract ID and owner ID bytes ([#2693](https://github.com/dashpay/platform/issues/2693)) - ## [2.0.0](https://github.com/dashpay/platform/compare/v2.0.0-rc.16...v2.0.0) (2025-06-26) diff --git a/Cargo.lock b/Cargo.lock index cc85d8188a7..d2d298deda9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -216,6 +216,19 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -811,7 +824,7 @@ checksum = 
"3fce8dd7fcfcbf3a0a87d8f515194b49d6135acab73e18bd380d1d93bb1a15eb" dependencies = [ "clap", "heck 0.4.1", - "indexmap 2.11.3", + "indexmap 2.11.4", "log", "proc-macro2", "quote", @@ -830,7 +843,7 @@ checksum = "975982cdb7ad6a142be15bdf84aea7ec6a9e5d4d797c004d43185b24cfe4e684" dependencies = [ "clap", "heck 0.5.0", - "indexmap 2.11.3", + "indexmap 2.11.4", "log", "proc-macro2", "quote", @@ -882,7 +895,7 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "check-features" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "toml", ] @@ -1165,7 +1178,7 @@ dependencies = [ "cast", "ciborium", "clap", - "criterion-plot", + "criterion-plot 0.5.0", "is-terminal", "itertools 0.10.5", "num-traits", @@ -1181,6 +1194,27 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot 0.6.0", + "itertools 0.13.0", + "num-traits", + "oorandom", + "regex", + "serde", + "serde_json", + "tinytemplate", + "walkdir", +] + [[package]] name = "criterion-plot" version = "0.5.0" @@ -1191,6 +1225,16 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "criterion-plot" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" +dependencies = [ + "cast", + "itertools 0.13.0", +] + [[package]] name = "critical-section" version = "1.2.0" @@ -1225,6 +1269,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = 
"crossbeam-utils" version = "0.8.21" @@ -1314,7 +1367,7 @@ dependencies = [ [[package]] name = "dapi-grpc" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "dapi-grpc-macros", "futures-core", @@ -1332,7 +1385,7 @@ dependencies = [ [[package]] name = "dapi-grpc-macros" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "dapi-grpc", "heck 0.5.0", @@ -1377,7 +1430,7 @@ dependencies = [ [[package]] name = "dash-context-provider" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "dpp", "drive", @@ -1398,9 +1451,20 @@ dependencies = [ "serde", ] +[[package]] +name = "dash-network" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "bincode 2.0.0-rc.3", + "bincode_derive", + "hex", + "serde", +] + [[package]] name = "dash-platform-balance-checker" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "anyhow", "clap", @@ -1416,7 +1480,7 @@ dependencies = [ [[package]] name = "dash-sdk" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "arc-swap", "assert_matches", @@ -1469,13 +1533,41 @@ dependencies = [ "blsful", "clap", "crossterm", - "dashcore", - "dashcore_hashes", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "hex", "hickory-resolver", - "indexmap 2.11.3", - "key-wallet", - "key-wallet-manager", + "indexmap 2.11.4", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "key-wallet-manager 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "log", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror 1.0.69", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "dash-spv" +version = "0.40.0" +source = 
"git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "anyhow", + "async-trait", + "bincode 1.3.3", + "blsful", + "clap", + "crossterm", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "hex", + "hickory-resolver", + "indexmap 2.11.4", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "key-wallet-manager 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", "log", "rand 0.8.5", "serde", @@ -1493,14 +1585,14 @@ source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145 dependencies = [ "cbindgen 0.29.0", "clap", - "dash-spv", - "dashcore", + "dash-spv 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "env_logger 0.10.2", "futures", "hex", - "key-wallet", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "key-wallet-ffi", - "key-wallet-manager", + "key-wallet-manager 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "libc", "log", "once_cell", @@ -1524,9 +1616,9 @@ dependencies = [ "bitvec", "blake3", "blsful", - "dash-network", - "dashcore-private", - "dashcore_hashes", + "dash-network 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore-private 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "ed25519-dalek", "hex", "hex_lit", @@ -1537,17 +1629,60 @@ dependencies = [ "thiserror 2.0.16", ] +[[package]] +name = "dashcore" +version = "0.40.0" +source = 
"git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "anyhow", + "base64-compat", + "bech32", + "bincode 2.0.0-rc.3", + "bincode_derive", + "bitvec", + "blake3", + "blsful", + "dash-network 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore-private 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "hex", + "hex_lit", + "log", + "rustversion", + "secp256k1", + "serde", + "thiserror 2.0.16", +] + [[package]] name = "dashcore-private" version = "0.40.0" source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145e2003d549619698511513db925c" +[[package]] +name = "dashcore-private" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" + [[package]] name = "dashcore-rpc" version = "0.40.0" source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145e2003d549619698511513db925c" dependencies = [ - "dashcore-rpc-json", + "dashcore-rpc-json 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "hex", + "jsonrpc", + "log", + "serde", + "serde_json", +] + +[[package]] +name = "dashcore-rpc" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "dashcore-rpc-json 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", "hex", "jsonrpc", "log", @@ -1561,9 +1696,24 @@ version = "0.40.0" source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145e2003d549619698511513db925c" dependencies = [ "bincode 
2.0.0-rc.3", - "dashcore", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "hex", - "key-wallet", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "serde", + "serde_json", + "serde_repr", + "serde_with 2.3.3", +] + +[[package]] +name = "dashcore-rpc-json" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "bincode 2.0.0-rc.3", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "hex", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", "serde", "serde_json", "serde_repr", @@ -1576,7 +1726,19 @@ version = "0.40.0" source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145e2003d549619698511513db925c" dependencies = [ "bincode 2.0.0-rc.3", - "dashcore-private", + "dashcore-private 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "rs-x11-hash", + "secp256k1", + "serde", +] + +[[package]] +name = "dashcore_hashes" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "bincode 2.0.0-rc.3", + "dashcore-private 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", "rs-x11-hash", "secp256k1", "serde", @@ -1584,7 +1746,7 @@ dependencies = [ [[package]] name = "dashpay-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "platform-value", "platform-version", @@ -1594,7 +1756,7 @@ dependencies = [ [[package]] name = "data-contracts" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "dashpay-contract", "dpns-contract", @@ -1741,7 +1903,7 @@ checksum = 
"1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "dpns-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "platform-value", "platform-version", @@ -1751,7 +1913,7 @@ dependencies = [ [[package]] name = "dpp" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "anyhow", "assert_matches", @@ -1764,22 +1926,22 @@ dependencies = [ "chrono", "chrono-tz", "ciborium", - "dash-spv", - "dashcore", - "dashcore-rpc", + "dash-spv 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore-rpc 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "data-contracts", "derive_more 1.0.0", "dpp", "env_logger 0.11.8", "getrandom 0.2.16", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", "integer-encoding", "itertools 0.13.0", "json-schema-compatibility-validator", "jsonschema", - "key-wallet", - "key-wallet-manager", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "key-wallet-manager 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "lazy_static", "log", "nohash-hasher", @@ -1807,7 +1969,7 @@ dependencies = [ [[package]] name = "drive" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "arc-swap", "assert_matches", @@ -1817,7 +1979,7 @@ dependencies = [ "byteorder", "chrono", "ciborium", - "criterion", + "criterion 0.5.1", "derive_more 1.0.0", "dpp", "enum-map", @@ -1828,7 +1990,7 @@ dependencies = [ "grovedb-storage", "grovedb-version", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", "integer-encoding", "intmap", "itertools 0.13.0", @@ -1848,7 +2010,7 @@ dependencies = [ [[package]] name = "drive-abci" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "arc-swap", "assert_matches", @@ -1871,7 +2033,7 @@ dependencies = [ "envy", "file-rotate", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", 
"integer-encoding", "itertools 0.13.0", "lazy_static", @@ -1902,7 +2064,7 @@ dependencies = [ [[package]] name = "drive-proof-verifier" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "bincode 2.0.0-rc.3", "dapi-grpc", @@ -1911,7 +2073,7 @@ dependencies = [ "dpp", "drive", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", "platform-serialization", "platform-serialization-derive", "serde", @@ -2156,7 +2318,7 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "feature-flags-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "platform-value", "platform-version", @@ -2491,7 +2653,7 @@ dependencies = [ "grovedbg-types", "hex", "hex-literal", - "indexmap 2.11.3", + "indexmap 2.11.4", "integer-encoding", "intmap", "itertools 0.14.0", @@ -2547,7 +2709,7 @@ dependencies = [ "grovedb-version", "grovedb-visualize", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", "integer-encoding", "num_cpus", "rand 0.8.5", @@ -2625,7 +2787,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.11.3", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -3144,12 +3306,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.3" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92119844f513ffa41556430369ab02c295a3578af21cf945caa3e9e0c2481ac3" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", "serde_core", ] @@ -3312,9 +3474,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "852f13bec5eba4ba9afbeb93fd7c13fe56147f055939ae21c43a29a0ecb2702e" dependencies = [ "once_cell", "wasm-bindgen", @@ -3333,7 +3495,7 @@ 
dependencies = [ [[package]] name = "json-schema-compatibility-validator" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "assert_matches", "json-patch", @@ -3402,10 +3564,10 @@ dependencies = [ "bip39", "bitflags 2.9.4", "bs58", - "dash-network", - "dashcore", - "dashcore-private", - "dashcore_hashes", + "dash-network 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore-private 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "getrandom 0.2.16", "hex", "hkdf", @@ -3419,17 +3581,43 @@ dependencies = [ "zeroize", ] +[[package]] +name = "key-wallet" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "base58ck", + "bincode 2.0.0-rc.3", + "bincode_derive", + "bip39", + "bitflags 2.9.4", + "dash-network 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore-private 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "getrandom 0.2.16", + "hex", + "hkdf", + "rand 0.8.5", + "secp256k1", + "serde", + "serde_json", + "sha2", + "tracing", + "zeroize", +] + [[package]] name = "key-wallet-ffi" version = "0.40.0" source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145e2003d549619698511513db925c" dependencies = [ "cbindgen 0.29.0", - "dash-network", - "dashcore", + "dash-network 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore 
0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "hex", - "key-wallet", - "key-wallet-manager", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "key-wallet-manager 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "libc", "secp256k1", "tokio", @@ -3442,16 +3630,30 @@ source = "git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0#c877c1a74d145 dependencies = [ "async-trait", "bincode 2.0.0-rc.3", - "dashcore", - "dashcore_hashes", - "key-wallet", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "secp256k1", + "zeroize", +] + +[[package]] +name = "key-wallet-manager" +version = "0.40.0" +source = "git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964#e44b1fb2086ad57c8884995f9f93f14de91bf964" +dependencies = [ + "async-trait", + "bincode 2.0.0-rc.3", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore_hashes 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", "secp256k1", "zeroize", ] [[package]] name = "keyword-search-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "base58", "platform-value", @@ -3603,7 +3805,7 @@ dependencies = [ [[package]] name = "masternode-reward-shares-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "platform-value", "platform-version", @@ -3676,7 +3878,7 @@ dependencies = [ "http-body-util", "hyper", "hyper-util", - "indexmap 2.11.3", + "indexmap 2.11.4", "ipnet", "metrics", "metrics-util", @@ -4224,7 +4426,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.11.3", + "indexmap 2.11.4", ] [[package]] @@ -4315,7 +4517,7 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "platform-serialization" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "bincode 2.0.0-rc.3", "platform-version", @@ -4323,7 +4525,7 @@ dependencies = [ [[package]] name = "platform-serialization-derive" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "proc-macro2", "quote", @@ -4333,14 +4535,14 @@ dependencies = [ [[package]] name = "platform-value" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "base64 0.22.1", "bincode 2.0.0-rc.3", "bs58", "ciborium", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", "platform-serialization", "platform-version", "rand 0.8.5", @@ -4352,7 +4554,7 @@ dependencies = [ [[package]] name = "platform-value-convertible" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "quote", "syn 2.0.106", @@ -4360,7 +4562,7 @@ dependencies = [ [[package]] name = "platform-version" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "bincode 2.0.0-rc.3", "grovedb-version", @@ -4371,7 +4573,7 @@ dependencies = [ [[package]] name = "platform-versioning" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "proc-macro2", "quote", @@ -4380,13 +4582,13 @@ dependencies = [ [[package]] name = "platform-wallet" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ - "dashcore", + "dashcore 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "dpp", - "indexmap 2.11.3", - "key-wallet", - "key-wallet-manager", + "indexmap 2.11.4", + "key-wallet 0.40.0 (git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", + "key-wallet-manager 0.40.0 
(git+https://github.com/dashpay/rust-dashcore?tag=v0.40.0)", "serde", "thiserror 1.0.69", ] @@ -4532,6 +4734,21 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" +dependencies = [ + "cfg-if 1.0.3", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror 2.0.16", +] + [[package]] name = "prost" version = "0.13.5" @@ -4618,6 +4835,26 @@ dependencies = [ "prost 0.14.1", ] +[[package]] +name = "protobuf" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" +dependencies = [ + "once_cell", + "protobuf-support", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf-support" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" +dependencies = [ + "thiserror 1.0.69", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -4673,6 +4910,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "quick_cache" +version = "0.6.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad6644cb07b7f3488b9f3d2fde3b4c0a7fa367cafefb39dff93a659f76eb786" +dependencies = [ + "ahash 0.8.12", + "equivalent", + "hashbrown 0.15.5", + "parking_lot", +] + [[package]] name = "quinn" version = "0.11.9" @@ -4978,6 +5227,37 @@ dependencies = [ "webpki-roots", ] +[[package]] +name = "reqwest-middleware" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e" +dependencies = [ + "anyhow", + "async-trait", + "http", + "reqwest", + "serde", + "thiserror 1.0.69", + "tower-service", +] + +[[package]] +name = "reqwest-tracing" +version = 
"0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d70ea85f131b2ee9874f0b160ac5976f8af75f3c9badfe0d955880257d10bd83" +dependencies = [ + "anyhow", + "async-trait", + "getrandom 0.2.16", + "http", + "matchit 0.8.4", + "reqwest", + "reqwest-middleware", + "tracing", +] + [[package]] name = "resolv-conf" version = "0.7.5" @@ -5027,6 +5307,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "rmp" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +dependencies = [ + "byteorder", + "num-traits", + "paste", +] + +[[package]] +name = "rmp-serde" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e599a477cf9840e92f2cde9a7189e67b42c57532749bf90aea6ec10facd4db" +dependencies = [ + "byteorder", + "rmp", + "serde", +] + [[package]] name = "rocksdb" version = "0.23.0" @@ -5058,9 +5360,58 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "rs-dapi" +version = "2.1.0-pr.2716.1" +dependencies = [ + "async-trait", + "axum 0.8.4", + "base64 0.22.1", + "chrono", + "ciborium", + "clap", + "dapi-grpc", + "dash-spv 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dashcore-rpc 0.40.0 (git+https://github.com/dashpay/rust-dashcore?rev=e44b1fb2086ad57c8884995f9f93f14de91bf964)", + "dotenvy", + "dpp", + "envy", + "futures", + "hex", + "once_cell", + "prometheus", + "quick_cache", + "reqwest", + "reqwest-middleware", + "reqwest-tracing", + "rmp-serde", + "rs-dash-event-bus", + "serde", + "serde_bytes", + "serde_json", + "serial_test", + "sha2", + "tempfile", + "test-case", + "thiserror 2.0.16", + "tokio", + "tokio-stream", + "tokio-tungstenite", + "tokio-util", + "tonic 0.14.2", + "tower 0.5.2", + "tower-http", + "tracing", + "tracing-subscriber", + "url", + "xxhash-rust", + "zeroize", + "zeromq", +] + [[package]] name = 
"rs-dapi-client" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "backon", "chrono", @@ -5085,9 +5436,18 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "rs-dash-event-bus" +version = "2.1.0-pr.2716.1" +dependencies = [ + "metrics", + "tokio", + "tracing", +] + [[package]] name = "rs-sdk-ffi" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "bincode 2.0.0-rc.3", "bs58", @@ -5116,7 +5476,7 @@ dependencies = [ [[package]] name = "rs-sdk-trusted-context-provider" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "arc-swap", "async-trait", @@ -5331,6 +5691,15 @@ dependencies = [ "regex", ] +[[package]] +name = "scc" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc" +dependencies = [ + "sdd", +] + [[package]] name = "schannel" version = "0.1.28" @@ -5387,6 +5756,12 @@ dependencies = [ "sha2", ] +[[package]] +name = "sdd" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca" + [[package]] name = "seahash" version = "4.1.0" @@ -5546,7 +5921,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.3", + "indexmap 2.11.4", "itoa", "memchr", "ryu", @@ -5623,7 +5998,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.11.3", + "indexmap 2.11.4", "schemars 0.9.0", "schemars 1.0.4", "serde", @@ -5667,6 +6042,31 @@ dependencies = [ "serde", ] +[[package]] +name = "serial_test" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" +dependencies = [ + "futures", + "log", + "once_cell", + "parking_lot", + 
"scc", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "sha1" version = "0.10.6" @@ -5767,7 +6167,7 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "simple-signer" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "base64 0.22.1", "bincode 2.0.0-rc.3", @@ -5864,7 +6264,7 @@ dependencies = [ [[package]] name = "strategy-tests" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "bincode 2.0.0-rc.3", "dpp", @@ -6261,7 +6661,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token-history-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "platform-value", "platform-version", @@ -6345,6 +6745,20 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "tokio-tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +dependencies = [ + "futures-util", + "log", + "native-tls", + "tokio", + "tokio-native-tls", + "tungstenite", +] + [[package]] name = "tokio-util" version = "0.7.16" @@ -6353,6 +6767,7 @@ checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "tokio", @@ -6394,7 +6809,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.11.3", + "indexmap 2.11.4", "toml_datetime 0.6.11", "winnow 0.5.40", ] @@ -6405,7 +6820,7 @@ version = "0.22.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.3", + "indexmap 2.11.4", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -6419,7 +6834,7 @@ version = "0.23.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2ad0b7ae9cfeef5605163839cb9221f453399f15cfb5c10be9885fcf56611f9" dependencies = [ - "indexmap 2.11.3", + "indexmap 2.11.4", "toml_datetime 0.7.1", "toml_parser", "winnow 0.7.13", @@ -6594,7 +7009,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.11.3", + "indexmap 2.11.4", "pin-project-lite", "slab", "sync_wrapper", @@ -6743,6 +7158,26 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "native-tls", + "rand 0.8.5", + "sha1", + "thiserror 1.0.69", + "url", + "utf-8", +] + [[package]] name = "typenum" version = "1.18.0" @@ -6934,7 +7369,7 @@ dependencies = [ [[package]] name = "wallet-utils-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "platform-value", "platform-version", @@ -6977,21 +7412,22 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "ab10a69fbd0a177f5f649ad4d8d3305499c42bab9aef2f7ff592d0ec8f833819" dependencies = [ "cfg-if 1.0.3", "once_cell", "rustversion", 
"wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "0bb702423545a6007bbc368fde243ba47ca275e549c8a28617f56f6ba53b1d1c" dependencies = [ "bumpalo", "log", @@ -7003,9 +7439,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "a0b221ff421256839509adbb55998214a70d829d3a28c69b4a6672e9d2a42f67" dependencies = [ "cfg-if 1.0.3", "js-sys", @@ -7016,9 +7452,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "fc65f4f411d91494355917b605e1480033152658d71f722a90647f56a70c88a0" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7026,9 +7462,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "ffc003a991398a8ee604a401e194b6b3a39677b3173d6e74495eb51b82e99a32" dependencies = [ "proc-macro2", "quote", @@ -7039,18 +7475,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "293c37f4efa430ca14db3721dfbe48d8c33308096bd44d80ebaa775ab71ba1cf" dependencies = [ "unicode-ident", ] [[package]] name = "wasm-bindgen-test" 
-version = "0.3.50" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +checksum = "aee0a0f5343de9221a0d233b04520ed8dc2e6728dce180b1dcd9288ec9d9fa3c" dependencies = [ "js-sys", "minicov", @@ -7061,9 +7497,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.50" +version = "0.3.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +checksum = "a369369e4360c2884c3168d22bded735c43cccae97bbc147586d4b480edd138d" dependencies = [ "proc-macro2", "quote", @@ -7072,7 +7508,7 @@ dependencies = [ [[package]] name = "wasm-dpp" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "anyhow", "async-trait", @@ -7096,18 +7532,18 @@ dependencies = [ [[package]] name = "wasm-drive-verify" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "base64 0.22.1", "bincode 2.0.0-rc.3", "bs58", "ciborium", "console_error_panic_hook", - "criterion", + "criterion 0.7.0", "dpp", "drive", "hex", - "indexmap 2.11.3", + "indexmap 2.11.4", "js-sys", "nohash-hasher", "serde", @@ -7131,7 +7567,7 @@ dependencies = [ [[package]] name = "wasm-sdk" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "base64 0.22.1", "bip39", @@ -7181,9 +7617,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "fbe734895e869dc429d78c4b433f8d17d95f8d05317440b4fad5ab2d33e596dc" dependencies = [ "js-sys", "wasm-bindgen", @@ -7695,7 +8131,7 @@ checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "withdrawals-contract" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" dependencies = [ "num_enum 
0.5.11", "platform-value", @@ -7721,6 +8157,12 @@ dependencies = [ "tap", ] +[[package]] +name = "xxhash-rust" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" + [[package]] name = "yansi" version = "1.0.1" @@ -7813,6 +8255,29 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "zeromq" +version = "0.5.0-pre" +source = "git+https://github.com/gvz/zmq.rs?rev=b0787de310befaedd1f762e3b9bc711612d8137f#b0787de310befaedd1f762e3b9bc711612d8137f" +dependencies = [ + "async-trait", + "asynchronous-codec", + "bytes", + "crossbeam-queue", + "futures", + "log", + "num-traits", + "once_cell", + "parking_lot", + "rand 0.8.5", + "regex", + "scc", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "uuid", +] + [[package]] name = "zerotrie" version = "0.2.2" @@ -7875,7 +8340,7 @@ dependencies = [ "arbitrary", "crc32fast", "flate2", - "indexmap 2.11.3", + "indexmap 2.11.4", "memchr", "zopfli", ] diff --git a/Cargo.toml b/Cargo.toml index 0041d4db196..c669a9eb00b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,8 @@ members = [ "packages/rs-sdk-ffi", "packages/wasm-drive-verify", "packages/dash-platform-balance-checker", + "packages/rs-dapi", + "packages/rs-dash-event-bus", "packages/rs-platform-wallet", "packages/wasm-sdk", ] diff --git a/Dockerfile b/Dockerfile index 5560419149b..d08c0d28cb9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -292,7 +292,7 @@ ONBUILD ARG CARGO_BUILD_PROFILE=dev RUN --mount=type=secret,id=AWS <> /platform/packages/dapi-grpc/build.rs && \ + cargo build \ + --profile "${CARGO_BUILD_PROFILE}" \ + --package rs-dapi \ + --locked && \ + cp target/${OUT_DIRECTORY}/rs-dapi /artifacts/ && \ + if [[ -x /usr/bin/sccache ]]; then sccache --show-stats; fi && \ + # Remove /platform to reduce layer size + rm -rf /platform + +# +# STAGE: RS-DAPI RUNTIME +# +FROM alpine:${ALPINE_VERSION} AS rs-dapi + +LABEL maintainer="Dash Developers " +LABEL 
description="Dash Platform API (DAPI) - Rust Implementation" + +RUN apk add --no-cache libgcc libstdc++ + +ENV RUST_BACKTRACE=1 +ENV RUST_LOG=info + +COPY --from=build-rs-dapi /artifacts/rs-dapi /usr/bin/rs-dapi + +# Create example .env file +RUN mkdir -p /app +COPY packages/rs-dapi/.env.example /app/.env + +# Double-check that we don't have missing deps +RUN ldd /usr/bin/rs-dapi + +# +# Create new non-root user +# +ARG USERNAME=dapi +ARG USER_UID=1000 +ARG USER_GID=$USER_UID +RUN addgroup -g $USER_GID $USERNAME && \ + adduser -D -u $USER_UID -G $USERNAME -h /app $USERNAME && \ + mkdir -p /var/log/rs-dapi && \ + chown -R $USER_UID:$USER_GID /app /var/log/rs-dapi + +USER $USERNAME + +WORKDIR /app +ENTRYPOINT ["/usr/bin/rs-dapi", "start"] + +# Default gRPC port +EXPOSE 3010 +# Optional HTTP/REST port (if implemented) +EXPOSE 3000 diff --git a/README.md b/README.md index 110cb2f5b29..0860ab96b4f 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ this repository may be used on the following networks: in terminal run `echo 'export PATH="/opt/homebrew/opt/llvm/bin:$PATH"' >> ~/.zshrc` or `echo 'export PATH="/opt/homebrew/opt/llvm/bin:$PATH"' >> ~/.bash_profile` depending on your default shell. You can find your default shell with `echo $SHELL` - Reload your shell with `source ~/.zshrc` or `source ~/.bash_profile` - - `cargo install wasm-bindgen-cli@0.2.100` + - `cargo install wasm-bindgen-cli@0.2.103` - *double-check that wasm-bindgen-cli version above matches wasm-bindgen version in Cargo.lock file* - *Depending on system, additional packages may need to be installed as a prerequisite for wasm-bindgen-cli. If anything is missing, installation will error and prompt what packages are missing (i.e. 
clang, llvm, libssl-dev)* - essential build tools - example for Debian/Ubuntu: `apt install -y build-essential libssl-dev pkg-config clang cmake llvm` diff --git a/package.json b/package.json index 9037de80278..d4f8b61309e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/platform", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "private": true, "scripts": { "setup": "yarn install && yarn run build && yarn run configure", diff --git a/packages/bench-suite/package.json b/packages/bench-suite/package.json index a4e887a40b0..2cea8e03239 100644 --- a/packages/bench-suite/package.json +++ b/packages/bench-suite/package.json @@ -1,7 +1,7 @@ { "name": "@dashevo/bench-suite", "private": true, - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Dash Platform benchmark tool", "scripts": { "bench": "node ./bin/bench.js", diff --git a/packages/check-features/Cargo.toml b/packages/check-features/Cargo.toml index 6af72546803..5873205e691 100644 --- a/packages/check-features/Cargo.toml +++ b/packages/check-features/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "check-features" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/packages/dapi-grpc/Cargo.toml b/packages/dapi-grpc/Cargo.toml index a7bc50ae828..3634671bb44 100644 --- a/packages/dapi-grpc/Cargo.toml +++ b/packages/dapi-grpc/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dapi-grpc" description = "GRPC client for Dash Platform" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" authors = [ "Samuel Westrich ", "Igor Markin ", diff --git a/packages/dapi-grpc/package.json b/packages/dapi-grpc/package.json index 511c11ee767..99bc42efba9 100644 --- a/packages/dapi-grpc/package.json +++ b/packages/dapi-grpc/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/dapi-grpc", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", 
"description": "DAPI GRPC definition file and generated clients", "browser": "browser.js", "main": "node.js", diff --git a/packages/dapi-grpc/protos/platform/v0/platform.proto b/packages/dapi-grpc/protos/platform/v0/platform.proto index 46be29d86bc..a11a9b1bc87 100644 --- a/packages/dapi-grpc/protos/platform/v0/platform.proto +++ b/packages/dapi-grpc/protos/platform/v0/platform.proto @@ -37,7 +37,8 @@ service Platform { rpc getDocuments(GetDocumentsRequest) returns (GetDocumentsResponse); rpc getIdentityByPublicKeyHash(GetIdentityByPublicKeyHashRequest) returns (GetIdentityByPublicKeyHashResponse); - rpc getIdentityByNonUniquePublicKeyHash(GetIdentityByNonUniquePublicKeyHashRequest) + rpc getIdentityByNonUniquePublicKeyHash( + GetIdentityByNonUniquePublicKeyHashRequest) returns (GetIdentityByNonUniquePublicKeyHashResponse); rpc waitForStateTransitionResult(WaitForStateTransitionResultRequest) returns (WaitForStateTransitionResultResponse); @@ -49,7 +50,8 @@ service Platform { GetProtocolVersionUpgradeVoteStatusRequest) returns (GetProtocolVersionUpgradeVoteStatusResponse); rpc getEpochsInfo(GetEpochsInfoRequest) returns (GetEpochsInfoResponse); - rpc getFinalizedEpochInfos(GetFinalizedEpochInfosRequest) returns (GetFinalizedEpochInfosResponse); + rpc getFinalizedEpochInfos(GetFinalizedEpochInfosRequest) + returns (GetFinalizedEpochInfosResponse); // What votes are currently happening for a specific contested index rpc getContestedResources(GetContestedResourcesRequest) returns (GetContestedResourcesResponse); @@ -611,9 +613,7 @@ message GetIdentityByNonUniquePublicKeyHashRequest { message GetIdentityByNonUniquePublicKeyHashResponse { message GetIdentityByNonUniquePublicKeyHashResponseV0 { - message IdentityResponse { - optional bytes identity = 1; - } + message IdentityResponse { optional bytes identity = 1; } message IdentityProvedResponse { Proof grovedb_identity_public_key_hash_proof = 1; @@ -624,7 +624,7 @@ message GetIdentityByNonUniquePublicKeyHashResponse { 
IdentityProvedResponse proof = 2; } - ResponseMetadata metadata = 3; // Metadata about the blockchain state + ResponseMetadata metadata = 3; // Metadata about the blockchain state } oneof version { GetIdentityByNonUniquePublicKeyHashResponseV0 v0 = 1; } } @@ -801,11 +801,11 @@ message GetEpochsInfoResponse { message GetFinalizedEpochInfosRequest { message GetFinalizedEpochInfosRequestV0 { - uint32 start_epoch_index = 1; // The starting epoch index - bool start_epoch_index_included = 2; // Whether to include the start epoch - uint32 end_epoch_index = 3; // The ending epoch index - bool end_epoch_index_included = 4; // Whether to include the end epoch - bool prove = 5; // Flag to request a proof as the response + uint32 start_epoch_index = 1; // The starting epoch index + bool start_epoch_index_included = 2; // Whether to include the start epoch + uint32 end_epoch_index = 3; // The ending epoch index + bool end_epoch_index_included = 4; // Whether to include the end epoch + bool prove = 5; // Flag to request a proof as the response } oneof version { GetFinalizedEpochInfosRequestV0 v0 = 1; } @@ -813,9 +813,10 @@ message GetFinalizedEpochInfosRequest { message GetFinalizedEpochInfosResponse { message GetFinalizedEpochInfosResponseV0 { - // FinalizedEpochInfos holds a collection of finalized epoch information entries + // FinalizedEpochInfos holds a collection of finalized epoch information + // entries message FinalizedEpochInfos { - repeated FinalizedEpochInfo finalized_epoch_infos = + repeated FinalizedEpochInfo finalized_epoch_infos = 1; // List of finalized information for each requested epoch } @@ -824,15 +825,17 @@ message GetFinalizedEpochInfosResponse { uint32 number = 1; // The number of the epoch uint64 first_block_height = 2 [ jstype = JS_STRING ]; // The height of the first block in this epoch - uint32 first_core_block_height = + uint32 first_core_block_height = 3; // The height of the first Core block in this epoch uint64 first_block_time = 4 - [ jstype = 
JS_STRING ]; // The timestamp of the first block (milliseconds) - double fee_multiplier = 5; // The fee multiplier (converted from permille) + [ jstype = + JS_STRING ]; // The timestamp of the first block (milliseconds) + double fee_multiplier = 5; // The fee multiplier (converted from permille) uint32 protocol_version = 6; // The protocol version for this epoch uint64 total_blocks_in_epoch = 7 [ jstype = JS_STRING ]; // Total number of blocks in the epoch - uint32 next_epoch_start_core_block_height = 8; // Core block height where next epoch starts + uint32 next_epoch_start_core_block_height = + 8; // Core block height where next epoch starts uint64 total_processing_fees = 9 [ jstype = JS_STRING ]; // Total processing fees collected uint64 total_distributed_storage_fees = 10 @@ -841,20 +844,21 @@ message GetFinalizedEpochInfosResponse { [ jstype = JS_STRING ]; // Total storage fees created uint64 core_block_rewards = 12 [ jstype = JS_STRING ]; // Rewards from core blocks - repeated BlockProposer block_proposers = 13; // List of block proposers and their counts + repeated BlockProposer block_proposers = + 13; // List of block proposers and their counts } // BlockProposer represents a block proposer and their block count message BlockProposer { - bytes proposer_id = 1; // The proposer's identifier + bytes proposer_id = 1; // The proposer's identifier uint32 block_count = 2; // Number of blocks proposed } oneof result { - FinalizedEpochInfos epochs = + FinalizedEpochInfos epochs = 1; // The actual finalized information about the requested epochs - Proof proof = - 2; // Cryptographic proof of the finalized epoch information, if requested + Proof proof = 2; // Cryptographic proof of the finalized epoch + // information, if requested } ResponseMetadata metadata = 3; // Metadata about the blockchain state } @@ -1456,7 +1460,6 @@ message GetTokenDirectPurchasePricesRequest { oneof version { GetTokenDirectPurchasePricesRequestV0 v0 = 1; } } - // Response to 
GetTokenDirectPurchasePricesRequest, containing information about // direct purchase prices defined for requested token IDs. message GetTokenDirectPurchasePricesResponse { @@ -1517,9 +1520,7 @@ message GetTokenContractInfoRequest { bool prove = 2; } - oneof version { - GetTokenContractInfoRequestV0 v0 = 1; - } + oneof version { GetTokenContractInfoRequestV0 v0 = 1; } } // Response to GetTokenContractInfoRequest. @@ -1546,9 +1547,7 @@ message GetTokenContractInfoResponse { ResponseMetadata metadata = 3; } - oneof version { - GetTokenContractInfoResponseV0 v0 = 1; - } + oneof version { GetTokenContractInfoResponseV0 v0 = 1; } } message GetTokenPreProgrammedDistributionsRequest { @@ -1605,15 +1604,16 @@ message GetTokenPerpetualDistributionLastClaimRequest { message GetTokenPerpetualDistributionLastClaimRequestV0 { // 32‑byte token identifier - bytes token_id = 1; + bytes token_id = 1; - // This should be set if you wish to get back the last claim info as a specific type + // This should be set if you wish to get back the last claim info as a + // specific type optional ContractTokenInfo contract_info = 2; // Identity whose last‑claim timestamp is requested bytes identity_id = 4; // Return GroveDB / signature proof instead of raw value - bool prove = 5; + bool prove = 5; } oneof version { GetTokenPerpetualDistributionLastClaimRequestV0 v0 = 1; } @@ -1626,17 +1626,17 @@ message GetTokenPerpetualDistributionLastClaimResponse { oneof paid_at { uint64 timestamp_ms = 1 [ jstype = JS_STRING ]; // Unix epoch, ms uint64 block_height = 2 [ jstype = JS_STRING ]; // Core‑block height - uint32 epoch = 3; // Epoch index - bytes raw_bytes = 4; // Arbitrary encoding + uint32 epoch = 3; // Epoch index + bytes raw_bytes = 4; // Arbitrary encoding } } oneof result { LastClaimInfo last_claim = 1; // Direct answer - Proof proof = 2; // GroveDB / quorum proof + Proof proof = 2; // GroveDB / quorum proof } - ResponseMetadata metadata = 3; // Chain context + ResponseMetadata metadata = 3; 
// Chain context } oneof version { GetTokenPerpetualDistributionLastClaimResponseV0 v0 = 1; } @@ -1647,7 +1647,7 @@ message GetTokenTotalSupplyRequest { bytes token_id = 1; bool prove = 2; } - oneof version {GetTokenTotalSupplyRequestV0 v0 = 1;} + oneof version { GetTokenTotalSupplyRequestV0 v0 = 1; } } message GetTokenTotalSupplyResponse { diff --git a/packages/dapi/package.json b/packages/dapi/package.json index 1e757364199..b0977b402fe 100644 --- a/packages/dapi/package.json +++ b/packages/dapi/package.json @@ -1,7 +1,7 @@ { "name": "@dashevo/dapi", "private": true, - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "A decentralized API for the Dash network", "scripts": { "api": "node scripts/api.js", diff --git a/packages/dash-platform-balance-checker/Cargo.toml b/packages/dash-platform-balance-checker/Cargo.toml index 0838fcd4f92..76831c85196 100644 --- a/packages/dash-platform-balance-checker/Cargo.toml +++ b/packages/dash-platform-balance-checker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dash-platform-balance-checker" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" [[bin]] diff --git a/packages/dash-spv/package.json b/packages/dash-spv/package.json index 6ffa3d3eeb0..b3ff40666b9 100644 --- a/packages/dash-spv/package.json +++ b/packages/dash-spv/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/dash-spv", - "version": "3.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Repository containing SPV functions used by @dashevo", "main": "index.js", "scripts": { diff --git a/packages/dashmate/configs/defaults/getBaseConfigFactory.js b/packages/dashmate/configs/defaults/getBaseConfigFactory.js index 09353d6520c..ab1703c57a7 100644 --- a/packages/dashmate/configs/defaults/getBaseConfigFactory.js +++ b/packages/dashmate/configs/defaults/getBaseConfigFactory.js @@ -230,6 +230,12 @@ export default function getBaseConfigFactory() { }, }, dapi: { + // Controls whether to use the deprecated JS DAPI stack + // If 
enabled = true -> use old DAPI (JS) + // If enabled = false -> use rs-dapi (Rust) [default] + deprecated: { + enabled: false, + }, api: { docker: { image: `dashpay/dapi:${dockerImageVersion}`, @@ -245,6 +251,30 @@ export default function getBaseConfigFactory() { }, waitForStResultTimeout: 120000, }, + rsDapi: { + docker: { + image: `dashpay/rs-dapi:${dockerImageVersion}`, + deploy: { + replicas: 1, + }, + build: { + enabled: false, + context: path.join(PACKAGE_ROOT_DIR, '..', '..'), + dockerFile: path.join(PACKAGE_ROOT_DIR, '..', '..', 'Dockerfile'), + target: 'rs-dapi', + }, + }, + metrics: { + host: '127.0.0.1', + port: 9091, + }, + logs: { + level: 'debug', + jsonFormat: false, + accessLogPath: null, + accessLogFormat: 'combined', + }, + }, }, drive: { abci: { diff --git a/packages/dashmate/configs/getConfigFileMigrationsFactory.js b/packages/dashmate/configs/getConfigFileMigrationsFactory.js index a39f2016260..a3d68ef8bf4 100644 --- a/packages/dashmate/configs/getConfigFileMigrationsFactory.js +++ b/packages/dashmate/configs/getConfigFileMigrationsFactory.js @@ -1119,6 +1119,92 @@ export default function getConfigFileMigrationsFactory(homeDir, defaultConfigs) }); return configFile; }, + '2.1.0-dev.9': (configFile) => { + Object.entries(configFile.configs) + .forEach(([name, options]) => { + const defaultConfig = getDefaultConfigByNameOrGroup(name, options.group); + + if (!options.platform.dapi.deprecated) { + options.platform.dapi.deprecated = defaultConfig.get('platform.dapi.deprecated'); + } else if (typeof options.platform.dapi.deprecated.enabled === 'undefined') { + options.platform.dapi.deprecated.enabled = defaultConfig.get('platform.dapi.deprecated.enabled'); + } + + if (!options.platform.dapi.rsDapi) { + options.platform.dapi.rsDapi = lodash.cloneDeep(defaultConfig.get('platform.dapi.rsDapi')); + return; + } + + const defaultMetrics = defaultConfig.get('platform.dapi.rsDapi.metrics'); + + if (options.platform.dapi.rsDapi.healthCheck) { + 
options.platform.dapi.rsDapi.metrics = lodash.cloneDeep( + options.platform.dapi.rsDapi.healthCheck, + ); + delete options.platform.dapi.rsDapi.healthCheck; + } + + if (!options.platform.dapi.rsDapi.metrics) { + options.platform.dapi.rsDapi.metrics = lodash.cloneDeep(defaultMetrics); + } + + if (typeof options.platform.dapi.rsDapi.metrics.host === 'undefined') { + options.platform.dapi.rsDapi.metrics.host = defaultMetrics.host; + } + + if (typeof options.platform.dapi.rsDapi.metrics.port === 'undefined') { + options.platform.dapi.rsDapi.metrics.port = defaultMetrics.port; + } + + if (!options.platform.dapi.rsDapi.logs) { + options.platform.dapi.rsDapi.logs = lodash.cloneDeep(defaultConfig.get('platform.dapi.rsDapi.logs')); + } + + if (typeof options.platform.dapi.rsDapi.logs.level === 'undefined') { + options.platform.dapi.rsDapi.logs.level = defaultConfig.get('platform.dapi.rsDapi.logs.level'); + } + + if (typeof options.platform.dapi.rsDapi.logs.jsonFormat === 'undefined') { + options.platform.dapi.rsDapi.logs.jsonFormat = defaultConfig.get('platform.dapi.rsDapi.logs.jsonFormat'); + } + + if (typeof options.platform.dapi.rsDapi.logs.accessLogPath === 'undefined') { + options.platform.dapi.rsDapi.logs.accessLogPath = defaultConfig.get('platform.dapi.rsDapi.logs.accessLogPath'); + } + + if (typeof options.platform.dapi.rsDapi.logs.accessLogFormat === 'undefined') { + options.platform.dapi.rsDapi.logs.accessLogFormat = defaultConfig.get('platform.dapi.rsDapi.logs.accessLogFormat'); + } + }); + + return configFile; + }, + '2.1.0-pr.2716.1': (configFile) => { + Object.entries(configFile.configs) + .forEach(([name, options]) => { + const defaultConfig = getDefaultConfigByNameOrGroup(name, options.group); + + options.platform.dapi.api.docker.image = defaultConfig + .get('platform.dapi.api.docker.image'); + + options.platform.drive.abci.docker.image = defaultConfig + .get('platform.drive.abci.docker.image'); + + if (options.platform.dapi.rsDapi + && 
defaultConfig.has('platform.dapi.rsDapi.docker.image')) { + options.platform.dapi.rsDapi.docker.image = defaultConfig + .get('platform.dapi.rsDapi.docker.image'); + } + + if (options.platform.drive.tenderdash + && defaultConfig.has('platform.drive.tenderdash.docker.image')) { + options.platform.drive.tenderdash.docker.image = defaultConfig + .get('platform.drive.tenderdash.docker.image'); + } + }); + + return configFile; + }, }; } diff --git a/packages/dashmate/docker-compose.build.rs-dapi.yml b/packages/dashmate/docker-compose.build.rs-dapi.yml new file mode 100644 index 00000000000..dfb63a992e2 --- /dev/null +++ b/packages/dashmate/docker-compose.build.rs-dapi.yml @@ -0,0 +1,22 @@ +--- + +services: + rs_dapi: + build: + context: ${PLATFORM_DAPI_RS_DAPI_DOCKER_BUILD_CONTEXT:?err} + dockerfile: ${PLATFORM_DAPI_RS_DAPI_DOCKER_BUILD_DOCKER_FILE:?err} + target: rs-dapi + args: + RUSTC_WRAPPER: ${RUSTC_WRAPPER} + SCCACHE_MEMCACHED: ${SCCACHE_MEMCACHED} + SCCACHE_GHA_ENABLED: ${SCCACHE_GHA_ENABLED} + ACTIONS_CACHE_URL: ${ACTIONS_CACHE_URL} + ACTIONS_RUNTIME_TOKEN: ${ACTIONS_RUNTIME_TOKEN} + SCCACHE_BUCKET: ${SCCACHE_BUCKET} + SCCACHE_REGION: ${SCCACHE_REGION} + SCCACHE_S3_KEY_PREFIX: ${SCCACHE_S3_KEY_PREFIX} + cache_from: + - ${CACHE_RS_DAPI_FROM:-${PLATFORM_DAPI_RS_DAPI_DOCKER_IMAGE}} + cache_to: + - ${CACHE_RS_DAPI_TO:-type=inline} + image: rs-dapi:local diff --git a/packages/dashmate/docker-compose.rate_limiter.yml b/packages/dashmate/docker-compose.rate_limiter.yml index 036589b6bb0..d652b7fa22d 100644 --- a/packages/dashmate/docker-compose.rate_limiter.yml +++ b/packages/dashmate/docker-compose.rate_limiter.yml @@ -10,8 +10,6 @@ x-default-logging: &default-logging services: gateway: depends_on: - - dapi_api - - dapi_core_streams - drive_abci - gateway_rate_limiter diff --git a/packages/dashmate/docker-compose.yml b/packages/dashmate/docker-compose.yml index 1bdbc801a4c..dc0ba8f2710 100644 --- a/packages/dashmate/docker-compose.yml +++ 
b/packages/dashmate/docker-compose.yml @@ -1,5 +1,4 @@ --- - x-default-logging: &default-logging driver: local options: @@ -161,7 +160,7 @@ services: - 3004 - 3005 profiles: - - platform + - platform-dapi-deprecated dapi_core_streams: image: ${PLATFORM_DAPI_API_DOCKER_IMAGE:?err} @@ -190,7 +189,51 @@ services: command: yarn run core-streams stop_grace_period: 10s profiles: - - platform + - platform-dapi-deprecated + + # Uses existing configuration variables but deploys on different port (3010) + rs_dapi: + image: ${PLATFORM_DAPI_RS_DAPI_DOCKER_IMAGE:?err} + labels: + org.dashmate.service.title: "rs-dapi (Rust DAPI)" + restart: unless-stopped + logging: *default-logging + deploy: + replicas: ${PLATFORM_DAPI_API_DOCKER_DEPLOY_REPLICAS:-1} + depends_on: + - drive_tenderdash + environment: + # Use same configuration as JS DAPI but different gRPC port for parallel deployment + - DAPI_GRPC_SERVER_PORT=3010 + - DAPI_JSON_RPC_PORT=3009 + - DAPI_REST_GATEWAY_PORT=8080 + - DAPI_METRICS_PORT=${PLATFORM_DAPI_RS_DAPI_METRICS_PORT:?err} + - DAPI_BIND_ADDRESS=0.0.0.0 + - DAPI_ENABLE_REST=false + - DAPI_DRIVE_URI=http://drive_abci:26670 + - DAPI_TENDERDASH_URI=http://drive_tenderdash:${PLATFORM_DRIVE_TENDERDASH_RPC_PORT:?err} + - DAPI_TENDERDASH_WEBSOCKET_URI=ws://drive_tenderdash:${PLATFORM_DRIVE_TENDERDASH_RPC_PORT:?err}/websocket + - DAPI_CORE_ZMQ_URL=tcp://core:${CORE_ZMQ_PORT:?err} + - DAPI_CORE_RPC_URL=http://core:${CORE_RPC_PORT:?err} + - DAPI_CORE_RPC_USER=dapi + - DAPI_CORE_RPC_PASS=${CORE_RPC_USERS_DAPI_PASSWORD:?err} + - DAPI_STATE_TRANSITION_WAIT_TIMEOUT=${PLATFORM_DAPI_API_WAIT_FOR_ST_RESULT_TIMEOUT:?err} + - DAPI_LOGGING_LEVEL=${PLATFORM_DAPI_RS_DAPI_LOGS_LEVEL:-info} + - DAPI_LOGGING_JSON_FORMAT=${PLATFORM_DAPI_RS_DAPI_LOGS_JSON_FORMAT:-false} + - DAPI_LOGGING_ACCESS_LOG_PATH=${PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_PATH:-} + - DAPI_LOGGING_ACCESS_LOG_FORMAT=${PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_FORMAT:-combined} + volumes: + - type: 
${PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_VOLUME_TYPE:-volume} + source: ${PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_VOLUME_SOURCE:-rs-dapi-access-logs} + target: ${PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_CONTAINER_DIR} + expose: + - 3009 # JSON-RPC + - 3010 # gRPC (different from current DAPI to avoid conflict) + - ${PLATFORM_DAPI_RS_DAPI_METRICS_PORT:?err} # Metrics + ports: + - ${PLATFORM_DAPI_RS_DAPI_METRICS_HOST:?err}:${PLATFORM_DAPI_RS_DAPI_METRICS_PORT:?err}:${PLATFORM_DAPI_RS_DAPI_METRICS_PORT:?err} + profiles: + - platform-dapi-rs gateway: image: ${PLATFORM_GATEWAY_DOCKER_IMAGE:?err} @@ -206,8 +249,6 @@ services: - ${PLATFORM_GATEWAY_METRICS_HOST:?err}:${PLATFORM_GATEWAY_METRICS_PORT:?err}:9090 - ${PLATFORM_GATEWAY_ADMIN_HOST:?err}:${PLATFORM_GATEWAY_ADMIN_PORT:?err}:9901 depends_on: - - dapi_api - - dapi_core_streams - drive_abci networks: - default @@ -228,6 +269,7 @@ volumes: core_data: drive_abci_data: drive_tenderdash: + rs-dapi-access-logs: networks: default: diff --git a/packages/dashmate/docs/config/dapi.md b/packages/dashmate/docs/config/dapi.md index a8049566b30..54e7989bb4b 100644 --- a/packages/dashmate/docs/config/dapi.md +++ b/packages/dashmate/docs/config/dapi.md @@ -34,3 +34,34 @@ These settings allow you to build the DAPI API Docker image from source. If `ena | `platform.dapi.api.waitForStResultTimeout` | Timeout for state transitions (ms) | `120000` | `240000` | This timeout setting controls how long DAPI will wait for state transition results before returning a timeout error to the client. It is specified in milliseconds. 
+ +## rs-dapi (Rust) + +### Docker + +| Option | Description | Default | Example | +|--------|-------------|---------|---------| +| `platform.dapi.rsDapi.docker.image` | Docker image for rs-dapi | `dashpay/rs-dapi:${version}` | `dashpay/rs-dapi:latest` | +| `platform.dapi.rsDapi.docker.build.enabled` | Enable custom build | `false` | `true` | +| `platform.dapi.rsDapi.docker.build.context` | Build context directory | `path.join(PACKAGE_ROOT_DIR, '..', '..')` (Dash Platform repo root) | `"/path/to/context"` | +| `platform.dapi.rsDapi.docker.build.dockerFile` | Path to Dockerfile | `path.join(PACKAGE_ROOT_DIR, '..', '..', 'Dockerfile')` | `"/path/to/Dockerfile"` | +| `platform.dapi.rsDapi.docker.build.target` | Target build stage | `rs-dapi` | `"rs-dapi"` | +| `platform.dapi.rsDapi.docker.deploy.replicas` | Number of replicas | `1` | `2` | + +### Health Monitoring and Metrics + +| Option | Description | Default | Example | +|--------|-------------|---------|---------| +| `platform.dapi.rsDapi.metrics.host` | Host interface exposed on the Docker host | `127.0.0.1` | `0.0.0.0` | +| `platform.dapi.rsDapi.metrics.port` | Host port for both health checks and Prometheus metrics | `9091` | `9191` | + +The rs-dapi metrics server exposes `/health` and `/metrics`. Prometheus-compatible metrics are served from `/metrics` on the configured port, allowing separate node instances on the same machine to use distinct ports. The `/health` endpoint aggregates dependency checks (Drive, Tenderdash, Core) and returns `503` when any upstream component is unhealthy. + +### Logging + +| Option | Description | Default | Example | +|--------|-------------|---------|---------| +| `platform.dapi.rsDapi.logs.level` | rs-dapi log verbosity. 
Accepts standard levels (`error`, `warn`, `info`, `debug`, `trace`, `off`) or a full `RUST_LOG` filter string | `info` | `debug` | +| `platform.dapi.rsDapi.logs.jsonFormat` | Enable structured JSON application logs (`true`) or human-readable logs (`false`) | `false` | `true` | +| `platform.dapi.rsDapi.logs.accessLogPath` | Absolute path for HTTP/gRPC access logs. Empty or `null` disables access logging | `null` | `"/var/log/rs-dapi/access.log"` | +| `platform.dapi.rsDapi.logs.accessLogFormat` | Access log output format | `combined` | `json` | diff --git a/packages/dashmate/docs/services/platform.md b/packages/dashmate/docs/services/platform.md index 8935bd66581..7bf3d1cd371 100644 --- a/packages/dashmate/docs/services/platform.md +++ b/packages/dashmate/docs/services/platform.md @@ -154,3 +154,6 @@ Tenderdash is the consensus engine that provides Byzantine Fault Tolerant (BFT) | **DAPI API** | JSON-RPC | 3004 | (fixed internal) | (internal) | - | | | gRPC | 3005 | (fixed internal) | (internal) | - | | **DAPI Core Streams** | gRPC Streaming | 3006 | (fixed internal) | (internal) | - | +| **rs-dapi (Rust)** | Health + Metrics | 9091 | `platform.dapi.rsDapi.metrics.port` | 127.0.0.1 | `platform.dapi.rsDapi.metrics.host` | + +The rs-dapi metrics server exposes health endpoints alongside Prometheus data on `/metrics` from the same port. 
diff --git a/packages/dashmate/package.json b/packages/dashmate/package.json index f5be57146b1..e0d3268de11 100644 --- a/packages/dashmate/package.json +++ b/packages/dashmate/package.json @@ -1,6 +1,6 @@ { "name": "dashmate", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Distribution package for Dash node installation", "scripts": { "lint": "eslint .", diff --git a/packages/dashmate/src/config/configJsonSchema.js b/packages/dashmate/src/config/configJsonSchema.js index 2daa18d5e3d..cdf693c62a5 100644 --- a/packages/dashmate/src/config/configJsonSchema.js +++ b/packages/dashmate/src/config/configJsonSchema.js @@ -804,6 +804,16 @@ export default { dapi: { type: 'object', properties: { + deprecated: { + type: 'object', + properties: { + enabled: { + type: 'boolean', + }, + }, + required: ['enabled'], + additionalProperties: false, + }, api: { type: 'object', properties: { @@ -841,8 +851,82 @@ export default { required: ['docker', 'waitForStResultTimeout'], additionalProperties: false, }, + rsDapi: { + type: 'object', + properties: { + docker: { + type: 'object', + properties: { + image: { + type: 'string', + minLength: 1, + }, + deploy: { + type: 'object', + properties: { + replicas: { + type: 'integer', + minimum: 0, + }, + }, + additionalProperties: false, + required: ['replicas'], + }, + build: { + $ref: '#/definitions/dockerBuild', + }, + }, + required: ['image', 'build', 'deploy'], + additionalProperties: false, + }, + metrics: { + type: 'object', + properties: { + host: { + type: 'string', + minLength: 1, + }, + port: { + type: 'integer', + minimum: 1, + maximum: 65535, + }, + }, + required: ['host', 'port'], + additionalProperties: false, + }, + logs: { + type: 'object', + properties: { + level: { + type: 'string', + minLength: 1, + description: 'error, warn, info, debug, trace, off or logging specification string in RUST_LOG format', + enum: ['error', 'warn', 'info', 'debug', 'trace', 'off'], + }, + jsonFormat: { + type: 
'boolean', + description: 'Emit structured JSON application logs when true', + }, + accessLogPath: { + type: ['string', 'null'], + description: 'Filesystem path for access logs; leave empty or null to disable access logging', + }, + accessLogFormat: { + type: 'string', + description: 'Access log format', + enum: ['combined', 'json'], + }, + }, + required: ['level', 'jsonFormat', 'accessLogPath', 'accessLogFormat'], + additionalProperties: false, + }, + }, + required: ['docker', 'metrics', 'logs'], + additionalProperties: false, + }, }, - required: ['api'], + required: ['api', 'rsDapi'], additionalProperties: false, }, drive: { diff --git a/packages/dashmate/src/config/generateEnvsFactory.js b/packages/dashmate/src/config/generateEnvsFactory.js index 8d7823db070..b2c5190fe75 100644 --- a/packages/dashmate/src/config/generateEnvsFactory.js +++ b/packages/dashmate/src/config/generateEnvsFactory.js @@ -1,6 +1,7 @@ import os from 'os'; -import convertObjectToEnvs from './convertObjectToEnvs.js'; +import path from 'path'; import { DASHMATE_HELPER_DOCKER_IMAGE } from '../constants.js'; +import convertObjectToEnvs from './convertObjectToEnvs.js'; /** * @param {ConfigFile} configFile @@ -44,6 +45,9 @@ export default function generateEnvsFactory(configFile, homeDir, getConfigProfil dockerComposeFiles.push('docker-compose.build.dapi_api.yml'); dockerComposeFiles.push('docker-compose.build.dapi_core_streams.yml'); } + if (config.get('platform.dapi.rsDapi.docker.build.enabled')) { + dockerComposeFiles.push('docker-compose.build.rs-dapi.yml'); + } } if (config.get('core.insight.enabled')) { @@ -73,7 +77,7 @@ export default function generateEnvsFactory(configFile, homeDir, getConfigProfil driveAbciMetricsUrl = 'http://0.0.0.0:29090'; } - return { + const envs = { DASHMATE_HOME_DIR: homeDir.getPath(), LOCAL_UID: uid, LOCAL_GID: gid, @@ -89,6 +93,49 @@ export default function generateEnvsFactory(configFile, homeDir, getConfigProfil PLATFORM_DRIVE_ABCI_METRICS_URL: 
driveAbciMetricsUrl, ...convertObjectToEnvs(config.getOptions()), }; + + const configuredAccessLogPath = config.get('platform.dapi.rsDapi.logs.accessLogPath'); + const hasConfiguredPath = typeof configuredAccessLogPath === 'string' + && configuredAccessLogPath.trim() !== ''; + + const containerAccessLogDir = '/var/log/rs-dapi'; + let containerAccessLogPath = path.posix.join(containerAccessLogDir, 'access.log'); + let accessLogVolumeType = 'volume'; + let accessLogVolumeSource = 'rs-dapi-access-logs'; + + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_HOST_PATH = ''; + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_HOST_DIR = ''; + + if (hasConfiguredPath) { + const homeDirPath = homeDir.getPath(); + + const hostAccessLogPath = path.isAbsolute(configuredAccessLogPath) + ? configuredAccessLogPath + : path.resolve(homeDirPath, configuredAccessLogPath); + + const hostAccessLogDir = path.dirname(hostAccessLogPath); + const hostAccessLogFile = path.basename(hostAccessLogPath); + + containerAccessLogPath = path.posix.join(containerAccessLogDir, hostAccessLogFile); + accessLogVolumeType = 'bind'; + accessLogVolumeSource = hostAccessLogDir; + + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_HOST_PATH = hostAccessLogPath; + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_HOST_DIR = hostAccessLogDir; + } + + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_CONTAINER_DIR = containerAccessLogDir; + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_CONTAINER_PATH = containerAccessLogPath; + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_VOLUME_TYPE = accessLogVolumeType; + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_VOLUME_SOURCE = accessLogVolumeSource; + + if (hasConfiguredPath) { + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_PATH = containerAccessLogPath; + } else { + envs.PLATFORM_DAPI_RS_DAPI_LOGS_ACCESS_LOG_PATH = ''; + } + + return envs; } return generateEnvs; diff --git a/packages/dashmate/src/config/getConfigProfilesFactory.js b/packages/dashmate/src/config/getConfigProfilesFactory.js index 
3beebf0389e..c1aa1bf6428 100644 --- a/packages/dashmate/src/config/getConfigProfilesFactory.js +++ b/packages/dashmate/src/config/getConfigProfilesFactory.js @@ -14,6 +14,13 @@ export default function getConfigProfilesFactory() { if (config.get('platform.enable')) { profiles.push('platform'); + + // Select which DAPI stack to enable via profiles + if (config.get('platform.dapi.deprecated.enabled')) { + profiles.push('platform-dapi-deprecated'); + } else { + profiles.push('platform-dapi-rs'); + } } return profiles; diff --git a/packages/dashmate/src/docker/DockerCompose.js b/packages/dashmate/src/docker/DockerCompose.js index 8a508008055..468764ae8fb 100644 --- a/packages/dashmate/src/docker/DockerCompose.js +++ b/packages/dashmate/src/docker/DockerCompose.js @@ -1,17 +1,17 @@ import { Observable } from 'rxjs'; -import isWsl from 'is-wsl'; import dockerCompose from '@dashevo/docker-compose'; +import isWsl from 'is-wsl'; import hasbin from 'hasbin'; -import semver from 'semver'; import util from 'node:util'; +import semver from 'semver'; import { PACKAGE_ROOT_DIR } from '../constants.js'; -import ServiceAlreadyRunningError from './errors/ServiceAlreadyRunningError.js'; +import ContainerIsNotPresentError from './errors/ContainerIsNotPresentError.js'; import DockerComposeError from './errors/DockerComposeError.js'; +import ServiceAlreadyRunningError from './errors/ServiceAlreadyRunningError.js'; import ServiceIsNotRunningError from './errors/ServiceIsNotRunningError.js'; -import ContainerIsNotPresentError from './errors/ContainerIsNotPresentError.js'; export default class DockerCompose { /** diff --git a/packages/dashmate/src/listr/tasks/resetNodeTaskFactory.js b/packages/dashmate/src/listr/tasks/resetNodeTaskFactory.js index 108a53047e2..506e601778d 100644 --- a/packages/dashmate/src/listr/tasks/resetNodeTaskFactory.js +++ b/packages/dashmate/src/listr/tasks/resetNodeTaskFactory.js @@ -24,6 +24,35 @@ export default function resetNodeTaskFactory( homeDir, generateEnvs, 
) { + /** + * Remove path but ignore permission issues to avoid failing reset on root-owned directories. + * + * @param {string} targetPath + * @param {Object} [task] + */ + function removePathSafely(targetPath, task) { + try { + fs.rmSync(targetPath, { + recursive: true, + force: true, + }); + } catch (e) { + if (e?.code === 'EACCES' || e?.code === 'EPERM') { + const message = `Skipping removal of '${targetPath}' due to insufficient permissions`; + + if (task) { + // eslint-disable-next-line no-param-reassign + task.output = message; + } else if (process.env.DEBUG) { + // eslint-disable-next-line no-console + console.warn(message); + } + } else if (e?.code !== 'ENOENT') { + throw e; + } + } + } + /** * @typedef {resetNodeTask} * @param {Config} config @@ -126,21 +155,18 @@ export default function resetNodeTaskFactory( { title: `Remove config ${config.getName()}`, enabled: (ctx) => ctx.removeConfig, - task: () => { + task: (_, task) => { configFile.removeConfig(config.getName()); const serviceConfigsPath = homeDir.joinPath(config.getName()); - fs.rmSync(serviceConfigsPath, { - recursive: true, - force: true, - }); + removePathSafely(serviceConfigsPath, task); }, }, { title: `Reset config ${config.getName()}`, enabled: (ctx) => !ctx.removeConfig && ctx.isHardReset, - task: (ctx) => { + task: (ctx, task) => { const groupName = config.get('group'); const defaultConfigName = groupName || config.getName(); @@ -164,10 +190,7 @@ export default function resetNodeTaskFactory( serviceConfigsPath = path.join(serviceConfigsPath, 'platform'); } - fs.rmSync(serviceConfigsPath, { - recursive: true, - force: true, - }); + removePathSafely(serviceConfigsPath, task); } else { // Delete config if no base config configFile.removeConfig(config.getName()); @@ -175,10 +198,7 @@ export default function resetNodeTaskFactory( // Remove service configs const serviceConfigsPath = homeDir.joinPath(defaultConfigName); - fs.rmSync(serviceConfigsPath, { - recursive: true, - force: true, - }); + 
removePathSafely(serviceConfigsPath, task); } }, }, diff --git a/packages/dashmate/src/listr/tasks/setup/setupLocalPresetTaskFactory.js b/packages/dashmate/src/listr/tasks/setup/setupLocalPresetTaskFactory.js index fd6b5c66b25..eb1d5ebcf39 100644 --- a/packages/dashmate/src/listr/tasks/setup/setupLocalPresetTaskFactory.js +++ b/packages/dashmate/src/listr/tasks/setup/setupLocalPresetTaskFactory.js @@ -176,6 +176,7 @@ export default function setupLocalPresetTaskFactory( config.set('platform.drive.abci.grovedbVisualizer.port', config.get('platform.drive.abci.grovedbVisualizer.port') + (i * 100)); config.set('platform.drive.abci.tokioConsole.port', config.get('platform.drive.abci.tokioConsole.port') + (i * 100)); config.set('platform.drive.abci.metrics.port', config.get('platform.drive.abci.metrics.port') + (i * 100)); + config.set('platform.dapi.rsDapi.metrics.port', config.get('platform.dapi.rsDapi.metrics.port') + (i * 100)); config.set('platform.gateway.admin.port', config.get('platform.gateway.admin.port') + (i * 100)); config.set('platform.gateway.listeners.dapiAndDrive.port', config.get('platform.gateway.listeners.dapiAndDrive.port') + (i * 100)); config.set('platform.gateway.metrics.port', config.get('platform.gateway.metrics.port') + (i * 100)); diff --git a/packages/dashmate/src/listr/tasks/startNodeTaskFactory.js b/packages/dashmate/src/listr/tasks/startNodeTaskFactory.js index 06bff9e7edb..ed72b6bfaf6 100644 --- a/packages/dashmate/src/listr/tasks/startNodeTaskFactory.js +++ b/packages/dashmate/src/listr/tasks/startNodeTaskFactory.js @@ -1,4 +1,5 @@ import { Listr } from 'listr2'; +import path from 'path'; import { Observable } from 'rxjs'; import { NETWORK_LOCAL } from '../../constants.js'; import isServiceBuildRequired from '../../util/isServiceBuildRequired.js'; @@ -12,6 +13,7 @@ import isServiceBuildRequired from '../../util/isServiceBuildRequired.js'; * @param {buildServicesTask} buildServicesTask * @param {getConnectionHost} getConnectionHost * 
@param {ensureFileMountExists} ensureFileMountExists + * @param {HomeDir} homeDir * @return {startNodeTask} */ export default function startNodeTaskFactory( @@ -22,6 +24,7 @@ export default function startNodeTaskFactory( buildServicesTask, getConnectionHost, ensureFileMountExists, + homeDir, ) { /** * @typedef {startNodeTask} @@ -62,6 +65,18 @@ export default function startNodeTaskFactory( if (tenderdashLogFilePath !== null) { ensureFileMountExists(tenderdashLogFilePath, 0o666); } + + const configuredAccessLogPath = config.get('platform.dapi.rsDapi.logs.accessLogPath'); + const hasConfiguredAccessLogPath = typeof configuredAccessLogPath === 'string' + && configuredAccessLogPath.trim() !== ''; + + if (hasConfiguredAccessLogPath) { + const hostAccessLogPath = path.isAbsolute(configuredAccessLogPath) + ? configuredAccessLogPath + : path.resolve(homeDir.getPath(), configuredAccessLogPath); + + ensureFileMountExists(hostAccessLogPath, 0o666); + } } return new Listr([ diff --git a/packages/dashmate/src/test/constants/services.js b/packages/dashmate/src/test/constants/services.js index beb3d1d9aaf..d83e5b0a25f 100644 --- a/packages/dashmate/src/test/constants/services.js +++ b/packages/dashmate/src/test/constants/services.js @@ -1,9 +1,8 @@ export default { dashmate_helper: 'Dashmate Helper', gateway: 'Gateway', - dapi_api: 'DAPI API', + rs_dapi: 'DAPI', drive_tenderdash: 'Drive Tenderdash', drive_abci: 'Drive ABCI', - dapi_core_streams: 'DAPI Core Streams', core: 'Core', }; diff --git a/packages/dashmate/templates/platform/gateway/envoy.yaml.dot b/packages/dashmate/templates/platform/gateway/envoy.yaml.dot index a3bf8820ff7..e9ceb7953ec 100644 --- a/packages/dashmate/templates/platform/gateway/envoy.yaml.dot +++ b/packages/dashmate/templates/platform/gateway/envoy.yaml.dot @@ -1,3 +1,4 @@ +{{ useDeprecated = it.platform.dapi.deprecated && it.platform.dapi.deprecated.enabled; }} !ignore filters: &filters - name: envoy.http_connection_manager typed_config: @@ -17,7 +18,7 
@@ # HTTP2 support multiple streams (requests) per connection. # For HTTP1 it applies for single request. # This param is overwritten in specific routes. - max_stream_duration: 15s + max_stream_duration: 60s # Reject malformed requests with headers containing underscores. headers_with_underscores_action: REJECT_REQUEST # HTTP2 specific settings @@ -111,6 +112,7 @@ - name: http_services domains: [ "*" ] routes: + {{? useDeprecated }} # DAPI core streaming endpoints - match: prefix: "/org.dash.platform.dapi.v0.Core/subscribeTo" @@ -194,6 +196,82 @@ cluster: dapi_json_rpc # Upstream response timeout timeout: 10s + {{??}} + # Core streaming endpoints + - match: + prefix: "/org.dash.platform.dapi.v0.Core/subscribeTo" + route: + cluster: rs_dapi + idle_timeout: 300s + # Upstream response timeout + timeout: 601s + max_stream_duration: + # Entire stream/request timeout + max_stream_duration: 601s + grpc_timeout_header_max: 600s + # Core endpoints + - match: + prefix: "/org.dash.platform.dapi.v0.Core" + route: + cluster: rs_dapi + # Upstream response timeout + timeout: 15s + # rs-dapi subscribePlatformEvents endpoint with bigger timeout (now exposed directly) + - match: + path: "/org.dash.platform.dapi.v0.Platform/subscribePlatformEvents" + route: + cluster: rs_dapi + idle_timeout: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + # Upstream response timeout + timeout: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + max_stream_duration: + # Entire stream/request timeout + max_stream_duration: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + grpc_timeout_header_max: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + # rs-dapi waitForStateTransitionResult endpoint with bigger timeout (now exposed directly) + - match: + path: "/org.dash.platform.dapi.v0.Platform/waitForStateTransitionResult" + route: + cluster: rs_dapi + idle_timeout: {{= 
it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + # Upstream response timeout + timeout: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + max_stream_duration: + # Entire stream/request timeout + max_stream_duration: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + grpc_timeout_header_max: {{= it.platform.gateway.listeners.dapiAndDrive.waitForStResultTimeout }} + # rs-dapi Platform endpoints (now exposed directly) + - match: + prefix: "/org.dash.platform.dapi.v0.Platform" + route: + cluster: rs_dapi + # Upstream response timeout + timeout: 60s + # Static responses of unsupported api versions + # core static response + - match: + safe_regex: + regex: "\/org\\.dash\\.platform\\.dapi\\.v[1-9]+\\." + response_headers_to_add: + - header: + key: "Content-Type" + value: "application/grpc-web+proto" + - header: + key: "grpc-status" + value: "12" + - header: + key: "grpc-message" + value: "Specified service version is not supported" + direct_response: + status: 204 + # JSON RPC endpoint + - match: + path: "/" + route: + cluster: rs_dapi_json_rpc + # Upstream response timeout + timeout: 10s + {{?}} {{? 
it.platform.gateway.rateLimiter.enabled }} rate_limits: - actions: @@ -207,7 +285,7 @@ allow_methods: GET, PUT, DELETE, POST, OPTIONS allow_headers: keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,custom-header-1,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout max_age: "1728000" - expose_headers: custom-header-1,grpc-status,grpc-message + expose_headers: custom-header-1,grpc-status,grpc-message,code,drive-error-data-bin,dash-serialized-consensus-error-bin,stack-bin static_resources: listeners: @@ -286,6 +364,37 @@ static_resources: "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router {{?}} clusters: + - name: rs_dapi + type: STRICT_DNS + per_connection_buffer_limit_bytes: 32768 # 32 KiB + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: + initial_stream_window_size: 65536 # 64 KiB + initial_connection_window_size: 1048576 # 1 MiB + circuit_breakers: + thresholds: + - priority: DEFAULT + # The maximum number of parallel requests + max_requests: {{= it.platform.gateway.upstreams.dapiApi.maxRequests }} + upstream_connection_options: + tcp_keepalive: + keepalive_probes: 3 + keepalive_time: 25 # 25 seconds + keepalive_interval: 20 # 20 seconds + load_assignment: + cluster_name: rs_dapi + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: rs_dapi + port_value: 3010 + + # Deprecated DAPI - name: dapi_api type: STRICT_DNS + per_connection_buffer_limit_bytes: 32768 # 32 KiB @@ -352,6 +461,28 @@ static_resources: socket_address: address: dapi_api port_value: 3004 + + # rs-dapi JSON-RPC cluster + - name: rs_dapi_json_rpc + type: STRICT_DNS + per_connection_buffer_limit_bytes: 32768 # 32 KiB + circuit_breakers: + thresholds: + - priority: DEFAULT + # The maximum number of 
parallel connections + max_connections: {{= it.platform.gateway.upstreams.dapiJsonRpc.maxRequests }} + # The maximum number of parallel requests + max_requests: {{= it.platform.gateway.upstreams.dapiJsonRpc.maxRequests }} + load_assignment: + cluster_name: rs_dapi_json_rpc + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: rs_dapi + port_value: 3009 + - name: drive_grpc type: STRICT_DNS per_connection_buffer_limit_bytes: 32768 # 32 KiB diff --git a/packages/dashmate/test/e2e/localNetwork.spec.js b/packages/dashmate/test/e2e/localNetwork.spec.js index ab44a55991d..157debb5dd0 100644 --- a/packages/dashmate/test/e2e/localNetwork.spec.js +++ b/packages/dashmate/test/e2e/localNetwork.spec.js @@ -46,6 +46,7 @@ describe('Local Network', function main() { localConfig.set('dashmate.helper.docker.build.enabled', true); localConfig.set('platform.drive.abci.docker.build.enabled', true); localConfig.set('platform.dapi.api.docker.build.enabled', true); + localConfig.set('platform.dapi.rsDapi.docker.build.enabled', true); } localConfig.set('docker.network.subnet', '172.30.0.0/24'); diff --git a/packages/dashmate/test/e2e/testnetEvonode.spec.js b/packages/dashmate/test/e2e/testnetEvonode.spec.js index 5ffd9e3f94e..29aea34b1a2 100644 --- a/packages/dashmate/test/e2e/testnetEvonode.spec.js +++ b/packages/dashmate/test/e2e/testnetEvonode.spec.js @@ -88,6 +88,7 @@ describe('Testnet Evonode', function main() { config.set('dashmate.helper.docker.build.enabled', true); config.set('platform.drive.abci.docker.build.enabled', true); config.set('platform.dapi.api.docker.build.enabled', true); + config.set('platform.dapi.rsDapi.docker.build.enabled', true); } config.set('docker.network.subnet', '172.27.24.0/24'); diff --git a/packages/dashmate/test/e2e/testnetFullnode.spec.js b/packages/dashmate/test/e2e/testnetFullnode.spec.js index 01d36b61dbd..5ac3493429e 100644 --- a/packages/dashmate/test/e2e/testnetFullnode.spec.js +++ 
b/packages/dashmate/test/e2e/testnetFullnode.spec.js @@ -85,6 +85,7 @@ describe('Testnet Fullnode', function main() { config.set('dashmate.helper.docker.build.enabled', true); config.set('platform.drive.abci.docker.build.enabled', true); config.set('platform.dapi.api.docker.build.enabled', true); + config.set('platform.dapi.rsDapi.docker.build.enabled', true); } config.set('docker.network.subnet', '172.27.24.0/24'); diff --git a/packages/dashpay-contract/Cargo.toml b/packages/dashpay-contract/Cargo.toml index 012061d31d1..4c730d21c49 100644 --- a/packages/dashpay-contract/Cargo.toml +++ b/packages/dashpay-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dashpay-contract" description = "DashPay data contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/dashpay-contract/package.json b/packages/dashpay-contract/package.json index 3e0914ee2a2..e11a65e1025 100644 --- a/packages/dashpay-contract/package.json +++ b/packages/dashpay-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/dashpay-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Reference contract of the DashPay DPA on Dash Evolution", "scripts": { "lint": "eslint .", diff --git a/packages/data-contracts/Cargo.toml b/packages/data-contracts/Cargo.toml index 7806a9263b6..fdc02da697b 100644 --- a/packages/data-contracts/Cargo.toml +++ b/packages/data-contracts/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "data-contracts" description = "Dash Platform system data contracts" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/dpns-contract/Cargo.toml b/packages/dpns-contract/Cargo.toml index 995744897c1..95a2e14dd7c 100644 --- a/packages/dpns-contract/Cargo.toml +++ b/packages/dpns-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dpns-contract" description = "DPNS data 
contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/dpns-contract/package.json b/packages/dpns-contract/package.json index 202c34759d4..abd4175c255 100644 --- a/packages/dpns-contract/package.json +++ b/packages/dpns-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/dpns-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "A contract and helper scripts for DPNS DApp", "scripts": { "lint": "eslint .", diff --git a/packages/feature-flags-contract/Cargo.toml b/packages/feature-flags-contract/Cargo.toml index 831841581d7..f07ca24c9a2 100644 --- a/packages/feature-flags-contract/Cargo.toml +++ b/packages/feature-flags-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "feature-flags-contract" description = "Feature flags data contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/feature-flags-contract/package.json b/packages/feature-flags-contract/package.json index 22c10afb708..4dcb3c0d37e 100644 --- a/packages/feature-flags-contract/package.json +++ b/packages/feature-flags-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/feature-flags-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Data Contract to store Dash Platform feature flags", "scripts": { "build": "", diff --git a/packages/js-dapi-client/lib/transport/ReconnectableStream.js b/packages/js-dapi-client/lib/transport/ReconnectableStream.js index af3a22c8fb8..ae742c7d533 100644 --- a/packages/js-dapi-client/lib/transport/ReconnectableStream.js +++ b/packages/js-dapi-client/lib/transport/ReconnectableStream.js @@ -56,7 +56,7 @@ class ReconnectableStream extends EventEmitter { const opts = { ...defaultOptions, ...options }; - this.logger = opts.logger || { debug: () => {} }; + this.logger = opts.logger 
|| { debug: () => { } }; /** * Auto-reconnect interval in millisecond @@ -298,6 +298,7 @@ class ReconnectableStream extends EventEmitter { cancel() { // eslint-disable-next-line no-unused-expressions this.logger.debug('[ReconnectableStream] Canceling streams'); + this.stopAutoReconnect(); // Hack for browsers to properly unsubscribe from ERROR event. // (It will continue propagating despite of calling cancel) diff --git a/packages/js-dapi-client/package.json b/packages/js-dapi-client/package.json index d33b5f4de78..7bdd90b9bed 100644 --- a/packages/js-dapi-client/package.json +++ b/packages/js-dapi-client/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/dapi-client", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Client library used to access Dash DAPI endpoints", "main": "lib/index.js", "contributors": [ diff --git a/packages/js-dash-sdk/package.json b/packages/js-dash-sdk/package.json index f0067badf88..fc68416710c 100644 --- a/packages/js-dash-sdk/package.json +++ b/packages/js-dash-sdk/package.json @@ -1,6 +1,6 @@ { "name": "dash", - "version": "5.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Dash library for JavaScript/TypeScript ecosystem (Wallet, DAPI, Primitives, BLS, ...)", "main": "build/index.js", "unpkg": "dist/dash.min.js", diff --git a/packages/js-evo-sdk/package.json b/packages/js-evo-sdk/package.json index 025cd787e83..b2333ba3369 100644 --- a/packages/js-evo-sdk/package.json +++ b/packages/js-evo-sdk/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/evo-sdk", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "type": "module", "main": "./dist/evo-sdk.module.js", "types": "./dist/sdk.d.ts", diff --git a/packages/js-grpc-common/package.json b/packages/js-grpc-common/package.json index f3184aeebf9..500d11dac5c 100644 --- a/packages/js-grpc-common/package.json +++ b/packages/js-grpc-common/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/grpc-common", - "version": "2.1.0-dev.8", + "version": 
"2.1.0-pr.2716.1", "description": "Common GRPC library", "main": "index.js", "scripts": { diff --git a/packages/keyword-search-contract/Cargo.toml b/packages/keyword-search-contract/Cargo.toml index d8c49f95aef..1a64ac3f3d7 100644 --- a/packages/keyword-search-contract/Cargo.toml +++ b/packages/keyword-search-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "keyword-search-contract" description = "Search data contract schema and tools. Keyword Search contract is used to find other contracts and tokens" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/keyword-search-contract/package.json b/packages/keyword-search-contract/package.json index 48bb4ef75be..832358abe85 100644 --- a/packages/keyword-search-contract/package.json +++ b/packages/keyword-search-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/keyword-search-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "A contract that allows searching for contracts", "scripts": { "lint": "eslint .", diff --git a/packages/masternode-reward-shares-contract/Cargo.toml b/packages/masternode-reward-shares-contract/Cargo.toml index a55f7ee77a4..f1d30483e62 100644 --- a/packages/masternode-reward-shares-contract/Cargo.toml +++ b/packages/masternode-reward-shares-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "masternode-reward-shares-contract" description = "Masternode reward shares data contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/masternode-reward-shares-contract/package.json b/packages/masternode-reward-shares-contract/package.json index 018d4ddf393..8d334c9f140 100644 --- a/packages/masternode-reward-shares-contract/package.json +++ b/packages/masternode-reward-shares-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/masternode-reward-shares-contract", - 
"version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "A contract and helper scripts for reward sharing", "scripts": { "lint": "eslint .", diff --git a/packages/platform-test-suite/package.json b/packages/platform-test-suite/package.json index 4644cf57d26..deea14862a8 100644 --- a/packages/platform-test-suite/package.json +++ b/packages/platform-test-suite/package.json @@ -1,7 +1,7 @@ { "name": "@dashevo/platform-test-suite", "private": true, - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Dash Network end-to-end tests", "scripts": { "test": "yarn exec bin/test.sh", diff --git a/packages/rs-context-provider/Cargo.toml b/packages/rs-context-provider/Cargo.toml index 69a74fe5880..8bc2c7420ca 100644 --- a/packages/rs-context-provider/Cargo.toml +++ b/packages/rs-context-provider/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dash-context-provider" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" authors = ["sam@dash.org"] license = "MIT" @@ -8,11 +8,13 @@ description = "Context provider traits for Dash Platform SDK" [dependencies] dpp = { path = "../rs-dpp", default-features = false } -drive = { path = "../rs-drive", default-features = false, features = ["verify"] } +drive = { path = "../rs-drive", default-features = false, features = [ + "verify", +] } thiserror = "1.0" hex = { version = "0.4", optional = true } serde = { version = "1.0", optional = true } serde_json = { version = "1.0", optional = true } [features] -mocks = ["hex", "serde", "serde_json", "dpp/data-contract-serde-conversion"] \ No newline at end of file +mocks = ["hex", "serde", "serde_json", "dpp/data-contract-serde-conversion"] diff --git a/packages/rs-dapi-client/Cargo.toml b/packages/rs-dapi-client/Cargo.toml index 686ff13a9ab..31229f085e7 100644 --- a/packages/rs-dapi-client/Cargo.toml +++ b/packages/rs-dapi-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rs-dapi-client" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" 
edition = "2021" [features] diff --git a/packages/rs-dapi-client/src/lib.rs b/packages/rs-dapi-client/src/lib.rs index c54e4d3f65b..00a272b0a72 100644 --- a/packages/rs-dapi-client/src/lib.rs +++ b/packages/rs-dapi-client/src/lib.rs @@ -35,7 +35,10 @@ pub use request_settings::RequestSettings; /// A DAPI request could be executed with an initialized [DapiClient]. /// /// # Examples +/// Requires the `mocks` feature. /// ``` +/// # #[cfg(feature = "mocks")] +/// # { /// use rs_dapi_client::{RequestSettings, AddressList, mock::MockDapiClient, DapiClientError, DapiRequest, ExecutionError}; /// use dapi_grpc::platform::v0::{self as proto}; /// @@ -45,6 +48,7 @@ pub use request_settings::RequestSettings; /// let response = request.execute(&mut client, RequestSettings::default()).await?; /// # Ok::<(), ExecutionError>(()) /// # }; +/// # } /// ``` pub trait DapiRequest { /// Response from DAPI for this specific request. diff --git a/packages/rs-dapi-grpc-macros/Cargo.toml b/packages/rs-dapi-grpc-macros/Cargo.toml index cf1de0906a0..0694a0be2f5 100644 --- a/packages/rs-dapi-grpc-macros/Cargo.toml +++ b/packages/rs-dapi-grpc-macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "dapi-grpc-macros" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" description = "Macros used by dapi-grpc. Internal use only." 
diff --git a/packages/rs-dapi/.env.example b/packages/rs-dapi/.env.example new file mode 100644 index 00000000000..10ceaf03e0c --- /dev/null +++ b/packages/rs-dapi/.env.example @@ -0,0 +1,45 @@ +# rs-dapi Configuration Example +# Copy this file to .env and modify as needed + +# Server Configuration +# Unified gRPC server port (serves Core, Platform, and Streaming services) +DAPI_GRPC_SERVER_PORT=3005 +# JSON-RPC API server port +DAPI_JSON_RPC_PORT=3004 +# Metrics and health endpoints port (set to 0 to disable) +DAPI_METRICS_PORT=9090 +# IP address to bind all servers to +DAPI_BIND_ADDRESS=127.0.0.1 + +# External Service Configuration +# Drive service URI (Dash Platform storage layer) +DAPI_DRIVE_URI=http://127.0.0.1:6000 +# Tenderdash consensus service URI (HTTP RPC) +DAPI_TENDERDASH_URI=http://127.0.0.1:26657 +# Tenderdash WebSocket URI for real-time events +DAPI_TENDERDASH_WEBSOCKET_URI=ws://127.0.0.1:26657/websocket +# Dash Core ZMQ URL for blockchain events +DAPI_CORE_ZMQ_URL=tcp://127.0.0.1:29998 +# Dash Core JSON-RPC endpoint (hosted by dashd) +DAPI_CORE_RPC_URL=http://127.0.0.1:9998 +# Dash Core JSON-RPC credentials +DAPI_CORE_RPC_USER= +DAPI_CORE_RPC_PASS= + +# Cache Configuration (bytes) +DAPI_PLATFORM_CACHE_BYTES=2097152 +DAPI_CORE_CACHE_BYTES=67108864 + +# Timeout Configuration (in milliseconds) +# Timeout for waiting for state transition results +DAPI_STATE_TRANSITION_WAIT_TIMEOUT=30000 + +# Logging Configuration +# Main application log level (error, warn, info, debug, trace) +DAPI_LOGGING_LEVEL=info +# Enable JSON structured logging format +DAPI_LOGGING_JSON_FORMAT=false +# Access log file path (set to enable access logging, leave empty or unset to disable) +DAPI_LOGGING_ACCESS_LOG_PATH= +# Access log format ('combined' or 'json') +DAPI_LOGGING_ACCESS_LOG_FORMAT=combined diff --git a/packages/rs-dapi/Cargo.toml b/packages/rs-dapi/Cargo.toml new file mode 100644 index 00000000000..96ac8d12e5f --- /dev/null +++ b/packages/rs-dapi/Cargo.toml @@ -0,0 
+1,102 @@ +[package] +name = "rs-dapi" +version = "2.1.0-pr.2716.1" +edition = "2024" + +[[bin]] +name = "rs-dapi" +path = "src/main.rs" + +[dependencies] +# Async runtime +tokio = { version = "1.47.0", features = ["full"] } +tokio-stream = "0.1" +futures = "0.3.31" +tokio-util = "0.7.15" + +# gRPC framework +tonic = "0.14.2" + +# HTTP framework for JSON-RPC and metrics endpoints +axum = "0.8.4" +tower = "0.5.2" +tower-http = { version = "0.6.6", features = ["cors", "trace"] } + +# Serialization +serde = { version = "1.0.219", features = ["derive"] } +serde_json = "1.0.141" +ciborium = "0.2" +serde_bytes = "0.11" +rmp-serde = "1.3.0" + +# Configuration +envy = "0.4.2" + +clap = { version = "4.4.10", features = ["derive"] } +dotenvy = { version = "0.15.7" } +# Logging +tracing = "0.1.41" +tracing-subscriber = { version = "0.3.19", features = ["env-filter", "json"] } + +# Error handling +thiserror = "2.0.12" + +# Time handling +chrono = { version = "0.4.41", features = ["serde"] } + +# HTTP client for external API calls +reqwest = { version = "0.12.23", features = ["json"] } +reqwest-middleware = { version = "0.4.2", features = ["json"] } +reqwest-tracing = "0.5.8" + +# Hex encoding/decoding +hex = "0.4" + +# Base64 encoding/decoding +base64 = "0.22" + +# Cryptographic hashing +sha2 = "0.10" + +# Async traits +async-trait = "0.1" + +# WebSocket support for Tenderdash events +tokio-tungstenite = { version = "0.21", features = ["native-tls"] } +url = "2.5" + +# ZMQ for real-time blockchain events +# zeromq = { version = "0.4.1", default-features = false, features = [ +# "tokio-runtime", +# "tcp-transport", +# ] } +# Use fork of zmq.rs to receive Disconnect events, see https://github.com/zeromq/zmq.rs/pull/209 +zeromq = { git = "https://github.com/gvz/zmq.rs", rev = "b0787de310befaedd1f762e3b9bc711612d8137f", features = [ + "tokio-runtime", + "tcp-transport", +], default-features = false } + +xxhash-rust = { version = "0.8.15", features = ["xxh3"] } +# Dash Platform 
dependencies (using workspace versions) +dpp = { path = "../rs-dpp", default-features = false } +dapi-grpc = { path = "../dapi-grpc", features = ["server", "client", "serde"] } +quick_cache = "0.6.16" +prometheus = "0.14" +once_cell = "1.19" + +# Dash Core RPC client +dashcore-rpc = { git = "https://github.com/dashpay/rust-dashcore", rev = "e44b1fb2086ad57c8884995f9f93f14de91bf964" } +dash-spv = { git = "https://github.com/dashpay/rust-dashcore", rev = "e44b1fb2086ad57c8884995f9f93f14de91bf964" } + +rs-dash-event-bus = { path = "../rs-dash-event-bus" } + +zeroize = "1.8" + + +[dev-dependencies] +tempfile = "3.13.0" +serial_test = "3.1.1" +test-case = "3.3.1" + +[package.metadata.cargo-machete] +ignored = ["rs-dash-event-bus"] diff --git a/packages/rs-dapi/README.md b/packages/rs-dapi/README.md new file mode 100644 index 00000000000..71e6a708bc6 --- /dev/null +++ b/packages/rs-dapi/README.md @@ -0,0 +1,36 @@ +DAPI (Distributed API) server for Dash Platform + +Provides gRPC and JSON-RPC endpoints for blockchain and platform data. + +CONFIGURATION: +Server configuration is based on environment variables that can be set in the +environment or saved in a .env file. Use 'rs-dapi config' to see current values. 
+ +ENVIRONMENT VARIABLES: +Server Configuration: + DAPI_GRPC_SERVER_PORT - gRPC API server port (default: 3005) + DAPI_JSON_RPC_PORT - JSON-RPC server port (default: 3004) + DAPI_METRICS_PORT - Metrics server port (health + Prometheus, default: 9090, set to 0 to disable) + DAPI_BIND_ADDRESS - IP address to bind to (default: 127.0.0.1) + +Service Configuration: + DAPI_DRIVE_URI - Drive service URI (default: http://127.0.0.1:6000) + DAPI_TENDERDASH_URI - Tenderdash RPC URI (default: http://127.0.0.1:26657) + DAPI_TENDERDASH_WEBSOCKET_URI - Tenderdash WebSocket URI (default: ws://127.0.0.1:26657/websocket) + DAPI_CORE_ZMQ_URL - Dash Core ZMQ URL (default: tcp://127.0.0.1:29998) + DAPI_CORE_RPC_URL - Dash Core JSON-RPC URL (default: http://127.0.0.1:9998) + DAPI_CORE_RPC_USER - Dash Core RPC username (default: empty) + DAPI_CORE_RPC_PASS - Dash Core RPC password (default: empty) + DAPI_STATE_TRANSITION_WAIT_TIMEOUT - Timeout in ms (default: 30000) + +CONFIGURATION LOADING: +1. Command line environment variables (highest priority) +2. .env file variables (specified with --config or .env in current directory) +3. Default values (lowest priority) + +EXAMPLES: + rs-dapi # Start with defaults + rs-dapi --config /etc/dapi/production.env # Use custom config + rs-dapi -vv start # Start with trace logging + rs-dapi config # Show current configuration + rs-dapi --help # Show this help diff --git a/packages/rs-dapi/TODO.md b/packages/rs-dapi/TODO.md new file mode 100644 index 00000000000..6fa71aa9ebf --- /dev/null +++ b/packages/rs-dapi/TODO.md @@ -0,0 +1,129 @@ +# rs-dapi TODO & Migration Tracker + +This tracker lists remaining work to reach and exceed parity with the legacy JS `packages/dapi` implementation. Items are grouped by area and priority. File paths are included to anchor implementation work. 
+ +Legend: +- P0: Required for parity/MVP +- P1: Important for production completeness +- P2: Nice-to-have/cleanup + +## P0 — Core gRPC (Layer 1) Parity + +- [x] Implement Dash Core RPC client (dashcore-rpc) + - Files: `src/clients/core_client.rs` (new), `src/config/mod.rs` (Core RPC URL/user/pass) +- - Implemented so far: `getblockcount`, `getrawtransaction(info)`, `sendrawtransaction` +- [x] Wire Core service methods in `src/services/core_service.rs` + - [x] `get_best_block_height` + - [x] `get_block` + - [x] `get_transaction` + - [x] `broadcast_transaction` + - [x] `get_blockchain_status` + - [x] `get_masternode_status` + - [x] `get_estimated_transaction_fee` +- [x] Map and standardize error handling to match JS behavior + - Files: `src/services/core_service.rs`, `src/error.rs` +- [x] Cache immutable Core responses with LRU (invalidate on new block) + - Files: `src/clients/core_client.rs`, `src/cache.rs`, `src/services/streaming_service/mod.rs`, `src/server.rs` + - Methods cached inside CoreClient: `get_block_bytes_by_hash(_hex)`; invalidated on ZMQ `hashblock` + +## P0 — Platform gRPC (Layer 2) Essentials + +- [x] Ensure full Drive-proxy coverage via `drive_method!` in `src/services/platform_service/mod.rs` + - Cross-check with `packages/dapi-grpc/protos/platform/v0/platform.proto` +- [x] Add caching for `getStatus` with 3-minute TTL and invalidate on new block + - Files: `src/services/platform_service/get_status.rs`, use ZMQ block notifications or Tenderdash events to invalidate +- [x] Finalize error mapping consistency between `broadcastStateTransition` and `waitForStateTransitionResult` + - Files: `src/services/platform_service/broadcast_state_transition.rs`, `src/services/platform_service/wait_for_state_transition_result.rs` + - Align codes/messages with Drive error codes and JS behavior +- [x] Configure gRPC transport robustness (sizes/compression) + - Increase max inbound message size for large proofs/doc queries + - Enable compression (e.g., gzip) for 
client/server + - Files: `src/clients/drive_client.rs` (client channel), `src/server.rs` (tonic Server builder) + +## P0 — Streaming MVP (ZMQ → gRPC) + +- [x] Remove panic on ZMQ startup; add retry/backoff and health reporting + - Files: `src/services/streaming_service/mod.rs` +- [x] Implement historical streaming for `subscribeToBlockHeadersWithChainLocks` + - Files: `src/services/streaming_service/block_header_stream.rs` + - Notes: For `count > 0`, stream historical headers (80-byte headers) from Core RPC in chunks and close stream. For `count = 0`, forward live ZMQ Core blocks/chainlocks. +- [x] Implement historical queries for `subscribeToTransactionsWithProofs` + - Files: `src/services/streaming_service/transaction_stream.rs` + - Notes: For `count > 0`, fetch blocks from given height/hash, filter transactions via bloom, stream `RawTransactions` plus a block boundary (`RawMerkleBlock` placeholder using raw block), then close. For `count = 0`, optionally backfill to tip then subscribe to live ZMQ. 
+- [x] Implement basic bloom filter matching + transaction parsing +- [x] Provide initial masternode list diff on subscription + - Files: `src/services/streaming_service/masternode_list_stream.rs` + +## P0 — Protocol Translation Minimums (Parity with JS DAPI) + +- [x] JSON-RPC: legacy parity endpoints + - [x] `getBestBlockHash` + - [x] `getBlockHash` + - Files: `src/protocol/jsonrpc_translator.rs`, `src/server.rs` (dispatch) + - Notes: Translator implemented with tests; server dispatch returns hex strings + +## P2 — Protocol Translation (Non-legacy extras) + +- [x] JSON-RPC extension: `sendRawTransaction` (not in JS DAPI docs) + - Files: `src/protocol/jsonrpc_translator.rs`, `src/server.rs` + - Accepts `hex[, allowHighFees, bypassLimits]`; returns txid string +- [x] JSON-RPC extension: Platform `getStatus` (not in JS DAPI docs) + - Files: `src/protocol/jsonrpc_translator.rs`, `src/server.rs` + +## P1 — Observability & Ops + +- [x] gRPC access logging (interceptor) to align with HTTP access logs + - Files: `src/logging/middleware.rs`, gRPC server builder wiring +- [ ] Prometheus metrics: request counts, latency, errors, subscriber counts + - Files: `src/server.rs` (`/metrics`), metrics crate integration +- [x] Health check validates upstreams (Drive, Tenderdash RPC, Core RPC) via `/health` + - Files: `src/server/metrics.rs`, `src/services/platform_service/get_status.rs` + +## P1 — Deployment + +- [ ] Complete `Dockerfile`, `docker-compose.yml`, and `DOCKER.md` + - Files: `packages/rs-dapi/docker-compose.yml`, `packages/rs-dapi/DOCKER.md` +- [ ] Provide Envoy/Dashmate integration examples (listeners/clusters/routes) + - Files: `docs/` or `packages/rs-dapi/doc/` + +## P1 — Testing + +- [ ] Unit tests for Core and Platform handlers (success + error mapping) +- [ ] Integration tests for Platform broadcast + wait (with/without proofs) +- [ ] Streaming tests: bloom filtering, proofs, subscription lifecycle +- [ ] Protocol translation tests (JSON-RPC ↔ gRPC round-trips) + 
- Progress: JSON-RPC translator unit tests added in `src/protocol/jsonrpc_translator.rs` +- [ ] CI workflow to build, test, and lint +- [ ] Drive-proxy smoke tests for all `drive_method!` endpoints + - Spin up a minimal tonic Platform test server to capture requests and return canned responses + - Verify passthrough of request/response and metadata; assert cache path hit/miss +- [ ] Proto drift guard (parity check) + - Add a unit/CI check that enumerates Platform proto RPCs and ensures corresponding service methods exist + - Fail CI if new RPCs are added to proto without service wiring + +## P2 — Documentation + +- [ ] Expand README with endpoint matrix and examples + - Files: `packages/rs-dapi/README.md` + - Files: `packages/rs-dapi/doc/` (spec + generation notes) +- [ ] Migration guide from JS dapi to rs-dapi, JSON-RPC deprecation scope + +## P2 — Cleanup & Consistency + +- [ ] Unify error types (`src/error.rs` vs `src/errors/mod.rs`) into one `DapiError` +- [ ] Refactor error conversions: remove `impl From> for DapiError` and map external errors explicitly to `DapiError` across the codebase + - Files: `src/error.rs`, `src/clients/*`, `src/services/*`, `src/server.rs` +- [ ] Remove remaining `TODO` placeholders or convert into tracked tasks here +- [ ] Harden Tenderdash WebSocket reconnection/backoff +- [ ] Consistent config naming and documentation, align with Dashmate + +--- + +Quick References: +- Core service: `src/services/core_service.rs:1` +- Platform service: `src/services/platform_service/mod.rs:1` +- Streaming service: `src/services/streaming_service/mod.rs:1` +- Protocol translation: `src/protocol/*.rs` +- Server: `src/server.rs:1` +- Config: `src/config/mod.rs:1` +- Clients: `src/clients/*` diff --git a/packages/rs-dapi/doc/DESIGN.md b/packages/rs-dapi/doc/DESIGN.md new file mode 100644 index 00000000000..70a33af7aec --- /dev/null +++ b/packages/rs-dapi/doc/DESIGN.md @@ -0,0 +1,744 @@ +# rs-dapi Technical Design Document + +## Overview + +rs-dapi is a 
Rust implementation of the Dash Decentralized API (DAPI) that serves as a drop-in replacement for the existing JavaScript DAPI implementation. It provides gRPC and JSON-RPC endpoints for accessing both Dash Core (Layer 1) and Dash Platform (Layer 2) functionality through the masternode network. + +rs-dapi operates behind Envoy as a reverse proxy gateway, which handles SSL termination, external security, protocol translation, and request routing. This architecture allows rs-dapi to focus on business logic while Envoy manages all external security concerns. + +## Architecture + +### High-Level Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ External Clients │ +│ (Web browsers, mobile apps, CLI tools) │ +└─────────────────────────┬───────────────────────────────────┘ + │ HTTPS/WSS/gRPC-Web + │ (SSL termination & security) +┌─────────────────────────┼───────────────────────────────────┐ +│ │ │ +│ Envoy Gateway │ +│ (Managed by Dashmate) │ +│ │ +│ • SSL/TLS termination • Load balancing │ +│ • Protocol translation • Rate limiting │ +│ • Authentication/authorization • Request routing │ +│ • CORS handling • Health checking │ +└─────────────────────────┬───────────────────────────────────┘ + │ HTTP/gRPC/WebSocket + │ (Internal network, trusted) +┌─────────────────────────┼───────────────────────────────────┐ +│ │ │ +│ rs-dapi │ +│ (Single Binary Process) │ +│ │ +│ ┌─────────────────────────────────────────────────────┐ │ +│ │ │ │ +│ │ Unified Server │ │ +│ │ │ │ +│ │ ┌─────────────┐ ┌─────────────────────────────┐ │ │ +│ │ │ │ │ │ │ │ +│ │ │ API Handler │ │ Streams Handler │ │ │ +│ │ │ │ │ │ │ │ +│ │ │ - Core gRPC │ │ - Block streaming │ │ │ +│ │ │ - Platform │ │ - TX streaming │ │ │ +│ │ │ - JSON-RPC │ │ - Masternode list streaming │ │ │ +│ │ │ │ │ │ │ │ +│ │ └─────────────┘ └─────────────────────────────┘ │ │ +│ │ │ │ +│ └─────────────────────────────────────────────────────┘ │ +│ │ │ 
+└─────────────────────────┼──────────────────────────────────┘ + │ + ┌─────────────────────┼─────────────────┐ + │ │ │ + │ External Services │ + │ │ + │ ┌──────────┐ ┌──────────────┐ │ + │ │ Dash │ │ Tenderdash/ │ │ + │ │ Core │ │ Drive │ │ + │ │ │ │ │ │ + │ │ RPC+ZMQ │ │ gRPC+WS+RPC │ │ + │ └──────────┘ └──────────────┘ │ + └───────────────────────────────────────┘ +``` + +## Core Components + +### 1. Project Structure + +``` +packages/rs-dapi/ +├── Cargo.toml +├── src/ +│ ├── main.rs # Entry point and server initialization +│ ├── lib.rs # Library exports +│ ├── server.rs # Unified server implementation +│ ├── config/ # Configuration management +│ │ ├── mod.rs +│ │ └── settings.rs +│ ├── protocol/ # Protocol translation layer +│ │ ├── mod.rs +│ │ ├── grpc_native.rs # Native gRPC protocol handler +│ │ └── jsonrpc_translator.rs # JSON-RPC to gRPC translation +│ ├── services/ # gRPC service implementations (protocol-agnostic) +│ │ ├── mod.rs +│ │ ├── core_service.rs # Core blockchain endpoints +│ │ ├── platform_service.rs # Platform endpoints (main service implementation) +│ │ ├── platform_service/ # Modular complex method implementations +│ │ │ ├── get_status.rs # Complex get_status implementation with status building +│ │ │ └── subscribe_platform_events.rs # Proxy for multiplexed Platform events +│ │ └── streams_service.rs # Streaming endpoints +│ ├── server/ # Network servers and monitoring endpoints +│ │ ├── mod.rs +│ │ ├── grpc.rs # Unified gRPC server +│ │ ├── jsonrpc.rs # JSON-RPC server bridge +│ │ └── metrics.rs # Metrics + health HTTP endpoints (/health, /metrics) +│ ├── clients/ # External API clients +│ │ ├── mod.rs +│ │ ├── dashcore.rs # Dash Core RPC + ZMQ +│ │ ├── drive.rs # Drive gRPC client +│ │ └── tenderdash.rs # Tenderdash RPC + WebSocket +│ ├── handlers/ # Business logic handlers (protocol-agnostic) +│ │ ├── mod.rs +│ │ ├── core_handlers.rs # Core endpoint logic +│ │ ├── platform_handlers.rs # Platform endpoint logic +│ │ └── stream_handlers.rs 
# Streaming logic +│ ├── utils/ # Shared utilities +│ │ ├── mod.rs +│ │ ├── validation.rs # Input validation +│ │ ├── hash.rs # Hash utilities +│ │ └── bloom_filter.rs # Bloom filter implementation +│ ├── errors/ # Error types and handling +│ │ ├── mod.rs +│ │ └── grpc_errors.rs # gRPC error mapping +│ └── jsonrpc/ # JSON-RPC server (deprecated, uses translation layer) +│ ├── mod.rs +│ └── server.rs +├── proto/ # Generated protobuf code (if needed) +├── tests/ # Integration tests +└── doc/ # Documentation + └── DESIGN.md # This document +``` + +### 2. Modular Service Architecture + +rs-dapi implements a modular service architecture that separates simple proxy operations from complex business logic: + +#### Architecture Principles +- **Separation of Concerns**: Complex methods are isolated in dedicated modules +- **Context Sharing**: All modules have access to service context without boilerplate +- **Maintainability**: Each complex operation lives in its own file for easy maintenance +- **Scalability**: New complex methods can be added as separate modules +- **Minimal Macros**: A small `drive_method!` macro is used to generate simple proxy methods with caching to reduce boilerplate; all complex logic remains in regular `impl` blocks + + + +#### Service Organization Pattern +``` +services/ +├── service_name.rs # Main service implementation +│ ├── Service struct definition +│ ├── Simple proxy methods (majority of methods) +│ ├── Service initialization +│ └── Delegation calls to complex modules +├── service_name/ # Directory for complex methods +│ ├── complex_method_1.rs # First complex method implementation +│ ├── complex_method_2.rs # Second complex method implementation +│ └── ... 
# Additional complex methods +└── shared_utilities.rs # Shared helper modules +``` + +#### Implementation Pattern +Each complex method follows this pattern: + +```rust +// Main service file (e.g., platform_service.rs) +mod complex_method; // Import the complex implementation + +impl GrpcTrait for ServiceImpl { + async fn simple_method(&self, req: Request) -> Result, Status> { + // Simple proxy - direct forwarding + match self.client.simple_method(req.get_ref()).await { + Ok(response) => Ok(Response::new(response)), + Err(e) => Err(Status::internal(format!("Client error: {}", e))), + } + } + + async fn complex_method(&self, req: Request) -> Result, Status> { + // Delegate to complex implementation + self.complex_method_impl(req).await + } +} + +// Complex method file (e.g., service_name/complex_method.rs) +impl ServiceImpl { + pub async fn complex_method_impl(&self, req: Request) -> Result, Status> { + // Full access to service context: + // - self.clients (drive_client, tenderdash_client, etc.) + // - self.cache + // - self.config + // Complex business logic here... + } +} +``` + + +### 3. External Dependencies + +The implementation leverages existing Dash Platform crates and external libraries: + +#### Platform Crates +- `dapi-grpc` - gRPC service definitions and generated code +- `rs-dpp` - Data Platform Protocol types and validation + + +## Service Implementations + +### 4. 
Core Service + +Implements blockchain-related gRPC endpoints (protocol-agnostic via translation layer): + +#### Endpoints +- `getBlockchainStatus` - Network and chain status information +- `getBestBlockHeight` - Current blockchain height +- `getTransaction` - Transaction lookup by hash +- `broadcastTransaction` - Submit transactions to network + +#### Key Features +- Direct integration with Dash Core RPC +- ZMQ notifications for real-time updates +- Transaction validation and error handling +- Network status aggregation +- **Protocol-Agnostic**: Works identically for gRPC and JSON-RPC clients + +Implementation notes: +- Implemented in `src/services/core_service.rs`, backed by `src/clients/core_client.rs` (dashcore-rpc) +- JSON-RPC minimal parity implemented in `src/server.rs` via translator (see below) + +### 5. Platform Service + +Implements Dash Platform gRPC endpoints (protocol-agnostic via translation layer) with a modular architecture for complex method implementations: + +#### Modular Architecture +The Platform Service uses a modular structure where complex methods are separated into dedicated modules. 
+ +#### Main Service (`platform_service.rs`) +- **Service Definition**: Contains `PlatformServiceImpl` struct with all necessary context +- **Simple Methods**: Direct proxy methods that forward requests to Drive client +- **Complex Method Delegation**: Delegates complex operations to specialized modules +- **Shared Context**: All struct fields marked `pub(crate)` for submodule access + +#### Complex Method Modules (`platform_service/`) +- **Dedicated Files**: Each complex method gets its own module file +- **Context Access**: Full access to service context via `impl PlatformServiceImpl` blocks +- **Business Logic**: Contains all complex caching, validation, and processing logic +- **Integrated Utilities**: Status building and other utilities included directly in method modules +- **Clean Separation**: Isolated complex logic from simple proxy operations + +Implementation notes: +- Simple passthrough methods are generated by `drive_method!` with integrated LRU caching +- `get_status`, `broadcast_state_transition`, `wait_for_state_transition_result`, and `subscribe_platform_events` are implemented as dedicated modules +- Drive client is configured with increased message size limits; compression is disabled at rs-dapi level (Envoy handles wire compression) + + +#### Endpoints +- `broadcastStateTransition` - Submit state transitions +- `waitForStateTransitionResult` - Wait for processing with proof generation +- `getConsensusParams` - Platform consensus parameters +- `getStatus` - Platform status information + +### 6. Protocol Translation + +rs-dapi exposes a JSON-RPC gateway alongside gRPC. Axum powers JSON-RPC routing in `src/server.rs`. 
+ +- JSON-RPC translator: `src/protocol/jsonrpc_translator.rs` + - Supported: `getStatus`, `getBestBlockHash`, `getBlockHash(height)`, `sendRawTransaction` + - Translator converts JSON-RPC requests to internal calls and back; error mapping aligns with JSON-RPC codes + - Unit tests cover translation and error paths + +Operational notes: +- Compression: disabled at rs-dapi; Envoy handles edge compression +- Access logging: HTTP/JSON-RPC and gRPC traffic share the same access logging layer when configured, so all protocols emit uniform access entries + +- Platform event streaming is handled via a direct upstream proxy: + - `subscribePlatformEvents` simply forwards every inbound command stream to a single Drive connection and relays responses back without multiplexing + +#### Key Features +- **Modular Organization**: Complex methods separated into dedicated modules for maintainability +- **Context Sharing**: Submodules have full access to service context (clients, cache, config) +- **No Boilerplate**: Uses `impl` blocks rather than wrapper structs +- **Integrated Utilities**: Status building and other helper functions co-located with their usage +- State transition hash validation (64-character SHA256 hex) +- Integration with Drive for proof generation +- Tenderdash WebSocket monitoring for real-time events +- Timeout handling for long-running operations +- Error conversion from Drive responses +- **Protocol-Agnostic**: Identical behavior across all client protocols + +##### Platform Events Subscription Proxy + +rs-dapi exposes `subscribePlatformEvents` as a server-streaming endpoint and currently performs a straightforward pass-through to rs-drive-abci. + +- Public interface: + - Bi-directional gRPC stream: `subscribePlatformEvents(request stream PlatformEventsCommand) -> (response stream PlatformEventsResponse)`. + - Commands (`Add`, `Remove`, `Ping`) and responses (`Event`, `Ack`, `Error`) stay in their protobuf `V0` envelopes end-to-end. 
+ +- Upstream behavior: + - Each client stream obtains its own upstream Drive connection; tokio channels forward commands upstream and pipe responses back downstream without pooling. + - The `EventMux` from `rs-dash-event-bus` is retained for future multiplexing work but does not alter traffic today. + +- Observability: + - Standard `tracing` logging wraps the forwarders, and the proxy participates in the existing `/metrics` exporter via shared counters. + +### 7. Streams Service + +Implements real-time streaming gRPC endpoints (protocol-agnostic via translation layer): + +#### Endpoints +- `subscribeToBlockHeadersWithChainLocks` - Block header streaming +- `subscribeToTransactionsWithProofs` - Transaction filtering with bloom filters +- `subscribeToMasternodeList` - Masternode list updates + - Note: Platform event streaming is handled by `PlatformService::subscribePlatformEvents` and proxied directly to Drive as described in the Platform Service section. + +#### Key Features +- ZMQ event processing for real-time data +- Bloom filter management for transaction filtering +- Merkle proof generation for SPV verification +- Stream lifecycle management +- Connection resilience and reconnection +- **Protocol-Agnostic**: Streaming works consistently across all protocols + +### 8. JSON-RPC Service (Legacy) + +Provides legacy HTTP endpoints for backward compatibility via protocol translation: + +#### Endpoints +- `getBestBlockHash` - Hash of the latest block +- `getBlockHash` - Block hash by height + +#### Key Features +- **Translation Layer**: All requests converted to gRPC calls internally +- HTTP server with JSON-RPC 2.0 compliance +- Error format compatibility with existing clients +- Minimal subset focused on essential operations +- **Deprecated**: New clients should use gRPC APIs + +### 9. 
Health and Monitoring Endpoints + +Built-in observability and monitoring capabilities: + +#### Health Check Endpoints +- `GET /health` - Aggregated health check covering rs-dapi, Drive gRPC status, Tenderdash RPC, and Core RPC. Returns `503` when any dependency is unhealthy. +- Readiness/liveness split removed in favor of the single dependency-aware health probe. + +#### Metrics Endpoints +- `GET /metrics` - Prometheus metrics + +#### Status Information +- Service uptime and version +- External service connection status +- Request counts and latency statistics +- Error rates and types +- Active stream subscriber counts + +## Data Flow and Processing + +### 10. Multi-Protocol Server Architecture + +rs-dapi implements a unified server with a protocol translation layer that normalizes all incoming requests to gRPC format, operating behind Envoy as a trusted backend service: + +#### Protocol Translation Architecture +- **Protocol Translation Layer**: All non-gRPC protocols translated to gRPC format +- **Unified Business Logic**: All handlers work exclusively with gRPC messages +- **Single Code Path**: No protocol-specific logic in business layer +- **Native gRPC**: Direct pass-through for gRPC requests +- **Trusted Environment**: Operates in internal network behind Envoy gateway + +#### Request Flow with Protocol Translation +``` +External Client → Envoy Gateway → Protocol Translation → gRPC Services → External Services + ↓ ↓ ↓ ↓ ↓ + HTTPS/WSS SSL termination ┌─────────────────┐ Core Service Dash Core + gRPC-Web → Protocol xlat → │ JSON→gRPC xlat │→ Platform Svc → Drive + JSON-RPC Rate limiting │ Native gRPC │ Streams Svc Tenderdash + Auth/CORS └─────────────────┘ + Protocol Translation Layer +``` + +#### Internal Architecture with Translation Layer +``` +┌─────────────────────────────────────────────────────────────┐ +│ rs-dapi Process (localhost only) │ +│ │ +│ ┌─────────────────────────────────────────────────────┐ │ +│ │ Protocol Translation Layer │ │ +│ │ │ │ +│ │ 
┌─────────────┐ ┌─────────────┐ │ │ +│ │ │ JSON-RPC │ │ gRPC │ │ │ +│ │ │ Translator │ │ Native │ │ │ +│ │ │ │ │ │ │ │ +│ │ │ JSON→gRPC │ │ Pass-through│ │ │ +│ │ └─────────────┘ └─────────────┘ │ │ +│ │ │ │ │ │ +│ │ └──────────────┘ │ │ +│ │ │ │ │ +│ │ ▼ │ │ +│ │ ┌─────────────────────────────────────────────┐ │ │ +│ │ │ gRPC Services Layer │ │ │ +│ │ │ (Protocol-Agnostic) │ │ │ +│ │ │ │ │ │ +│ │ │ ┌─────────────┐ ┌─────────────────────────┐ │ │ │ +│ │ │ │ Core Service│ │ Platform & Streams │ │ │ │ +│ │ │ │ │ │ Services │ │ │ │ +│ │ │ │ - Blockchain│ │ - State transitions │ │ │ │ +│ │ │ │ - TX broadcast │ - Block streaming │ │ │ │ +│ │ │ │ - Status │ │ - Masternode updates │ │ │ │ +│ │ │ └─────────────┘ └─────────────────────────┘ │ │ │ +│ │ └─────────────────────────────────────────────┘ │ │ +│ └─────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ +``` + +#### Protocol Translation Details +- **JSON-RPC Translator**: Converts JSON-RPC 2.0 format to corresponding gRPC calls +- **gRPC Native**: Direct pass-through for native gRPC requests (no translation) +- **Response Translation**: Converts gRPC responses back to original protocol format +- **Error Translation**: Maps gRPC status codes to appropriate protocol-specific errors +- **Streaming**: gRPC streaming for real-time data with consistent semantics across protocols + +### 11. 
Protocol Translation Layer + +The protocol translation layer is the key architectural component that enables unified business logic while supporting multiple client protocols: + +#### Translation Components + +##### JSON-RPC to gRPC Translator +- **RPC Method Mapping**: JSON-RPC method names mapped to gRPC service methods +- **Parameter Conversion**: JSON-RPC params converted to gRPC message fields +- **ID Tracking**: JSON-RPC request IDs preserved for response correlation +- **Batch Request Support**: Multiple JSON-RPC requests in single batch handled +- **Error Format**: gRPC errors converted to JSON-RPC 2.0 error format + +##### Native gRPC Handler +- **Direct Pass-through**: No translation required for native gRPC requests +- **Metadata Preservation**: gRPC metadata and headers preserved +- **Streaming Support**: Full bidirectional streaming support +- **Compression**: Native gRPC compression and optimization + +### 11. State Transition Processing + +The `waitForStateTransitionResult` endpoint follows this flow: + +1. **Input Validation** + - Check Tenderdash connection availability + - Validate state transition hash format (64-char hex) + - Parse request parameters (hash, prove flag) + +2. **Transaction Monitoring** + - Wait for transaction to be included in a block + - Monitor Tenderdash events via WebSocket + - Handle timeout scenarios with appropriate errors + +3. **Proof Generation** (if requested) + - Fetch proof from Drive for the state transition + - Include metadata and proof data in response + +4. **Error Handling** + - Convert Drive errors to gRPC status codes + - Handle timeout with `DEADLINE_EXCEEDED` + - Map transaction errors to structured responses + +### 12. Streaming Data Processing + +#### Transaction Filtering +1. Client subscribes with bloom filter +2. ZMQ notifications from Dash Core processed +3. Transactions tested against bloom filters +4. Matching transactions sent with merkle proofs + +#### Block Header Streaming +1. 
ZMQ block notifications from Dash Core +2. Block headers extracted and validated +3. Chain lock information included +4. Streamed to subscribed clients + +#### Race-Free Historical + Live Backfill +To avoid gaps between historical fetching and live streaming (race conditions), rs-dapi follows a subscribe-first pattern for continuous streams: +- Subscribe to live events first and attach the forwarder to the client stream. +- Snapshot the current best height from Core RPC. +- If the request includes a starting point (`fromBlockHeight` or `fromBlockHash`) with `count = 0`, backfill historical data from the start to the snapshotted best height and send to the same stream. +- Continue forwarding live events from ZMQ; duplicates are tolerated and handled client-side. + +This pattern is applied to: +- `subscribeToBlockHeadersWithChainLocks` (count = 0 with `fromBlock*`): subscribe, snapshot, backfill headers to tip, then stream live block headers and chainlocks. +- `subscribeToTransactionsWithProofs` (count = 0 with `fromBlock*`): subscribe, snapshot, backfill filtered transactions + merkle blocks to tip, then stream live transactions/locks/blocks. + +Rationale: If the server performs historical fetch first and subscribes later, any blocks/transactions arriving during the fetch window can be missed. Subscribing first guarantees coverage; backfill up to a captured tip ensures deterministic catch-up without gaps. + +### 13. 
External Service Integration + +#### Dash Core Integration +- **RPC Client**: Blockchain queries, transaction broadcasting +- **ZMQ Client**: Real-time notifications (blocks, transactions, chainlocks) +- **Connection Management**: Retry logic, health checks + +#### Drive Integration +- **gRPC Client**: State queries, proof generation +- **Error Mapping**: Drive-specific errors to gRPC status codes +- **Connection Pooling**: Efficient resource utilization + +#### Tenderdash Integration +- **RPC Client**: Consensus queries, network status +- **WebSocket Client**: Real-time Platform events +- **Event Processing**: State transition monitoring + +## Configuration and Deployment + +### 14. Configuration Management + + +#### Process Architecture +- **Single Binary**: One process handles all DAPI functionality behind Envoy +- **Multi-threaded**: Tokio runtime with multiple worker threads +- **Shared State**: Common configuration and client connections +- **Service Isolation**: Logical separation of Core, Platform, and Streams services +- **Internal Network**: All services bind to localhost/internal addresses only +- **Trusted Backend**: No direct external exposure, operates behind Envoy gateway +- + +#### Configuration Files +- .env-based configuration with environment override +- Strict precedence: compile-time defaults < `.env` < environment variables < CLI overrides +- Network-specific default configurations +- Validation and error reporting for invalid configs + +### 15. 
Binary Architecture + +The rs-dapi binary is designed as a unified server that handles all DAPI functionality: + +#### Single Process Design +- **Unified Server**: Single process serving all endpoints +- **Unified gRPC Services**: Core, Platform, and Streams services on the same port, distinguished by service path +- **Integrated JSON-RPC**: HTTP server embedded within the same process +- **Shared Resources**: Common connection pools and state management + +#### Port Configuration (configurable) +- **gRPC Server Port** (default: 3005): Unified port for Core + Platform + streaming endpoints +- **JSON-RPC Port** (default: 3004): Legacy HTTP endpoints +- **Health/Metrics Port** (default: 9090): Monitoring endpoints + +All ports bind to internal Docker network. External access is handled by Envoy. + +#### Service Lifecycle Management + +- **Docker** as primary deployment method +- **Dashmate** as primary deployment and management tool + +#### Dashmate Integration +- **Drop-in Replacement**: Direct substitution for JavaScript DAPI processes +- **Same Configuration**: Uses existing environment variables and setup +- **Compatible Deployment**: Works with current dashmate deployment scripts +- **Envoy Gateway**: Integrates with existing Envoy configuration in dashmate +- **Internal Service**: Operates as trusted backend behind Envoy proxy +- **Resource Efficiency**: Single process reduces memory footprint and complexity +- **Automatic Startup**: All services and dependencies start with single command +- **Built-in Monitoring**: Health endpoints accessible to Envoy for health checks + +### 16. 
Error Handling Strategy + +#### gRPC Error Mapping +- `INVALID_ARGUMENT` - Input validation failures +- `UNAVAILABLE` - External service connectivity issues +- `DEADLINE_EXCEEDED` - Operation timeouts +- `INTERNAL` - Unexpected internal errors +- `NOT_FOUND` - Resource not found + +#### Error Context +- Structured error messages with context +- Request correlation IDs for tracing +- Detailed error metadata for debugging +- Compatible error formats with JavaScript DAPI + +## Performance and Scalability + +### 17. Performance Characteristics + +#### Async Processing +- Tokio runtime with work-stealing scheduler +- Non-blocking I/O for all external communications +- Concurrent request handling + +#### Resource Management +- Connection pooling for external services +- Efficient memory usage with zero-copy operations +- Stream backpressure handling + +#### Caching Strategy +- Blockchain status caching with TTL +- Connection keep-alive for external services +- Smart invalidation based on ZMQ events + +### 18. 
Monitoring and Observability + +#### Logging +- Structured logging with `tracing`, including correlation IDs, timing, and protocol metadata when useful +- Each gRPC/streaming handler emits exactly one `info!` on success and a single `warn!` on failure, capturing mapped `Status` and identifying context +- Background workers and helper paths stay at `debug!` for exceptional branches and `trace!` for steady-state loops; reserve spans similarly (`trace_span!`/`debug_span!`) so higher levels remain quiet +- Log levels: + - info - business events, target audience: users, sysops/devops + - error - errors that break things, need action or pose threat to service, target audience: users, sysops/devops + - warn - other issues that need attention, target audience: users, sysops/devops + - debug - non-verbose debugging information adding much value to understanding system operations; target audience: developers + - trace - other debugging information that is either quite verbose, or adds little value to understanding system operations; target audience: developers +- Prefer logging at logical block boundaries instead of every operation to keep even `trace` output digestible + +#### Built-in Metrics +- **Request Metrics**: Counts, latency histograms per protocol +- **Connection Metrics**: External service status and health +- **Stream Metrics**: Active subscribers, message throughput +- **System Metrics**: Memory usage, CPU utilization, goroutine counts +- **Business Metrics**: Transaction success rates, proof generation times + +#### Prometheus Integration +- Native Prometheus metrics endpoint +- Custom metrics for DAPI-specific operations +- Grafana-compatible dashboards +- Alerting rules for operational monitoring + +#### Health Checks +- Service readiness and liveness endpoints +- External service connectivity validation +- Graceful degradation strategies + +## Security Considerations + +### 19. 
Envoy Gateway Security Model + +rs-dapi operates in a trusted environment behind Envoy Gateway, which handles all external security concerns: + +#### External Security (Handled by Envoy) +- **SSL/TLS Termination**: All external HTTPS/WSS connections terminated at Envoy +- **Certificate Management**: SSL certificates managed by dashmate/Envoy configuration +- **Rate Limiting**: Request rate limiting and DDoS protection at gateway level +- **CORS Handling**: Cross-origin resource sharing policies enforced by Envoy +- **Authentication/Authorization**: Client authentication and authorization at gateway +- **Protocol Translation**: Secure gRPC-Web, WebSocket, and HTTPS to internal HTTP/gRPC + +#### Internal Security (rs-dapi Responsibility) +- **Input Validation**: SHA256 hash format validation, buffer overflow prevention +- **Request Sanitization**: Input sanitization for all endpoints and parameters +- **Request Size Limits**: Maximum request size enforcement +- **Connection Limits**: Maximum concurrent connections per internal service +- **Trust Boundary**: Only accepts connections from localhost/internal network + +### 20. Network Architecture Security + +#### Trust Model +- **Trusted Internal Network**: rs-dapi assumes all requests come from trusted Envoy +- **No Direct External Exposure**: All services bind to localhost (127.0.0.1) by default +- **Network Isolation**: External network access only through Envoy gateway +- **Service Mesh**: Can be integrated with service mesh for additional internal security + +#### Internal Communication Security +- **Dash Core Integration**: Secure RPC connections with authentication credentials +- **Drive Integration**: Internal gRPC connections within trusted network +- **Tenderdash Integration**: Authenticated RPC and WebSocket connections +- **Credential Management**: Secure storage and rotation of service credentials + +## Testing Strategy + +### 21. 
Test Coverage + +#### Unit Tests +- Individual component testing +- Error condition testing +- Input validation testing + +#### Integration Tests +- End-to-end service testing +- External service integration +- Stream lifecycle testing +- Error propagation testing + +#### Performance Tests +- Load testing under various conditions +- Memory usage profiling +- Connection limit testing +- Concurrent client testing + +## Migration and Compatibility + +### 22. Compatibility Requirements + +#### API Compatibility +- Identical gRPC service definitions +- Same JSON-RPC endpoint behavior +- Compatible error response formats +- Matching timeout behaviors + +#### Configuration Compatibility +- Same environment variable names +- Compatible configuration file formats +- Identical default values +- Same network selection logic + +### 23. Deployment Strategy + +#### Gradual Migration +1. **Dashmate Integration**: Update dashmate to use rs-dapi binary +2. **Feature Flag Deployment**: Deploy with feature flags for rollback capability +3. **Traffic Validation**: Monitor performance and error rates in production +4. **Full Migration**: Complete replacement of JavaScript DAPI once validated + +#### Deployment in Dashmate +- **Binary Replacement**: rs-dapi replaces existing JavaScript DAPI processes +- **Envoy Integration**: Works seamlessly with existing Envoy gateway configuration +- **Configuration Compatibility**: Uses same environment variables as current setup +- **Internal Network Binding**: All services bind to localhost, external access via Envoy +- **Process Management**: Single process simplifies service management in dashmate +- **Resource Optimization**: Reduced memory usage and inter-process communication overhead +- **Security Simplification**: No SSL/certificate management needed in rs-dapi + +## Future Considerations + +### 24. 
Extensibility + +#### Plugin Architecture +- Modular service design +- Configurable middleware +- Extension points for custom logic + +#### Performance Optimizations +- Custom allocators for high-frequency operations +- SIMD optimizations for cryptographic operations +- Advanced caching strategies + +### 25. Maintenance and Updates + +#### Code Organization +- Clear module boundaries +- Comprehensive documentation +- Automated testing and CI/CD +- Regular dependency updates + +#### Code Style Guidelines +- Constructor pattern: `new()` methods should create fully operational objects +- Objects should be ready to use immediately after construction +- Use builder pattern for complex configuration instead of multi-step initialization +- Prefer composition over inheritance for extending functionality +- Follow Rust naming conventions and idiomatic patterns +- Document public APIs with short examples +- Use `Result` for fallible operations, not panics in constructors + +#### Monitoring and Debugging +- Advanced debugging capabilities +- Performance profiling tools +- Memory leak detection +- Crash reporting and analysis + +--- + +This design document serves as the foundation for implementing rs-dapi and will be updated as the implementation progresses and requirements evolve. 
diff --git a/packages/rs-dapi/examples/.env.example b/packages/rs-dapi/examples/.env.example new file mode 100644 index 00000000000..14e54ef7934 --- /dev/null +++ b/packages/rs-dapi/examples/.env.example @@ -0,0 +1,45 @@ +# Example .env file for rs-dapi with logging configuration +# Copy this to .env in your deployment directory and adjust values as needed + +# Application settings +HOST=127.0.0.1 +PORT=3000 + +# Logging configuration +# Log level: error, warn, info, debug, trace +LOG_LEVEL=info + +# Log format: json (structured) or compact (human-readable) +LOG_FORMAT=json + +# Enable colors in console output (only for compact format) +LOG_COLORS=true + +# Access log configuration +# Enable access logging +ACCESS_LOG_ENABLED=true + +# Access log file path (if not set, access logging is disabled) +# Ensure the directory exists and is writable by the rs-dapi process +ACCESS_LOG_FILE=/var/log/rs-dapi/access.log + +# Access log format: combined (Apache format) or json +ACCESS_LOG_FORMAT=combined + +# Buffer access logs before writing (improves performance) +# Set to 0 to disable buffering (immediate writes) +ACCESS_LOG_BUFFER_SIZE=1024 + +# Flush buffer interval in seconds (only if buffering is enabled) +ACCESS_LOG_FLUSH_INTERVAL=5 + +# Alternative logging configurations (uncomment to use) +# Use systemd journal instead of files +# SYSTEMD_LOGGING=true +# SYSTEMD_IDENTIFIER=rs-dapi + +# Use syslog instead of files +# SYSLOG_ENABLED=true +# SYSLOG_FACILITY=daemon +# SYSLOG_HOSTNAME=localhost +# SYSLOG_PROCESS_ID=true diff --git a/packages/rs-dapi/examples/tenderdash_client.rs b/packages/rs-dapi/examples/tenderdash_client.rs new file mode 100644 index 00000000000..779ba763e5d --- /dev/null +++ b/packages/rs-dapi/examples/tenderdash_client.rs @@ -0,0 +1,89 @@ +use std::{env, error::Error, time::Duration}; + +use base64::engine::{Engine as _, general_purpose::STANDARD}; +use rs_dapi::{DAPIResult, clients::TenderdashClient}; +use tokio::time::timeout; + +#[tokio::main] 
+async fn main() -> Result<(), Box> { + // Enable a basic tracing subscriber if the caller did not configure one already. + let _ = tracing_subscriber::fmt::try_init(); + + println!("Tenderdash Client example that tests all implemented Tenderdash methods."); + println!( + "You can use TENDERDASH_RPC_URL and TENDERDASH_WS_URL env vars to override the default connection URLs." + ); + + let rpc_url = + env::var("TENDERDASH_RPC_URL").unwrap_or_else(|_| "http://127.0.0.1:26657".to_string()); + let ws_url = env::var("TENDERDASH_WS_URL") + .unwrap_or_else(|_| "ws://127.0.0.1:26657/websocket".to_string()); + + println!("Connecting to Tenderdash HTTP at {rpc_url} and WS at {ws_url}"); + + let client = match TenderdashClient::new(&rpc_url, &ws_url).await { + Ok(client) => client, + Err(err) => { + eprintln!("Failed to initialize Tenderdash client: {err}"); + return Ok(()); + } + }; + + // Fetch high-level node status information. + print_result("status", client.status().await); + + // Fetch network information about peers and listeners. + print_result("net_info", client.net_info().await); + + // Prepare simple demo payloads (base64 encoded strings are expected by RPC). + let demo_tx = STANDARD.encode(b"demo-state-transition"); + let demo_hash = STANDARD.encode("demo-transaction-hash"); + + // Validate a transaction with CheckTx (tenderdash will likely reject our dummy payload). + print_result("check_tx", client.check_tx(demo_tx.clone()).await); + + // Try broadcasting the same transaction. + print_result("broadcast_tx", client.broadcast_tx(demo_tx.clone()).await); + + // Search for the transaction in the mempool and committed blocks. + print_result("unconfirmed_tx", client.unconfirmed_tx(&demo_hash).await); + print_result("tx", client.tx(demo_hash.clone()).await); + + // Subscribe to streaming transaction and block events. 
+ let mut tx_events = client.subscribe_to_transactions(); + let mut block_events = client.subscribe_to_blocks(); + + let tx_listener = tokio::spawn(async move { + match timeout(Duration::from_secs(5), tx_events.recv()).await { + Ok(Ok(event)) => println!("Received transaction event: {:?}", event), + Ok(Err(err)) => println!("Transaction subscription closed with error: {err}"), + Err(_) => println!("No transaction events received within 5 seconds"), + } + }); + + let block_listener = tokio::spawn(async move { + match timeout(Duration::from_secs(5), block_events.recv()).await { + Ok(Ok(event)) => println!("Received block event: {:?}", event), + Ok(Err(err)) => println!("Block subscription closed with error: {err}"), + Err(_) => println!("No block events received within 5 seconds"), + } + }); + + let (tx_result, block_result) = tokio::join!(tx_listener, block_listener); + if let Err(err) = tx_result { + println!("Transaction listener task failed: {err}"); + } + if let Err(err) = block_result { + println!("Block listener task failed: {err}"); + } + + println!("Tenderdash client example finished."); + Ok(()) +} + +fn print_result(label: &str, result: DAPIResult) { + match result { + Ok(value) => println!("{label} succeeded: {value:#?}"), + Err(err) => println!("{label} failed: {err}"), + } +} diff --git a/packages/rs-dapi/src/cache.rs b/packages/rs-dapi/src/cache.rs new file mode 100644 index 00000000000..a489c4cbcc9 --- /dev/null +++ b/packages/rs-dapi/src/cache.rs @@ -0,0 +1,631 @@ +use quick_cache::{Weighter, sync::Cache}; +use std::fmt::Debug; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::{Duration, Instant}; +use tracing::{debug, warn}; + +use crate::DapiError; +use crate::metrics::{self, MethodLabel}; +use crate::services::streaming_service::SubscriptionHandle; +use crate::sync::Workers; + +/// Estimated average size of a cache entry in bytes, used for initial capacity planning. 
+const ESTIMATED_ENTRY_SIZE_BYTES: u64 = 1024; + +#[derive(Clone)] +/// An LRU cache for storing serialized responses, keyed by method name and request parameters. +/// Uses a background worker to invalidate the cache on demand. +/// +/// Entries are weighted by their estimated memory usage to better utilize the configured capacity. +/// +/// The cache is thread-safe, cheaply cloneable, and can be shared across multiple threads. +/// +/// # Panics +/// +/// Panics if serialization of keys or values fails. +pub struct LruResponseCache { + inner: Arc>, + label: Arc, + #[allow(dead_code)] + workers: Workers, +} + +impl Debug for LruResponseCache { + /// Display cache size, total weight, and capacity for debugging output. + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "LruResponseCache {{ size: {}, weight: {}, capacity: {} }}", + self.inner.len(), + self.inner.weight(), + self.inner.capacity() + ) + } +} + +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub struct CacheKey { + method: &'static str, + /// Message digest; when None, all lookups will miss + digest: Option, +} + +type CacheIndex = u128; + +impl CacheKey { + #[inline(always)] + pub fn new(method: &'static str, key: &M) -> CacheKey { + make_cache_key(method, key) + } + + #[inline(always)] + pub const fn method(self) -> &'static str { + self.method + } + + #[inline(always)] + pub fn method_label(&self) -> MethodLabel { + MethodLabel::from_type_name(self.method) + } + + pub const fn digest(self) -> Option { + self.digest + } +} +#[derive(Clone)] +struct CachedValue { + inserted_at: Instant, + data: serde_bytes::ByteBuf, +} + +impl Debug for CachedValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CachedValue") + .field("inserted_at", &self.inserted_at) + .field("data", &hex::encode(&self.data)) + .field("data_len", &self.data.len()) + .finish() + } +} + +impl CachedValue { + /// Capture the current instant and serialize 
the provided value into bytes. + /// + /// Returns None if serialization fails. + fn new(data: T) -> Option { + // We don't use bincode, as we have hit a bug in bincode + // that causes deserialization to fail in some cases within get_with_ttl. + let serialized = rmp_serde::to_vec(&data) + .inspect_err(|e| { + tracing::debug!("Failed to serialize value for caching: {}", e); + }) + .ok()?; + + Some(Self { + inserted_at: Instant::now(), + data: serialized.into(), + }) + } + + /// Deserialize the cached bytes into the requested type if possible. + fn value(&self) -> Result { + rmp_serde::from_slice(&self.data).map_err(|e| { + DapiError::invalid_data(format!("Failed to deserialize cached value: {}", e)) + }) + } +} + +#[derive(Clone, Default)] +struct CachedValueWeighter; + +impl Weighter for CachedValueWeighter { + /// Estimate cache entry weight by combining struct overhead and payload size. + fn weight(&self, _key: &CacheIndex, value: &CachedValue) -> u64 { + let structural = std::mem::size_of::() as u64; + let payload = value.data.len() as u64; + (structural + payload).max(1) + } +} + +impl LruResponseCache { + /// Create a cache with a fixed capacity and without any external invalidation. + /// Use this when caching immutable responses (e.g., blocks by hash). + /// `capacity` is expressed in bytes. + pub fn with_capacity(label: impl Into>, capacity: u64) -> Self { + let label = label.into(); + let cache = Self { + inner: Self::new_cache(capacity), + label: label.clone(), + workers: Workers::new(), + }; + observe_memory(&cache.inner, cache.label.as_ref()); + cache + } + /// Create a cache and start a background worker that clears the cache + /// whenever a signal is received on the provided receiver. + /// `capacity` is expressed in bytes. 
+ pub fn new(label: impl Into>, capacity: u64, receiver: SubscriptionHandle) -> Self { + let label = label.into(); + let inner = Self::new_cache(capacity); + let inner_clone = inner.clone(); + let label_clone = label.clone(); + let workers = Workers::new(); + workers.spawn(async move { + while let Some(event) = receiver.recv().await { + tracing::trace!(?event, "Cache invalidation event received, clearing cache"); + inner_clone.clear(); + observe_memory(&inner_clone, label_clone.as_ref()); + } + tracing::debug!("Cache invalidation task exiting"); + Result::<(), DapiError>::Ok(()) + }); + + let cache = Self { + inner, + label, + workers, + }; + observe_memory(&cache.inner, cache.label.as_ref()); + cache + } + + /// Create the underlying cache with weighted capacity based on estimated entry size. + fn new_cache(capacity: u64) -> Arc> { + let capacity_bytes = capacity.max(1); + let estimated_items_u64 = (capacity_bytes / ESTIMATED_ENTRY_SIZE_BYTES).max(1); + let estimated_items = estimated_items_u64.min(usize::MAX as u64) as usize; + Arc::new(Cache::with_weighter( + estimated_items, + capacity_bytes, + CachedValueWeighter, + )) + } + + /// Remove all entries from the cache. + pub fn clear(&self) { + self.inner.clear(); + observe_memory(&self.inner, self.label.as_ref()); + } + + /// Helper to get and parse the cached value + fn get_and_parse( + &self, + key: &CacheKey, + ) -> Option<(T, Instant)> { + let cached_value = self.inner.get(&key.digest()?)?; + + let value = match cached_value.value() { + Ok(cv) => Some(cv), + Err(error) => { + debug!(%error, method = key.method(), "Failed to deserialize cached value, interpreting as cache miss and dropping"); + self.remove(key); + + None + } + }; + + tracing::trace!( + method = key.method(), + age_ms = cached_value.inserted_at.elapsed().as_millis(), + ?cached_value, + ?value, + "Cache hit" + ); + + value.map(|v| (v, cached_value.inserted_at)) + } + + /// Retrieve a cached value by key, deserializing it into the requested type. 
+ pub fn get(&self, key: &CacheKey) -> Option + where + T: serde::de::DeserializeOwned + Debug, + { + let method_label = key.method_label(); + match self.get_and_parse(key) { + Some((v, _)) => { + metrics::cache_hit(self.label.as_ref(), &method_label); + Some(v) + } + None => { + metrics::cache_miss(self.label.as_ref(), &method_label); + None + } + } + } + + /// Get a value with TTL semantics; returns None if entry is older than TTL. + #[inline(always)] + pub fn get_with_ttl(&self, key: &CacheKey, ttl: Duration) -> Option + where + T: serde::de::DeserializeOwned + Debug, + { + let Some((value, inserted_at)) = self.get_and_parse(key) else { + metrics::cache_miss(self.label.as_ref(), &key.method_label()); + return None; + }; + + let method_label = key.method_label(); + + if inserted_at.elapsed() <= ttl { + metrics::cache_hit(self.label.as_ref(), &method_label); + + return value; + } + + // expired, drop it + self.remove(key); + // treat as miss + metrics::cache_miss(self.label.as_ref(), &method_label); + None + } + + /// Remove a cached value by key. + /// Returns true if an entry was removed. + pub fn remove(&self, key: &CacheKey) -> bool { + let Some(index) = key.digest() else { + return false; + }; + + let removed = self.inner.remove(&index).is_some(); + if removed { + observe_memory(&self.inner, self.label.as_ref()); + } + removed + } + + /// Insert or replace a cached value for the given key. + /// + /// On error during serialization, the value is not cached. + #[inline] + pub fn put(&self, key: CacheKey, value: &T) + where + T: serde::Serialize, + { + let Some(index) = key.digest() else { + // serialization of key failed, skip caching + debug!( + method = key.method(), + "Cache key serialization failed, skipping cache" + ); + return; + }; + + if let Some(cv) = CachedValue::new(value) { + self.inner.insert(index, cv); + observe_memory(&self.inner, self.label.as_ref()); + } + } + + /// Get a cached value or compute it using `producer` and insert into cache. 
+ /// The `producer` is executed only on cache miss. + pub async fn get_or_try_insert(&self, key: CacheKey, producer: F) -> Result + where + T: serde::Serialize + serde::de::DeserializeOwned, + F: FnOnce() -> Fut, + Fut: std::future::Future>, + E: From + Debug, + { + let method_label = key.method_label(); + // calculate index; if serialization fails, always miss + let Some(index) = key.digest() else { + // serialization of key failed, always miss + warn!( + method = key.method(), + "Cache key serialization failed, skipping cache" + ); + metrics::cache_miss(self.label.as_ref(), &method_label); + return producer().await; + }; + + let cache_hit = Arc::new(AtomicBool::new(true)); + let inner_hit = cache_hit.clone(); + + let item = self + .inner + .get_or_insert_async(&index, async move { + // wrapped in async block to not execute producer immediately + // executed only on cache miss + inner_hit.store(false, Ordering::SeqCst); + + match producer().await { + Ok(v) => CachedValue::new(v) + .ok_or_else(|| DapiError::invalid_data("Failed to serialize value").into()), + Err(e) => Err(e), + } + }) + .await + .and_then(|cv| cv.value().map_err(Into::into)); + + let hit = cache_hit.load(Ordering::SeqCst); + match (hit, &item) { + (true, Ok(_)) => { + tracing::trace!(method = key.method(), "Cache hit"); + metrics::cache_hit(self.label.as_ref(), &method_label); + } + (true, Err(error)) => { + tracing::debug!( + method = key.method(), + ?error, + "Cache hit but failed to deserialize cached value, dropping entry and recording as a miss" + ); + metrics::cache_miss(self.label.as_ref(), &method_label); + self.remove(&key); + } + (false, Ok(_)) => { + tracing::trace!( + method = key.method(), + "Cache miss, value produced and cached" + ); + metrics::cache_miss(self.label.as_ref(), &method_label); + observe_memory(&self.inner, self.label.as_ref()); + } + (false, Err(error)) => { + tracing::debug!( + method = key.method(), + ?error, + "Cache miss, value production failed" + ); + 
metrics::cache_miss(self.label.as_ref(), &method_label); + } + } + + item + } +} + +#[inline(always)] +fn observe_memory(cache: &Arc>, label: &str) { + metrics::cache_memory_usage_bytes(label, cache.weight()); + metrics::cache_memory_capacity_bytes(label, cache.capacity()); + metrics::cache_entries(label, cache.len()); +} + +#[inline(always)] +/// Combine a method name and serializable key into a stable 128-bit cache key. +/// +/// Sets digest to None if serialization fails, causing all lookups to miss. +pub fn make_cache_key(method: &'static str, key: &M) -> CacheKey { + let digest = match rmp_serde::to_vec(key) { + Ok(mut data) => { + data.push(0); // separator + data.extend(method.as_bytes()); + Some(xxhash_rust::xxh3::xxh3_128(&data)) + } + Err(error) => { + debug!(?key, %error, "Failed to serialize cache key"); + None + } + }; + CacheKey { method, digest } +} + +#[cfg(test)] +mod tests { + use super::*; + use dapi_grpc::platform::v0::{ + GetStatusRequest, GetStatusResponse, get_status_request, + get_status_response::{self, GetStatusResponseV0, get_status_response_v0::Time}, + }; + use std::sync::Arc; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::time::Duration; + + #[tokio::test(flavor = "multi_thread")] + /// Test that all cache methods work as expected. + /// + /// We have hit a bug in bincode that causes deserialization to fail when used through + /// get_with_ttl. This test ensures it works correctly in that case. 
+ async fn all_cache_methods_must_work() { + // Configure tracing for the test + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::TRACE) + .with_test_writer() + .try_init(); + + // Given some cache, request, response and ttl + let cache = LruResponseCache::with_capacity("platform", ESTIMATED_ENTRY_SIZE_BYTES * 4); + let request = GetStatusRequest { + version: Some(get_status_request::Version::V0( + get_status_request::GetStatusRequestV0 {}, + )), + }; + let key = make_cache_key("get_status", &request); + + let cached_time = Time { + local: 42, + block: Some(100), + genesis: Some(200), + epoch: Some(300), + }; + + let response = GetStatusResponse { + version: Some(get_status_response::Version::V0(GetStatusResponseV0 { + time: Some(cached_time), + ..Default::default() + })), + }; + + let ttl = Duration::from_secs(30); + + // When we put the response in the cache + cache.put(key, &response); + + // Then all methods should return the cached response + // 1. Directly inspect the raw cache entry + + let inner_cached_value = cache + .inner + .get(&key.digest().expect("digest present")) + .expect("cache should contain raw entry"); + assert!( + !inner_cached_value.data.is_empty(), + "serialized cache entry should not be empty" + ); + let decoded_from_raw = inner_cached_value + .value::() + .expect("raw decode should succeed"); + assert_eq!( + decoded_from_raw, response, + "raw cache entry should deserialize to stored response" + ); + + // 2. Use the typed get method + let get_response = cache + .get::(&key) + .expect("expected plain get to succeed"); + + assert_eq!( + get_response, response, + "plain cache get should match stored response" + ); + + // 3. Use internal get_and_parse method + let (get_and_parse_response, _inserted_at) = cache + .get_and_parse::(&key) + .expect("expected get_and_parse to succeed"); + + assert_eq!( + get_and_parse_response, response, + "get_and_parse value should match stored response" + ); + + // 4. 
Use the get_with_ttl method + let get_with_ttl_response = cache + .get_with_ttl::(&key, ttl) + .expect("expected get_status response to be cached"); + + // HERE IT FAILS WITH BINCODE!!! + assert_eq!( + get_with_ttl_response, response, + "get_with_ttl cached response should match stored value" + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_or_try_insert_caches_successful_values() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::TRACE) + .with_test_writer() + .try_init(); + + let cache = LruResponseCache::with_capacity("test_cache", ESTIMATED_ENTRY_SIZE_BYTES * 2); + let key = CacheKey::new("get_u64", &"key"); + let produced_value = 1337_u64; + let producer_calls = Arc::new(AtomicUsize::new(0)); + + let initial_calls = producer_calls.clone(); + let first = cache + .get_or_try_insert::<_, _, _, DapiError>(key, || { + let initial_calls = initial_calls.clone(); + async move { + initial_calls.fetch_add(1, Ordering::SeqCst); + Ok(produced_value) + } + }) + .await + .expect("value should be produced on first call"); + + assert_eq!(first, produced_value, "produced value must be returned"); + assert_eq!( + producer_calls.load(Ordering::SeqCst), + 1, + "producer should run exactly once on cache miss" + ); + + let cached = cache + .get::(&key) + .expect("value should be cached after first call"); + assert_eq!(cached, produced_value, "cached value must match producer"); + + let follow_up_calls = producer_calls.clone(); + let second = cache + .get_or_try_insert::<_, _, _, DapiError>(key, || { + let follow_up_calls = follow_up_calls.clone(); + async move { + follow_up_calls.fetch_add(10, Ordering::SeqCst); + Ok(produced_value + 1) + } + }) + .await + .expect("cached value should be returned on second call"); + + assert_eq!( + second, produced_value, + "second call must yield cached value rather than producer result" + ); + assert_eq!( + producer_calls.load(Ordering::SeqCst), + 1, + "producer should not run again when cache contains 
value" + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_or_try_insert_does_not_cache_errors() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::TRACE) + .with_test_writer() + .try_init(); + + let cache = + LruResponseCache::with_capacity("test_cache_errors", ESTIMATED_ENTRY_SIZE_BYTES); + let key = CacheKey::new("get_error", &"key"); + let producer_calls = Arc::new(AtomicUsize::new(0)); + + let failing_calls = producer_calls.clone(); + let first_attempt: Result = cache + .get_or_try_insert::(key, || { + let failing_calls = failing_calls.clone(); + async move { + failing_calls.fetch_add(1, Ordering::SeqCst); + Err(DapiError::invalid_data("boom")) + } + }) + .await; + + assert!( + first_attempt.is_err(), + "failed producer result should be returned to caller" + ); + assert_eq!( + producer_calls.load(Ordering::SeqCst), + 1, + "producer should run once even when it errors" + ); + assert!( + cache.get::(&key).is_none(), + "failed producer must not populate the cache" + ); + + let successful_calls = producer_calls.clone(); + let expected_value = 9001_u64; + let second_attempt = cache + .get_or_try_insert::(key, || { + let successful_calls = successful_calls.clone(); + async move { + successful_calls.fetch_add(1, Ordering::SeqCst); + Ok(expected_value) + } + }) + .await + .expect("second attempt should succeed and cache value"); + + assert_eq!( + second_attempt, expected_value, + "successful producer result should be returned" + ); + assert_eq!( + producer_calls.load(Ordering::SeqCst), + 2, + "producer should run again after an error because nothing was cached" + ); + let cached = cache + .get::(&key) + .expect("successful producer must populate cache"); + assert_eq!( + cached, expected_value, + "cached value should match successful producer output" + ); + } +} diff --git a/packages/rs-dapi/src/clients/core_client.rs b/packages/rs-dapi/src/clients/core_client.rs new file mode 100644 index 00000000000..70982f60e05 --- /dev/null +++ 
b/packages/rs-dapi/src/clients/core_client.rs @@ -0,0 +1,479 @@ +use crate::cache::{LruResponseCache, make_cache_key}; +use crate::clients::REQUEST_TIMEOUT; +use crate::error::MapToDapiResult; +use crate::{DAPIResult, DapiError}; +use dashcore_rpc::{self, Auth, Client, RpcApi, dashcore, jsonrpc}; +use std::any::type_name; +use std::sync::Arc; +use tokio::select; +use tokio::sync::{OwnedSemaphorePermit, Semaphore, mpsc}; +use tokio::time::timeout; + +use tracing::trace; +use zeroize::Zeroizing; + +const CORE_RPC_GUARD_PERMITS: usize = 2; +const PERMIT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10); +#[derive(Debug, Clone)] +pub struct CoreClient { + client: Arc, + cache: LruResponseCache, + access_guard: Arc, +} + +impl CoreClient { + /// Create a Core RPC client with caching and concurrency guards. + /// Wraps the dashcore RPC client and response cache. + pub fn new( + url: String, + user: String, + pass: Zeroizing, + cache_capacity_bytes: u64, + ) -> DAPIResult { + let client = Client::new(&url, Auth::UserPass(user, pass.to_string())) + .map_err(|e| DapiError::client(format!("Failed to create Core RPC client: {}", e)))?; + Ok(Self { + client: Arc::new(client), + cache: LruResponseCache::with_capacity("core_client", cache_capacity_bytes), + access_guard: Arc::new(CoreRpcAccessGuard::new(CORE_RPC_GUARD_PERMITS)), + }) + } + + /// Execute a blocking Core RPC call inside a limited concurrency pool. 
+ async fn guarded_blocking_call(&self, op: F) -> Result, DapiError> + where + F: FnOnce(Arc) -> Result + Send + 'static, + R: Send + Sync + 'static, + E: Send + Sync + 'static, + { + let mut permit = Some(self.access_guard.acquire().await); + let client = self.client.clone(); + + let permit_deadline = tokio::time::Instant::now() + PERMIT_TIMEOUT; + + let (tx, mut rx) = mpsc::channel(1); + // let (abortable_fut, abort) = abortable(); + let task = tokio::task::spawn(timeout(REQUEST_TIMEOUT, async move { + let res = tokio::task::spawn_blocking(move || op(client)).await; + tx.send(res).await + })); + + let result = loop { + select! { + biased; + result = rx.recv() => {break result;} + + _ = tokio::time::sleep_until(permit_deadline) => { + tracing::debug!("Core RPC access guard permit wait timed out after {:?}, releasing permit", PERMIT_TIMEOUT); + drop(permit.take()); + } + } + }; + drop(permit); + // task should be done by now + task.abort(); + + result + .ok_or_else(|| { + DapiError::timeout(format!( + "Core RPC call of type {} did not complete", + type_name::() + )) + })? + .map_err(DapiError::TaskJoin) + } + + /// Retrieve the current block count from Dash Core as a `u32`. + pub async fn get_block_count(&self) -> DAPIResult { + trace!("Core RPC: get_block_count"); + let height = self + .guarded_blocking_call(|client| client.get_block_count()) + .await??; + + Ok(height as u32) + } + + /// Fetch verbose transaction metadata by txid hex string. + pub async fn get_transaction_info( + &self, + txid_hex: &str, + ) -> DAPIResult { + use std::str::FromStr; + trace!("Core RPC: get_raw_transaction_info"); + + let txid = dashcore_rpc::dashcore::Txid::from_str(txid_hex) + .map_err(|e| DapiError::InvalidArgument(format!("invalid txid: {}", e)))?; + let info = self + .guarded_blocking_call(move |client| client.get_raw_transaction_info(&txid, None)) + .await??; + Ok(info) + } + + /// Broadcast a raw transaction byte slice and return its txid hex string. 
+ pub async fn send_raw_transaction(&self, raw: &[u8]) -> DAPIResult { + trace!("Core RPC: send_raw_transaction"); + let raw_vec = raw.to_vec(); + let txid = self + .guarded_blocking_call(move |client| client.send_raw_transaction(&raw_vec)) + .await??; + Ok(txid.to_string()) + } + + /// Fetches a block hash by its height. + /// Uses caching to avoid repeated calls for the same height. + pub async fn get_block_hash( + &self, + height: u32, + ) -> DAPIResult { + use std::str::FromStr; + trace!("Core RPC: get_block_hash"); + + let key = make_cache_key("get_block_hash", &height); + + let this = self.clone(); + + let bytes = self + .cache + .get_or_try_insert::<_, _, _, DapiError>(key, move || { + let this = this.clone(); + let target_height = height; + async move { + let hash = this + .guarded_blocking_call(move |client| client.get_block_hash(target_height)) + .await??; + Ok(hash.to_string().into_bytes()) + } + }) + .await?; + + let s = String::from_utf8(bytes.to_vec()) + .map_err(|e| DapiError::client(format!("invalid utf8 in cached hash: {}", e)))?; + let hash = dashcore_rpc::dashcore::BlockHash::from_str(&s) + .map_err(|e| DapiError::client(format!("invalid cached hash: {}", e)))?; + Ok(hash) + } + + /// Fetches and decodes a block by its hash. + /// Wrapper around `get_block_bytes_by_hash` that also decodes the block. + pub async fn get_block_by_hash( + &self, + hash: dashcore_rpc::dashcore::BlockHash, + ) -> DAPIResult { + trace!("Core RPC: get_block (bytes)"); + let block_bytes = self.get_block_bytes_by_hash(hash).await?; + + dashcore::consensus::encode::deserialize(&block_bytes).map_err(|e| { + DapiError::InvalidData(format!("Failed to decode block data from core: {e}")) + }) + } + + /// Fetches a block's raw bytes by its hash. + /// Uses caching to avoid repeated calls for the same hash. 
+ pub async fn get_block_bytes_by_hash( + &self, + hash: dashcore_rpc::dashcore::BlockHash, + ) -> DAPIResult> { + trace!("Core RPC: get_block (bytes)"); + + // Use cache-or-populate with immutable key by hash + let key = make_cache_key("get_block_bytes_by_hash", &hash); + + let this = self.clone(); + let block = self + .cache + .get_or_try_insert::<_, _, _, DapiError>(key, move || { + let this = this.clone(); + let hash = hash; + async move { + // We use get_block_hex to workaround dashcore serialize/deserialize issues + // (eg. UnsupportedSegwitFlag(0), UnknownSpecialTransactionType(58385)) + let block_hex = this + .guarded_blocking_call(move |client| client.get_block_hex(&hash)) + .await??; + + hex::decode(&block_hex).map_err(|e| { + DapiError::InvalidData(format!( + "Failed to decode hex block data from core: {e}" + )) + }) + } + }) + .await?; + + Ok(block) + } + + /// Retrieve serialized block header bytes for the given block hash. + pub async fn get_block_header_bytes_by_hash( + &self, + hash: dashcore_rpc::dashcore::BlockHash, + ) -> DAPIResult> { + trace!("Core RPC: get_block_header"); + let header = self + .guarded_blocking_call(move |client| client.get_block_header(&hash)) + .await??; + + let bytes = dashcore::consensus::encode::serialize(&header); + Ok(bytes) + } + + /// Convenience helper decoding a hash hex string before fetching block bytes. + pub async fn get_block_bytes_by_hash_hex(&self, hash_hex: &str) -> DAPIResult> { + use std::str::FromStr; + if hash_hex.trim().is_empty() { + return Err(DapiError::InvalidArgument( + "hash is not specified".to_string(), + )); + } + + let hash = dashcore_rpc::dashcore::BlockHash::from_str(hash_hex) + .map_err(|e| DapiError::InvalidArgument(format!("invalid block hash: {}", e)))?; + self.get_block_bytes_by_hash(hash).await + } + + /// Fetch raw transactions (as bytes) for a block by hash without full block deserialization. 
+ pub async fn get_block_transactions_bytes_by_hash( + &self, + hash: dashcore_rpc::dashcore::BlockHash, + ) -> DAPIResult>> { + trace!("Core RPC: get_block (verbosity=2) -> tx hex list"); + + // Use cache-or-populate with immutable key by hash + let key = make_cache_key("get_block_transactions_bytes_by_hash", &hash); + + let this = self.clone(); + let transactions = self + .cache + .get_or_try_insert::<_, _, _, DapiError>(key, move || { + let this = this.clone(); + let hash_hex = hash.to_string(); + async move { + let value: serde_json::Value = this + .guarded_blocking_call(move |client| { + let params = [ + serde_json::Value::String(hash_hex), + serde_json::Value::Number(serde_json::Number::from(2)), + ]; + client.call("getblock", ¶ms) + }) + .await??; + + let obj = value.as_object().ok_or_else(|| { + DapiError::invalid_data("getblock verbosity 2 did not return an object") + })?; + let txs_val = obj.get("tx").ok_or_else(|| { + DapiError::invalid_data("getblock verbosity 2 missing 'tx' field") + })?; + let arr = txs_val + .as_array() + .ok_or_else(|| DapiError::invalid_data("getblock 'tx' is not an array"))?; + + let mut out: Vec> = Vec::with_capacity(arr.len()); + for txv in arr.iter() { + if let Some(tx_obj) = txv.as_object() + && let Some(h) = tx_obj.get("hex").and_then(|v| v.as_str()) + { + let raw = hex::decode(h).map_err(|e| { + DapiError::invalid_data(format!("invalid tx hex: {}", e)) + })?; + out.push(raw); + continue; + } + return Err(DapiError::invalid_data( + "getblock verbosity 2 'tx' entries missing 'hex'", + )); + } + Ok(out) + } + }) + .await?; + + Ok(transactions) + } + + /// List txids currently present in the Core mempool. + pub async fn get_mempool_txids(&self) -> DAPIResult> { + trace!("Core RPC: get_raw_mempool"); + self.guarded_blocking_call(|client| client.get_raw_mempool()) + .await? + .to_dapi_result() + } + + /// Retrieve a raw transaction by txid, decoding it into a `Transaction`. 
+ pub async fn get_raw_transaction( + &self, + txid: dashcore_rpc::dashcore::Txid, + ) -> DAPIResult { + trace!("Core RPC: get_raw_transaction"); + self.guarded_blocking_call(move |client| client.get_raw_transaction(&txid, None)) + .await? + .to_dapi_result() + } + + /// Fetches block header information by its hash. + /// Uses caching to avoid repeated calls for the same hash. + pub async fn get_block_header_info( + &self, + hash: &dashcore_rpc::dashcore::BlockHash, + ) -> DAPIResult { + trace!("Core RPC: get_block_header_info"); + + let key = make_cache_key("get_block_header_info", hash); + + let this = self.clone(); + let info = self + .cache + .get_or_try_insert::<_, _, _, DapiError>(key, move || { + let this = this.clone(); + let h = *hash; + async move { + let header = this + .guarded_blocking_call(move |client| client.get_block_header_info(&h)) + .await??; + let v = serde_json::to_vec(&header) + .map_err(|e| DapiError::client(format!("serialize header: {}", e)))?; + let parsed: dashcore_rpc::json::GetBlockHeaderResult = + serde_json::from_slice(&v) + .map_err(|e| DapiError::client(format!("deserialize header: {}", e)))?; + Ok(parsed) + } + }) + .await?; + + Ok(info) + } + + /// Obtain the latest ChainLock if available, tolerating Core's "not ready" response. + pub async fn get_best_chain_lock( + &self, + ) -> DAPIResult> { + trace!("Core RPC: get_best_chain_lock"); + match self + .guarded_blocking_call(|client| client.get_best_chain_lock()) + .await + { + Ok(Ok(chain_lock)) => Ok(Some(chain_lock)), + Ok(Err(dashcore_rpc::Error::JsonRpc(jsonrpc::Error::Rpc(rpc)))) + if rpc.code == -32603 => + { + // Dash Core returns -32603 when no chain lock is available yet + Ok(None) + } + Ok(Err(e)) => Err(DapiError::from(e)), + Err(e) => Err(e), + } + } + + /// Request a masternode list diff between two block hashes via `protx diff`. 
+ pub async fn mn_list_diff( + &self, + base_block: &dashcore_rpc::dashcore::BlockHash, + block: &dashcore_rpc::dashcore::BlockHash, + ) -> DAPIResult { + trace!("Core RPC: protx diff"); + let base_hex = base_block.to_string(); + let block_hex = block.to_string(); + let diff = self + .guarded_blocking_call(move |client| { + let params = [ + serde_json::Value::String("diff".to_string()), + serde_json::Value::String(base_hex), + serde_json::Value::String(block_hex), + ]; + client.call("protx", ¶ms) + }) + .await??; + Ok(diff) + } + + /// Fetch general blockchain state information from Dash Core. + pub async fn get_blockchain_info( + &self, + ) -> DAPIResult { + trace!("Core RPC: get_blockchain_info"); + let info = self + .guarded_blocking_call(|client| client.get_blockchain_info()) + .await??; + Ok(info) + } + + /// Fetch network-level statistics and connection details from Dash Core. + pub async fn get_network_info(&self) -> DAPIResult { + trace!("Core RPC: get_network_info"); + let info = self + .guarded_blocking_call(|client| client.get_network_info()) + .await??; + Ok(info) + } + + /// Estimate the smart fee in Dash per KB for the target confirmation window. + pub async fn estimate_smart_fee_btc_per_kb(&self, blocks: u16) -> DAPIResult> { + trace!("Core RPC: estimatesmartfee"); + let result = self + .guarded_blocking_call(move |client| client.estimate_smart_fee(blocks, None)) + .await??; + Ok(result.fee_rate.map(|a| a.to_dash())) + } + + /// Query the local masternode status from Dash Core. + pub async fn get_masternode_status(&self) -> DAPIResult { + trace!("Core RPC: masternode status"); + let st = self + .guarded_blocking_call(|client| client.get_masternode_status()) + .await??; + Ok(st) + } + + /// Fetch the deterministic masternode synchronization status from Dash Core. 
+ pub async fn mnsync_status(&self) -> DAPIResult { + trace!("Core RPC: mnsync status"); + let st = self + .guarded_blocking_call(|client| client.mnsync_status()) + .await??; + Ok(st) + } + + /// Retrieve the PoSe penalty score for the specified masternode ProTx hash. + pub async fn get_masternode_pos_penalty( + &self, + pro_tx_hash_hex: &str, + ) -> DAPIResult> { + use std::collections::HashMap; + trace!("Core RPC: masternode list (filter)"); + let filter = pro_tx_hash_hex.to_string(); + let map: HashMap = self + .guarded_blocking_call(move |client| { + client.get_masternode_list(Some("json"), Some(&filter)) + }) + .await??; + + // Find the entry matching the filter + if let Some((_k, v)) = map.into_iter().next() { + return Ok(Some(v.pos_penalty_score)); + } + Ok(None) + } +} + +#[derive(Debug)] +struct CoreRpcAccessGuard { + semaphore: Arc, +} + +impl CoreRpcAccessGuard { + /// Construct a semaphore-backed guard limiting concurrent Core RPC calls. + fn new(max_concurrent: usize) -> Self { + Self { + semaphore: Arc::new(Semaphore::new(max_concurrent.max(1))), + } + } + + /// Acquire a permit, ensuring at most `max_concurrent` active RPC requests. 
+ async fn acquire(&self) -> OwnedSemaphorePermit { + self.semaphore + .clone() + .acquire_owned() + .await + .expect("Core RPC access guard semaphore not closed") + } +} diff --git a/packages/rs-dapi/src/clients/drive_client.rs b/packages/rs-dapi/src/clients/drive_client.rs new file mode 100644 index 00000000000..e3189f6dd09 --- /dev/null +++ b/packages/rs-dapi/src/clients/drive_client.rs @@ -0,0 +1,273 @@ +use std::sync::Arc; + +use dapi_grpc::drive::v0::drive_internal_client::DriveInternalClient; +use dapi_grpc::platform::v0::{GetStatusRequest, platform_client::PlatformClient}; +use serde::{Deserialize, Serialize}; + +use tower::ServiceBuilder; +use tower_http::{ + LatencyUnit, + trace::{ + DefaultMakeSpan, DefaultOnBodyChunk, DefaultOnEos, DefaultOnFailure, DefaultOnRequest, + DefaultOnResponse, Trace, TraceLayer, + }, +}; +use tracing::{Level, debug, error, info, trace, warn}; + +/// gRPC client factory for interacting with Dash Platform Drive +/// +/// ## Cloning +/// +/// This client is designed to be cloned cheaply. No need to use `Arc` or `Rc`. 
+#[derive(Clone)] +pub struct DriveClient { + client: PlatformClient, + internal_client: DriveInternalClient, + // base url stored as an Arc for faster cloning + base_url: Arc, +} + +impl std::fmt::Debug for DriveClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DriveClient") + .field("base_url", &self.base_url) + .finish() + } +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveStatusResponse { + pub version: Option, + pub chain: Option, + pub time: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveVersion { + pub software: Option, + pub protocol: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveSoftware { + pub drive: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveProtocol { + pub drive: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveProtocolVersion { + pub current: Option, + pub latest: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveChain { + #[serde(rename = "coreChainLockedHeight")] + pub core_chain_locked_height: Option, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct DriveTime { + pub block: Option, + pub genesis: Option, + pub epoch: Option, +} + +pub type DriveChannel = Trace< + tonic::transport::Channel, + tower_http::classify::SharedClassifier, + DefaultMakeSpan, + DefaultOnRequest, + DefaultOnResponse, + DefaultOnBodyChunk, +>; + +impl DriveClient { + /// Create a new DriveClient with gRPC request tracing and connection reuse. + /// + /// This method attempts to validate the connection by making a test gRPC call to ensure + /// the Drive service is reachable and responding correctly. If the Drive + /// service cannot be reached, the error is logged and the client is still returned so the + /// caller can operate in a degraded mode while health checks surface the issue. 
+ pub async fn new(uri: &str) -> Result { + info!("Creating Drive client for: {}", uri); + let channel = Self::create_channel(uri)?; + + // Configure clients with larger message sizes. + // Compression (gzip) is intentionally DISABLED at rs-dapi level; Envoy handles it. + info!("Drive client compression: disabled (handled by Envoy)"); + const MAX_DECODING_BYTES: usize = 64 * 1024 * 1024; // 64 MiB + const MAX_ENCODING_BYTES: usize = 32 * 1024 * 1024; // 32 MiB + + let client = Self { + base_url: Arc::new(uri.to_string()), + client: PlatformClient::new(channel.clone()) + .max_decoding_message_size(MAX_DECODING_BYTES) + .max_encoding_message_size(MAX_ENCODING_BYTES), + internal_client: DriveInternalClient::new(channel.clone()) + .max_decoding_message_size(MAX_DECODING_BYTES) + .max_encoding_message_size(MAX_ENCODING_BYTES), + }; + + // Validate connection by making a test status call; log warnings but allow degraded operation. + trace!("Validating Drive connection at: {}", uri); + let test_request = GetStatusRequest { version: None }; + match client.get_drive_status(&test_request).await { + Ok(_) => { + debug!("Drive connection validated successfully"); + } + Err(e) => { + warn!( + error = %e, + "Failed to validate Drive connection; continuing with degraded health" + ); + } + } + + Ok(client) + } + + /// Build a traced gRPC channel to Drive with error normalization. 
+ fn create_channel(uri: &str) -> Result { + let endpoint = dapi_grpc::tonic::transport::Endpoint::from_shared(uri.to_string()) + .map_err(|e| { + error!("Invalid Drive service URI {}: {}", uri, e); + tonic::Status::invalid_argument(format!("Invalid URI: {}", e)) + })?; + + let raw_channel = endpoint.connect_lazy(); + + let channel: Trace< + tonic::transport::Channel, + tower_http::classify::SharedClassifier, + DefaultMakeSpan, + DefaultOnRequest, + DefaultOnResponse, + DefaultOnBodyChunk, + > = ServiceBuilder::new() + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().include_headers(true)) + .on_request(DefaultOnRequest::new().level(Level::TRACE)) + .on_response( + DefaultOnResponse::new() + .level(Level::INFO) + .latency_unit(LatencyUnit::Micros), + ) + .on_failure(DefaultOnFailure::new().level(Level::WARN)) + .on_eos(DefaultOnEos::new().level(Level::DEBUG)) + .on_body_chunk(DefaultOnBodyChunk::new()), + ) + .service(raw_channel); + + Ok(channel) + } + + /// Call the Drive `getStatus` endpoint and map the response into simplified structs. 
+ pub async fn get_drive_status( + &self, + request: &GetStatusRequest, + ) -> Result { + trace!("Making get_status gRPC call to Drive"); + // Make gRPC call to Drive with timing + let drive_response = self.get_client().get_status(*request).await?.into_inner(); + + // Convert Drive's GetStatusResponse to our DriveStatusResponse format + if let Some(dapi_grpc::platform::v0::get_status_response::Version::V0(v0)) = + drive_response.version + { + let mut drive_status = DriveStatusResponse::default(); + + // Extract version information + if let Some(version) = v0.version { + let mut drive_version = DriveVersion::default(); + + if let Some(software) = version.software { + drive_version.software = Some(DriveSoftware { + drive: software.drive, + }); + } + + if let Some(protocol) = version.protocol + && let Some(drive_proto) = protocol.drive + { + drive_version.protocol = Some(DriveProtocol { + drive: Some(DriveProtocolVersion { + current: Some(drive_proto.current as u64), + latest: Some(drive_proto.latest as u64), + }), + }); + } + + drive_status.version = Some(drive_version); + } + + // Extract chain information + if let Some(chain) = v0.chain { + drive_status.chain = Some(DriveChain { + core_chain_locked_height: chain.core_chain_locked_height.map(|h| h as u64), + }); + } + + // Extract time information + if let Some(time) = v0.time { + drive_status.time = Some(DriveTime { + block: Some(time.local), + genesis: time.genesis, + epoch: time.epoch.map(|e| e as u64), + }); + } + + Ok(drive_status) + } else { + Err(tonic::Status::internal( + "Drive returned unexpected response format", + )) + } + } + + /// Return a clone of the public Platform gRPC client. + pub fn get_client(&self) -> PlatformClient { + self.client.clone() + } + + /// Return a clone of the internal Drive gRPC client. 
+ pub fn get_internal_client(&self) -> DriveInternalClient { + self.internal_client.clone() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_drive_client_tracing_integration() { + // Test that DriveClient can be created with tracing interceptor + // Note: This should succeed even if no server is running; connectivity validation logs a warning. + match DriveClient::new("http://localhost:1443").await { + Ok(client) => { + // If connection succeeds, verify the structure + assert_eq!(client.base_url.to_string(), "http://localhost:1443"); + } + Err(_) => { + // Expected when no server is running - this is okay for unit tests + // The important thing is that the method signature and error handling work + } + } + + // Note: In a real integration test with a running Drive instance, + // you would see tracing logs like: + // [TRACE] Sending gRPC request + // [TRACE] gRPC request successful (status: OK, duration: 45ms) + // + // The interceptor and log_grpc_result function automatically log: + // - Request method and timing + // - Response status and duration + // - Error classification (technical vs service errors) + } +} diff --git a/packages/rs-dapi/src/clients/mod.rs b/packages/rs-dapi/src/clients/mod.rs new file mode 100644 index 00000000000..bc538be8ef4 --- /dev/null +++ b/packages/rs-dapi/src/clients/mod.rs @@ -0,0 +1,16 @@ +pub mod core_client; +pub mod drive_client; +pub mod tenderdash_client; +pub mod tenderdash_websocket; + +use std::time::Duration; + +pub use core_client::CoreClient; +pub use drive_client::DriveClient; +pub use tenderdash_client::TenderdashClient; +pub use tenderdash_websocket::{TenderdashWebSocketClient, TransactionEvent, TransactionResult}; + +/// Default timeout for all Tenderdash HTTP requests +const REQUEST_TIMEOUT: Duration = Duration::from_secs(30); +/// Connection timeout for establishing HTTP connections; as we do local, 1s is enough +const CONNECT_TIMEOUT: Duration = Duration::from_secs(1); diff 
--git a/packages/rs-dapi/src/clients/tenderdash_client.rs b/packages/rs-dapi/src/clients/tenderdash_client.rs new file mode 100644 index 00000000000..2ab491ee5e9 --- /dev/null +++ b/packages/rs-dapi/src/clients/tenderdash_client.rs @@ -0,0 +1,528 @@ +use super::tenderdash_websocket::{TenderdashWebSocketClient, TransactionEvent}; +use crate::clients::tenderdash_websocket::BlockEvent; +use crate::clients::{CONNECT_TIMEOUT, REQUEST_TIMEOUT}; +use crate::error::{DAPIResult, DapiError}; +use crate::utils::{ + deserialize_string_number_or_null, deserialize_string_or_number, generate_jsonrpc_id, +}; +use reqwest::Client; +use reqwest_middleware::{ClientBuilder, ClientWithMiddleware}; +use reqwest_tracing::TracingMiddleware; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use std::fmt::Debug; +use std::sync::Arc; +use tokio::sync::broadcast; +use tracing::{debug, error, info, trace, warn}; + +#[derive(Debug, Clone)] +/// HTTP client for interacting with Tenderdash consensus engine +/// +/// This client includes automatic HTTP request/response tracing via reqwest-tracing middleware. 
+/// All HTTP requests will be logged at TRACE level with: +/// - Request method, URL, and headers +/// - Response status code, timing, and size +/// - Error details for failed requests +/// +/// Error handling follows client-layer architecture: +/// - Technical failures (network errors, timeouts) are logged with `tracing::error!` +/// - Service errors (HTTP error codes) are logged with `tracing::debug!` +pub struct TenderdashClient { + client: ClientWithMiddleware, + base_url: String, + websocket_client: Arc, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TenderdashResponse { + pub jsonrpc: String, + pub id: Value, + pub result: Option, + pub error: Option, +} + +#[derive(Debug, Serialize)] +struct JsonRpcRequest { + jsonrpc: &'static str, + method: &'static str, + params: T, + id: String, +} + +impl JsonRpcRequest { + fn new(method: &'static str, params: T) -> Self { + Self { + jsonrpc: "2.0", + method, + params, + id: generate_jsonrpc_id(), + } + } +} + +#[derive(Debug, Serialize, Default)] +struct EmptyParams {} + +#[derive(Debug, Serialize)] +struct BroadcastTxParams<'a> { + tx: &'a str, +} + +#[derive(Debug, Serialize)] +struct CheckTxParams<'a> { + tx: &'a str, +} + +#[derive(Debug, Serialize)] +struct UnconfirmedTxParams<'a> { + hash: &'a str, +} + +#[derive(Debug, Serialize)] +struct TxParams<'a> { + hash: &'a str, + prove: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ResultStatus { + #[serde(default)] + pub node_info: NodeInfo, + #[serde(default)] + pub application_info: ApplicationInfo, + #[serde(default)] + pub sync_info: SyncInfo, + #[serde(default)] + pub validator_info: ValidatorInfo, + #[serde(default)] + pub light_client_info: Value, +} + +pub type TenderdashStatusResponse = ResultStatus; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ApplicationInfo { + #[serde(default)] + pub version: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct NodeInfo 
{ + #[serde(default)] + pub protocol_version: ProtocolVersion, + #[serde(default)] + pub id: String, + #[serde(default)] + pub listen_addr: String, + #[serde(rename = "ProTxHash", default)] + pub pro_tx_hash: String, + #[serde(default)] + pub network: String, + #[serde(default)] + pub version: String, + #[serde(default)] + pub channels: Vec, + #[serde(default)] + pub moniker: String, + #[serde(default)] + pub other: NodeInfoOther, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct NodeInfoOther { + #[serde(default)] + pub tx_index: String, + #[serde(default)] + pub rpc_address: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ProtocolVersion { + #[serde(default)] + pub p2p: String, + #[serde(default)] + pub block: String, + #[serde(default)] + pub app: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct SyncInfo { + #[serde(default)] + pub latest_block_hash: String, + #[serde(default)] + pub latest_app_hash: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub latest_block_height: i64, + #[serde(default)] + pub latest_block_time: String, + #[serde(default)] + pub earliest_block_hash: String, + #[serde(default)] + pub earliest_app_hash: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub earliest_block_height: i64, + #[serde(default)] + pub earliest_block_time: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub max_peer_block_height: i64, + #[serde(default)] + pub catching_up: bool, + #[serde(default)] + pub total_synced_time: String, + #[serde(default)] + pub remaining_time: String, + #[serde(default)] + pub total_snapshots: String, + #[serde(default)] + pub chunk_process_avg_time: String, + #[serde(default)] + pub snapshot_height: String, + #[serde(default)] + pub snapshot_chunks_count: String, + #[serde(rename = "backfilled_blocks", default)] + pub backfilled_blocks: String, + 
#[serde(rename = "backfill_blocks_total", default)] + pub backfill_blocks_total: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ValidatorInfo { + #[serde(default)] + pub pro_tx_hash: String, + #[serde(default)] + pub voting_power: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ResultNetInfo { + #[serde(default)] + pub listening: bool, + #[serde(default)] + pub listeners: Vec, + #[serde( + rename = "n_peers", + default, + deserialize_with = "deserialize_string_or_number" + )] + pub n_peers: u32, + #[serde(default)] + pub peers: Vec, +} + +pub type NetInfoResponse = ResultNetInfo; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct Peer { + #[serde(rename = "node_id", default)] + pub node_id: String, + #[serde(default)] + pub url: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ResultBroadcastTx { + #[serde(default)] + pub code: u32, + #[serde(default)] + pub data: String, + #[serde(default)] + pub codespace: String, + #[serde(default)] + pub hash: String, + #[serde(default)] + pub info: String, +} + +pub type BroadcastTxResponse = ResultBroadcastTx; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ResultCheckTx { + #[serde(default)] + pub code: u32, + #[serde(default)] + pub data: String, + #[serde(default)] + pub info: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub gas_wanted: i64, + #[serde(default)] + pub codespace: String, + #[serde(default)] + pub sender: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub priority: i64, +} + +pub type CheckTxResponse = ResultCheckTx; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ResultUnconfirmedTx { + #[serde(default)] + pub tx: String, +} + +pub type UnconfirmedTxResponse = ResultUnconfirmedTx; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ResultTx { + 
#[serde(default)] + pub hash: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub height: i64, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub index: u32, + #[serde(rename = "tx_result", default)] + pub tx_result: ExecTxResult, + #[serde(default)] + pub tx: String, + #[serde(default)] + pub proof: Value, +} + +pub type TxResponse = ResultTx; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ExecTxResult { + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub code: u32, + #[serde(default, deserialize_with = "deserialize_string_number_or_null")] + pub data: String, + #[serde(default, deserialize_with = "deserialize_string_number_or_null")] + pub info: String, + #[serde(default, deserialize_with = "deserialize_string_number_or_null")] + pub log: String, + #[serde(default, deserialize_with = "deserialize_string_or_number")] + pub gas_used: i64, + #[serde(default, deserialize_with = "deserialize_string_number_or_null")] + pub codespace: String, + #[serde(default)] + pub events: Vec, +} + +impl ExecTxResult { + /// Check if all fields are at their default values. Useful to detect absent results. + pub fn is_empty(&self) -> bool { + self.code == 0 + && self.data.is_empty() + && self.info.is_empty() + && self.log.is_empty() + && self.gas_used == 0 + && self.codespace.is_empty() + && self.events.is_empty() + } +} + +pub type TxResult = ExecTxResult; + +impl TenderdashClient { + /// Generic POST method for Tenderdash RPC calls + /// Serializes the request, performs the call, and maps protocol errors to `DapiError`. 
+ async fn post(&self, request: &R) -> DAPIResult + where + T: serde::de::DeserializeOwned + Debug, + R: Serialize + Debug, + { + let start = tokio::time::Instant::now(); + + let response_body = self + .client + .post(&self.base_url) + .header("Content-Type", "application/json") + .json(request) + .send() + .await + .map_err(|e| { + error!( + "Failed to send request to Tenderdash at {}: {}", + self.base_url, e + ); + DapiError::Client(format!("Failed to send request: {}", e)) + })? + .text() + .await + .map_err(|e| { + error!("Failed to read Tenderdash response body: {}", e); + DapiError::Client(format!("Failed to read response body: {}", e)) + })?; + + let response: TenderdashResponse = + serde_json::from_str(&response_body).map_err(|e| { + error!( + "Failed to parse Tenderdash response: {}; full body: {}", + e, response_body + ); + DapiError::Client(format!("Failed to parse response: {}", e)) + })?; + + tracing::trace!( + elapsed = ?start.elapsed(), + request = ?request, + response = ?response, + "tenderdash_client request executed"); + + if let Some(error) = response.error { + debug!("Tenderdash RPC returned error: {}", error); + return Err(DapiError::from_tenderdash_error(error)); + } + + response.result.ok_or_else(|| { + DapiError::Client("Tenderdash response missing result field".to_string()) + }) + } + + /// Create a new TenderdashClient with HTTP and WebSocket support. + /// + /// This method attempts to validate both HTTP and WebSocket connectivity before returning. + /// If either check fails, the error is logged and the client is still returned so callers can + /// operate in a degraded state while health checks surface the issue. 
+ pub async fn new(uri: &str, ws_uri: &str) -> DAPIResult { + trace!( + uri = %uri, + ws_uri = %ws_uri, + "Creating Tenderdash client with WebSocket support" + ); + + let http_client = Client::builder() + .connect_timeout(CONNECT_TIMEOUT) + .timeout(REQUEST_TIMEOUT) + .build() + .map_err(|e| { + error!("Failed to build Tenderdash HTTP client: {}", e); + DapiError::Client(format!("Failed to build Tenderdash HTTP client: {}", e)) + })?; + + let client = ClientBuilder::new(http_client) + .with(TracingMiddleware::default()) + .build(); + + let websocket_client = Arc::new(TenderdashWebSocketClient::new(ws_uri.to_string(), 256)); + + let tenderdash_client = Self { + client, + base_url: uri.to_string(), + websocket_client: websocket_client.clone(), + }; + + if let Err(e) = tenderdash_client.validate_connection().await { + warn!( + error = %e, + "Tenderdash HTTP connection validation failed; continuing with degraded health" + ); + } + + if let Err(e) = TenderdashWebSocketClient::test_connection(ws_uri).await { + warn!( + error = %e, + "Tenderdash WebSocket connection validation failed; continuing with retries" + ); + } + + Ok(tenderdash_client) + } + + /// Perform a lightweight status call to ensure the Tenderdash HTTP endpoint is reachable. + async fn validate_connection(&self) -> DAPIResult<()> { + // Validate HTTP connection by making a test status call + trace!( + "Validating Tenderdash HTTP connection at: {}", + self.base_url + ); + match self.status().await { + Ok(_) => { + info!("Tenderdash HTTP connection validated successfully"); + Ok(()) + } + Err(e) => Err(DapiError::server_unavailable( + self.base_url.clone(), + e.to_string(), + )), + } + } + + /// Query Tenderdash for node and sync status information via JSON-RPC `status`. 
+ pub async fn status(&self) -> DAPIResult { + trace!("Making status request to Tenderdash at: {}", self.base_url); + let request = JsonRpcRequest::new("status", EmptyParams::default()); + + self.post(&request).await + } + + /// Retrieve network peer statistics, falling back to defaults on transport errors. + pub async fn net_info(&self) -> DAPIResult { + match self.net_info_internal().await { + Ok(netinfo) => { + trace!("Successfully retrieved Tenderdash net_info"); + Ok(netinfo) + } + Err(e) => { + error!( + error = ?e, + "Failed to get Tenderdash net_info - technical failure, returning defaults" + ); + Ok(NetInfoResponse::default()) + } + } + } + + /// Internal helper that performs the `net_info` RPC call without error masking. + async fn net_info_internal(&self) -> DAPIResult { + let request = JsonRpcRequest::new("net_info", EmptyParams::default()); + + self.post(&request).await + } + + /// Broadcast a transaction to the Tenderdash network + pub async fn broadcast_tx(&self, tx: String) -> DAPIResult { + trace!("Broadcasting transaction to Tenderdash: {} bytes", tx.len()); + let params = BroadcastTxParams { tx: tx.as_str() }; + let request = JsonRpcRequest::new("broadcast_tx_sync", params); + + self.post(&request).await + } + + /// Check a transaction without adding it to the mempool + pub async fn check_tx(&self, tx: String) -> DAPIResult { + let params = CheckTxParams { tx: tx.as_str() }; + let request = JsonRpcRequest::new("check_tx", params); + + self.post(&request).await + } + + /// Get a single unconfirmed transaction by its hash + pub async fn unconfirmed_tx(&self, hash: &str) -> DAPIResult { + let params = UnconfirmedTxParams { hash }; + let request = JsonRpcRequest::new("unconfirmed_tx", params); + + self.post(&request).await + } + + /// Get transaction by hash + pub async fn tx(&self, hash: String) -> DAPIResult { + let params = TxParams { + hash: hash.as_str(), + prove: false, + }; + let request = JsonRpcRequest::new("tx", params); + + 
self.post(&request).await + } + /// Subscribe to streaming Tenderdash transaction events if WebSocket is available. + pub fn subscribe_to_transactions(&self) -> broadcast::Receiver { + self.websocket_client.subscribe() + } + /// Subscribe to block events from Tenderdash via WebSocket. + pub fn subscribe_to_blocks(&self) -> broadcast::Receiver { + self.websocket_client.subscribe_blocks() + } + + /// Return whether the internal WebSocket client currently maintains a connection. + pub fn is_websocket_connected(&self) -> bool { + self.websocket_client.is_connected() + } + + /// Return a clone of the underlying WebSocket client to allow shared listeners. + pub fn websocket_client(&self) -> Arc { + self.websocket_client.clone() + } +} diff --git a/packages/rs-dapi/src/clients/tenderdash_websocket.rs b/packages/rs-dapi/src/clients/tenderdash_websocket.rs new file mode 100644 index 00000000000..be6f761cc5d --- /dev/null +++ b/packages/rs-dapi/src/clients/tenderdash_websocket.rs @@ -0,0 +1,750 @@ +use crate::{ + DAPIResult, DapiError, + clients::{CONNECT_TIMEOUT, tenderdash_client::ExecTxResult}, + utils::{deserialize_string_or_number, deserialize_to_string, generate_jsonrpc_id}, +}; +use futures::{SinkExt, StreamExt}; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeSet; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; +use tokio::{sync::broadcast, time::timeout}; +use tokio_tungstenite::{connect_async, tungstenite::Message}; +use tracing::{debug, error, info, trace, warn}; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TransactionEvent { + pub hash: String, + pub height: u64, + pub result: TransactionResult, + pub tx: Option>, +} + +/// Block event placeholder (TODO) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockEvent {} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum TransactionResult { + Success, + Error { + code: u32, + info: String, + data: Option, + }, +} + +#[derive(Debug, Clone, 
Serialize, Deserialize)] +struct TenderdashWsMessage { + jsonrpc: String, + id: Option, + result: Option, + error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct EventData { + #[serde(rename = "type")] + event_type: String, + value: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct TxEvent { + #[serde(deserialize_with = "deserialize_string_or_number")] + height: u64, + tx: Option, + result: Option, + events: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct EventAttribute { + key: String, + #[serde(deserialize_with = "deserialize_to_string")] + value: String, +} + +#[derive(Debug)] +pub struct TenderdashWebSocketClient { + ws_url: String, + event_sender: broadcast::Sender, + is_connected: Arc, + block_sender: broadcast::Sender, +} + +impl TenderdashWebSocketClient { + /// Create a WebSocket client with broadcast channels for transactions and blocks. + pub fn new(ws_url: String, buffer_size: usize) -> Self { + let (event_sender, _) = broadcast::channel(buffer_size); + let (block_sender, _) = broadcast::channel(buffer_size); + + Self { + ws_url, + event_sender, + is_connected: Arc::new(AtomicBool::new(false)), + block_sender, + } + } + + /// Subscribe to transaction event updates emitted by the listener. + pub fn subscribe(&self) -> broadcast::Receiver { + self.event_sender.subscribe() + } + + /// Indicate whether a WebSocket connection is currently active. + pub fn is_connected(&self) -> bool { + self.is_connected.load(Ordering::Relaxed) + } + + /// Subscribe to Tenderdash new-block notifications. 
+ pub fn subscribe_blocks(&self) -> broadcast::Receiver { + self.block_sender.subscribe() + } + + /// Test WebSocket connection without establishing a persistent connection + pub async fn test_connection(ws_url: &str) -> DAPIResult<()> { + tracing::trace!("Testing WebSocket connection to {}", ws_url); + + // Validate URL format + let _url = url::Url::parse(ws_url)?; + + // Try to connect + let (mut ws_stream, _) = timeout(CONNECT_TIMEOUT, connect_async(ws_url)) + .await + .map_err(|e| { + DapiError::timeout(format!("WebSocket connection test timed out: {e}")) + })??; + + ws_stream + .close(None) + .await + .map_err(|e| DapiError::Client(format!("WebSocket connection close failed: {e}")))?; + tracing::trace!("WebSocket connection test successful"); + Ok(()) + } + + /// Establish a WebSocket connection, subscribe to events, and forward messages to subscribers. + pub async fn connect_and_listen(&self) -> DAPIResult<()> { + tracing::trace!(ws_url = self.ws_url, "Connecting to Tenderdash WebSocket"); + + // Validate URL format + let _url = url::Url::parse(&self.ws_url)?; + let (ws_stream, _) = timeout(CONNECT_TIMEOUT, connect_async(&self.ws_url)) + .await + .map_err(|e| DapiError::timeout(format!("WebSocket connect timed out: {e}")))??; + + self.is_connected.store(true, Ordering::Relaxed); + tracing::debug!(ws_url = self.ws_url, "Connected to Tenderdash WebSocket"); + + let (mut ws_sender, mut ws_receiver) = ws_stream.split(); + + // Subscribe to transaction events + let subscribe_msg = serde_json::json!({ + "jsonrpc": "2.0", + "method": "subscribe", + "id": generate_jsonrpc_id(), + "params": { + "query": "tm.event = 'Tx'" + } + }); + + ws_sender + .send(Message::Text(subscribe_msg.to_string())) + .await?; + + // Subscribe to new block events + let subscribe_block_msg = serde_json::json!({ + "jsonrpc": "2.0", + "method": "subscribe", + "id": generate_jsonrpc_id(), + "params": { + "query": "tm.event = 'NewBlock'" + } + }); + ws_sender + 
.send(Message::Text(subscribe_block_msg.to_string())) + .await?; + + debug!("Subscribed to Tenderdash transaction events"); + + let event_sender = self.event_sender.clone(); + let is_connected = Arc::clone(&self.is_connected); + + // Listen for messages + while let Some(msg) = ws_receiver.next().await { + match msg { + Ok(Message::Text(text)) => { + if let Err(e) = self.handle_message(&text, &event_sender).await { + warn!("Failed to handle WebSocket message: {}", e); + } + } + Ok(Message::Close(_)) => { + info!("WebSocket connection closed"); + break; + } + Err(e) => { + error!("WebSocket error: {}", e); + break; + } + _ => { + // Ignore other message types (ping, pong, binary) + } + } + } + + is_connected.store(false, Ordering::Relaxed); + info!("Disconnected from Tenderdash WebSocket"); + + Ok(()) + } + + /// Process a raw WebSocket message, dispatching block and transaction events. + async fn handle_message( + &self, + message: &str, + event_sender: &broadcast::Sender, + ) -> DAPIResult<()> { + let ws_message: TenderdashWsMessage = serde_json::from_str(message).inspect_err(|e| { + debug!( + "Failed to parse WebSocket message as TenderdashWsMessage: {}", + e + ); + trace!("Raw message: {}", message); + })?; + + // Skip subscription confirmations and other non-event messages + if ws_message.result.is_none() { + return Ok(()); + } + + let result = ws_message.result.unwrap(); + + // NewBlock notifications include a query matching NewBlock + if let Some(query) = result.get("query").and_then(|q| q.as_str()) + && query.contains("NewBlock") + { + let _ = self.block_sender.send(BlockEvent {}); + return Ok(()); + } + + // Check if this is a tx event message + if result.get("events").is_some() + && let Some(data) = result.get("data") + && let Some(value) = data.get("value") + { + return self.handle_tx_event(value, event_sender, &result).await; + } + + Ok(()) + } + + /// Convert a Tenderdash transaction event payload into broadcastable events. 
+ async fn handle_tx_event( + &self, + event_data: &serde_json::Value, + event_sender: &broadcast::Sender, + outer_result: &serde_json::Value, + ) -> DAPIResult<()> { + let tx_event: TxEvent = serde_json::from_value(event_data.clone())?; + + // Extract all transaction hashes from events + let hashes = self.extract_all_tx_hashes(&tx_event.events, outer_result)?; + + if hashes.is_empty() { + warn!( + ?tx_event, + "No transaction hashes found in event attributes for event.", + ); + return Err(DapiError::TransactionHashNotFound); + } + + // Log if we found multiple hashes (unusual case) + if hashes.len() > 1 { + warn!( + "Found {} transaction hashes in single WebSocket message: {:?}", + hashes.len(), + hashes + ); + } + + // Process each hash (typically just one) + for hash in hashes { + let height = tx_event.height; + + // Decode transaction if present + let tx: Option> = if let Some(tx_base64) = &tx_event.tx { + Some(base64::prelude::Engine::decode( + &base64::prelude::BASE64_STANDARD, + tx_base64, + )?) + } else { + None + }; + + // Determine transaction result + let result = if let Some(tx_result) = &tx_event.result { + if tx_result.code == 0 { + TransactionResult::Success + } else { + TransactionResult::Error { + code: tx_result.code, + info: tx_result.info.clone(), + data: if tx_result.data.is_empty() { + None + } else { + Some(tx_result.data.clone()) + }, + } + } + } else { + TransactionResult::Success + }; + + let transaction_event = TransactionEvent { + hash: hash.clone(), + height, + result: result.clone(), + tx: tx.clone(), + }; + + debug!(hash = %hash, "Broadcasting transaction event for hash"); + + // Broadcast the event (ignore if no subscribers) + let _ = event_sender.send(transaction_event); + } + + Ok(()) + } + + /// Gather unique transaction hashes from outer and inner event attribute sets. 
+ fn extract_all_tx_hashes( + &self, + inner_events: &Option>, + outer_result: &serde_json::Value, + ) -> DAPIResult> { + let mut hashes = Vec::new(); + + // First extract from outer events (result.events) - this is the primary location + if let Some(outer_events) = outer_result.get("events").and_then(|e| e.as_array()) { + for event in outer_events { + if let Some(event_type) = event.get("type").and_then(|t| t.as_str()) + && event_type == "tx" + && let Some(attributes) = event.get("attributes").and_then(|a| a.as_array()) + { + for attr in attributes { + if let (Some(key), Some(value)) = ( + attr.get("key").and_then(|k| k.as_str()), + attr.get("value").and_then(|v| v.as_str()), + ) && key == "hash" + { + hashes.push(normalize_event_hash(value)); + } + } + } + } + } + + // Also check inner events (TxEvent.events) as fallback + if let Some(events) = inner_events { + for event in events { + if event.key == "hash" { + hashes.push(normalize_event_hash(&event.value)); + } + } + } + + // Remove duplicates while preserving order efficiently + let mut seen = BTreeSet::new(); + let unique_hashes: Vec = hashes + .into_iter() + .filter(|hash| seen.insert(hash.clone())) + .collect(); + + Ok(unique_hashes) + } +} + +/// Normalize hash strings by trimming prefixes and uppercasing hexadecimal characters. 
+fn normalize_event_hash(value: &str) -> String { + let trimmed = value.trim(); + let without_prefix = trimmed + .strip_prefix("0x") + .or_else(|| trimmed.strip_prefix("0X")) + .unwrap_or(trimmed); + + if without_prefix.chars().all(|c| c.is_ascii_hexdigit()) { + without_prefix.to_uppercase() + } else { + without_prefix.to_string() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_tx_event_deserialization_with_string_height() { + let json_data = json!({ + "height": "12345", + "tx": "dGVzdA==", + "result": { + "code": 0, + "data": null, + "info": "", + "log": "" + }, + "events": [] + }); + + let tx_event: TxEvent = serde_json::from_value(json_data).unwrap(); + assert_eq!(tx_event.height, 12345); + } + + #[test] + fn test_tx_event_deserialization_with_integer_height() { + let json_data = json!({ + "height": 12345, + "tx": "dGVzdA==", + "result": { + "code": 0, + "data": null, + "info": "", + "log": "" + }, + "events": [] + }); + + let tx_event: TxEvent = serde_json::from_value(json_data).unwrap(); + assert_eq!(tx_event.height, 12345); + } + + #[test] + fn test_tx_result_deserialization_with_string_code() { + let json_data = json!({ + "code": "1005", + "data": null, + "info": "test error", + "log": "" + }); + + let tx_result: ExecTxResult = serde_json::from_value(json_data).unwrap(); + assert_eq!(tx_result.code, 1005); + } + + #[test] + fn test_tx_result_deserialization_with_integer_code() { + let json_data = json!({ + "code": 1005, + "data": null, + "info": "test error", + "log": "" + }); + + let tx_result: ExecTxResult = serde_json::from_value(json_data).unwrap(); + assert_eq!(tx_result.code, 1005); + } + + #[test] + fn test_tx_result_deserialization_with_missing_code() { + let json_data = json!({ + "gas_used": 905760, + "data": null, + "info": "", + "log": "" + }); + + let tx_result: ExecTxResult = serde_json::from_value(json_data).unwrap(); + assert_eq!(tx_result.code, 0); // Should default to 0 (success) + } + + 
#[test] + fn test_real_websocket_message_deserialization() { + // This is the actual WebSocket message that was causing the "missing field `code`" error + let json_data = json!({ + "height": 1087, + "tx": "BwBKtJbhBYdn6SJx+oezzOb0KjQAhV2vh0pXlAsN3u0soZ1vsfjXvOK0TA6z9UnzQoIRj2entd3N2XUQ8qmFOYML/DuaygABAANBIIBqaHzVMKT/AvClrEuKY6/kwgtQmZmaOGSOrLqGEhrBVf62e/mcTkqIrUruBQ/xdtxDYs0tj/32zt+yVTJH7j8=", + "result": { + "gas_used": 905760 + // Note: no "code" field - should default to 0 + }, + "events": [ + { + "key": "hash", + "value": "13F2EF4097320B234DECCEF063FDAE6A0845AF4380CEC15F2185CE9FACC6EBD5" + }, + { + "key": "height", + "value": "1087" + } + ] + }); + + let tx_event: TxEvent = serde_json::from_value(json_data).unwrap(); + + // Verify all fields are correctly deserialized + assert_eq!(tx_event.height, 1087); + assert!(tx_event.tx.is_some()); + + // Verify the result has default code of 0 (success) + let result = tx_event.result.unwrap(); + assert_eq!(result.code, 0); + + // Verify events are correctly parsed + let events = tx_event.events.unwrap(); + assert_eq!(events.len(), 2); + assert_eq!(events[0].key, "hash"); + assert_eq!( + events[0].value, + "13F2EF4097320B234DECCEF063FDAE6A0845AF4380CEC15F2185CE9FACC6EBD5" + ); + assert_eq!(events[1].key, "height"); + assert_eq!(events[1].value, "1087"); // String conversion of integer value + } + + #[test] + fn test_full_websocket_message_deserialization() { + // This is the complete WebSocket message that was failing, including the outer JSON-RPC wrapper + let full_message = r#"{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "subscription_id": "", + "query": "tm.event = 'Tx'", + "data": { + "type": "tendermint/event/Tx", + "value": { + "height": 1087, + "tx": "BwBKtJbhBYdn6SJx+oezzOb0KjQAhV2vh0pXlAsN3u0soZ1vsfjXvOK0TA6z9UnzQoIRj2entd3N2XUQ8qmFOYML/DuaygABAANBIIBqaHzVMKT/AvClrEuKY6/kwgtQmZmaOGSOrLqGEhrBVf62e/mcTkqIrUruBQ/xdtxDYs0tj/32zt+yVTJH7j8=", + "result": { + "gas_used": 905760 + } + } + }, + "events": [ + { + 
"type": "tm", + "attributes": [ + { + "key": "event", + "value": "Tx", + "index": false + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "hash", + "value": "13F2EF4097320B234DECCEF063FDAE6A0845AF4380CEC15F2185CE9FACC6EBD5", + "index": false + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "height", + "value": "1087", + "index": false + } + ] + } + ] + } + }"#; + + // Test that the outer message parses correctly + let ws_message: TenderdashWsMessage = serde_json::from_str(full_message).unwrap(); + assert_eq!(ws_message.jsonrpc, "2.0"); + assert!(ws_message.result.is_some()); + + // Test that we can extract the inner tx event data + let result = ws_message.result.unwrap(); + let data = result.get("data").unwrap(); + let value = data.get("value").unwrap(); + + // This should deserialize without the "missing field `code`" error + let tx_event: TxEvent = serde_json::from_value(value.clone()).unwrap(); + assert_eq!(tx_event.height, 1087); + + // Verify the result defaults to code 0 when missing + let tx_result = tx_event.result.unwrap(); + assert_eq!(tx_result.code, 0); + } + + #[test] + fn test_hash_in_outer_events_websocket_message() { + // This reproduces the actual failing WebSocket message structure where the hash + // is in the outer events array, not in the inner tx_event.events + let full_message = r#"{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "subscription_id": "", + "query": "tm.event = 'Tx'", + "data": { + "type": "tendermint/event/Tx", + "value": { + "height": 1143, + "tx": 
"AwAEAAACAAAAABRoMDrccS7MNWBQ3j8+Irst5weWvAAAAQIAAQAAFDDoQkib1LvN+VIdf/tBEjPb8tmgAAACAgACAAAUjB/xAqiSZfRjX/0gvUCXATi06uQAAAMCAwEAABSqQPiOK2TfNerKRS3LkaD2x8G6GwAAxgEBcFMtXqPhk3AVd47C+6SSmXWl6BS8ehgBC6CSbbbU8hQBAAAAQCPGVEX1xA4ur9Iz2LdDyyfS8YE4x5Q6mYG/SS0xAGx6v3Gcn7oGsRFemDL+rYN5/cg3CqDLrXIl2SsotyB5BI79o8jb7Nf6MwHM0ZKU3ikwss37YUwNvJkZ57UZPf4txIqg7qN0oEjEynsX4tjv6BWrPlaEWTiyVjuYOCbuvHZBpPQ55cJ4+9ya/05J1C8KdIjaGuyB1r0yA6eLaXNBmu8DAAgAAXBTLV6j4ZNwFXeOwvukkpl1pegUvHoYAQugkm221PIUAQAAAGpHMEQCIC4nPoswVruvuSo5uIMs8vW7N1IowC8PxfjYlTnUy4fXAiAsgVn9e1kGYaunZI+LOeiJ1ghEMAS7u5WPP13tS7L9ZQEhA1xnCKgAxtiWPLxpfBMPmBetAiJKQn//lQLmSMatlduV/////wLA6iEBAAAAAAJqABiPRzkAAAAAGXapFDPxaffrRV2b5uJzofsIIsP3xBWiiKwAAAAAJAEBwOohAQAAAAAZdqkUtQHJZWYFWMlOKQjvCePbD4EAi8CIrAAAQR/5fcqaM3VWmUOBwWHSHQtbDNCKopIN/L6USHBk5jNp+gne/1nL/Cd0UjtaFGkuAkJbdLTgrDEIQU1rbtZQ3lBSMbRnV8B6UIWAY3z9q2tOSeTQ3FybD5iEd0Oo/dzJldM=", + "result": { + "gas_used": 130192500 + } + } + }, + "events": [ + { + "type": "tm", + "attributes": [ + { + "key": "event", + "value": "Tx", + "index": false + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "hash", + "value": "FCF3B0D09B8042B7A41F514107CBE1E09BD33C222005A8669A3EBE4B1D59BDDF", + "index": false + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "height", + "value": "1143", + "index": false + } + ] + } + ] + } + }"#; + + // Test that the outer message parses correctly + let ws_message: TenderdashWsMessage = serde_json::from_str(full_message).unwrap(); + let result = ws_message.result.unwrap(); + let data = result.get("data").unwrap(); + let value = data.get("value").unwrap(); + + // The inner tx event should deserialize but won't have events + let tx_event: TxEvent = serde_json::from_value(value.clone()).unwrap(); + assert_eq!(tx_event.height, 1143); + + // The inner tx_event.events is None, but we should be able to extract hash from outer events + assert!(tx_event.events.is_none()); + + // Test that the modified extract_all_tx_hashes function now works 
with outer events + let client = TenderdashWebSocketClient::new("ws://test".to_string(), 100); + let hashes = client + .extract_all_tx_hashes(&tx_event.events, &result) + .unwrap(); + + assert_eq!(hashes.len(), 1); + assert_eq!( + hashes[0], + "FCF3B0D09B8042B7A41F514107CBE1E09BD33C222005A8669A3EBE4B1D59BDDF" + ); + } + + #[test] + fn test_multiple_hashes_in_websocket_message() { + // Test case where multiple tx events each contain a hash (edge case) + let multiple_hash_message = r#"{ + "jsonrpc": "2.0", + "id": 1, + "result": { + "query": "tm.event = 'Tx'", + "data": { + "type": "tendermint/event/Tx", + "value": { + "height": "200", + "tx": "dGVzdA==", + "result": {} + } + }, + "events": [ + { + "type": "tx", + "attributes": [ + { + "key": "hash", + "value": "HASH1", + "index": false + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "hash", + "value": "HASH2", + "index": false + } + ] + }, + { + "type": "tx", + "attributes": [ + { + "key": "height", + "value": "200", + "index": false + } + ] + } + ] + } + }"#; + + let ws_message: TenderdashWsMessage = serde_json::from_str(multiple_hash_message).unwrap(); + let result = ws_message.result.unwrap(); + let data = result.get("data").unwrap(); + let value = data.get("value").unwrap(); + + let tx_event: TxEvent = serde_json::from_value(value.clone()).unwrap(); + let client = TenderdashWebSocketClient::new("ws://test".to_string(), 100); + let hashes = client + .extract_all_tx_hashes(&tx_event.events, &result) + .unwrap(); + + // Should find both hashes + assert_eq!(hashes.len(), 2); + assert_eq!(hashes[0], "HASH1"); + assert_eq!(hashes[1], "HASH2"); + } + + #[test] + fn test_event_attribute_deserialization_with_integer_value() { + let json_data = json!({ + "key": "hash", + "value": 1005 + }); + + let event_attr: EventAttribute = serde_json::from_value(json_data).unwrap(); + assert_eq!(event_attr.value, "1005"); + } + + #[test] + fn test_event_attribute_deserialization_with_string_value() { + let json_data = 
json!({ + "key": "hash", + "value": "abc123" + }); + + let event_attr: EventAttribute = serde_json::from_value(json_data).unwrap(); + assert_eq!(event_attr.value, "abc123"); + } +} diff --git a/packages/rs-dapi/src/config/mod.rs b/packages/rs-dapi/src/config/mod.rs new file mode 100644 index 00000000000..d9ba073338f --- /dev/null +++ b/packages/rs-dapi/src/config/mod.rs @@ -0,0 +1,410 @@ +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + env, + net::{IpAddr, SocketAddr, ToSocketAddrs}, + path::PathBuf, +}; +use tracing::{debug, trace, warn}; + +use crate::{DAPIResult, DapiError}; + +mod utils; +use utils::{from_str_or_bool, from_str_or_number}; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[serde(default)] +pub struct Config { + /// Server configuration for ports and network binding + #[serde(flatten)] + pub server: ServerConfig, + /// DAPI-specific configuration for blockchain integration + #[serde(flatten)] + pub dapi: DapiConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct ServerConfig { + /// Port for the unified gRPC server (all services: Core, Platform, Streams) + #[serde( + rename = "dapi_grpc_server_port", + deserialize_with = "from_str_or_number" + )] + pub grpc_server_port: u16, + /// Port for JSON-RPC API server + #[serde(rename = "dapi_json_rpc_port", deserialize_with = "from_str_or_number")] + pub json_rpc_port: u16, + /// Port for metrics and health endpoints + #[serde(rename = "dapi_metrics_port", deserialize_with = "from_str_or_number")] + pub metrics_port: u16, + /// IP address to bind all servers to + #[serde(rename = "dapi_bind_address")] + pub bind_address: String, +} + +impl Default for ServerConfig { + fn default() -> Self { + Self { + grpc_server_port: 3005, + json_rpc_port: 3004, + metrics_port: 9090, + bind_address: "127.0.0.1".to_string(), + } + } +} + +impl ServerConfig { + /// Resolve the configured bind address into a socket address for the provided port. 
+ pub fn address_with_port(&self, port: u16) -> DAPIResult { + socket_addr_from_bind(&self.bind_address, port) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct DapiConfig { + /// Drive (storage layer) client configuration + #[serde(flatten)] + pub drive: DriveConfig, + /// Tenderdash (consensus layer) client configuration + #[serde(flatten)] + pub tenderdash: TenderdashConfig, + /// Dash Core configuration for blockchain data + #[serde(flatten)] + pub core: CoreConfig, + /// Memory budget for cached Platform API responses (bytes) + #[serde( + rename = "dapi_platform_cache_bytes", + deserialize_with = "from_str_or_number" + )] + pub platform_cache_bytes: u64, + /// Timeout for waiting for state transition results (in milliseconds) + #[serde( + rename = "dapi_state_transition_wait_timeout", + deserialize_with = "from_str_or_number" + )] + pub state_transition_wait_timeout: u64, + /// Logging configuration + #[serde(flatten)] + pub logging: LoggingConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct DriveConfig { + /// URI for connecting to the Drive service + #[serde(rename = "dapi_drive_uri")] + pub uri: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct TenderdashConfig { + /// URI for connecting to the Tenderdash consensus service (HTTP RPC) + #[serde(rename = "dapi_tenderdash_uri")] + pub uri: String, + /// WebSocket URI for real-time events from Tenderdash + #[serde(rename = "dapi_tenderdash_websocket_uri")] + pub websocket_uri: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct CoreConfig { + /// ZMQ URI for receiving real-time blockchain events from Dash Core + #[serde(rename = "dapi_core_zmq_url")] + pub zmq_url: String, + /// JSON-RPC URL for Dash Core RPC (e.g., http://127.0.0.1:9998) + #[serde(rename = "dapi_core_rpc_url")] + pub rpc_url: String, + /// Dash Core RPC username + #[serde(rename = 
"dapi_core_rpc_user")] + pub rpc_user: String, + /// Dash Core RPC password + #[serde(rename = "dapi_core_rpc_pass")] + pub rpc_pass: String, + /// Memory budget for cached Core RPC responses (bytes) + #[serde( + rename = "dapi_core_cache_bytes", + deserialize_with = "from_str_or_number" + )] + pub cache_bytes: u64, +} + +impl Default for DapiConfig { + fn default() -> Self { + Self { + drive: DriveConfig::default(), + tenderdash: TenderdashConfig::default(), + core: CoreConfig::default(), + platform_cache_bytes: 2 * 1024 * 1024, + state_transition_wait_timeout: 30000, // 30 seconds default + logging: LoggingConfig::default(), + } + } +} + +impl Default for DriveConfig { + fn default() -> Self { + Self { + uri: "http://127.0.0.1:6000".to_string(), + } + } +} + +impl Default for TenderdashConfig { + fn default() -> Self { + Self { + uri: "http://127.0.0.1:26657".to_string(), + websocket_uri: "ws://127.0.0.1:26657/websocket".to_string(), + } + } +} + +impl Default for CoreConfig { + fn default() -> Self { + Self { + zmq_url: "tcp://127.0.0.1:29998".to_string(), + rpc_url: "http://127.0.0.1:9998".to_string(), + rpc_user: String::new(), + rpc_pass: String::new(), + cache_bytes: 64 * 1024 * 1024, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct LoggingConfig { + /// Main application log level or explicit RUST_LOG filter string + #[serde(rename = "dapi_logging_level")] + pub level: String, + /// Enable structured JSON logging for application logs + #[serde( + rename = "dapi_logging_json_format", + deserialize_with = "from_str_or_bool" + )] + pub json_format: bool, + /// Path to access log file. If set to non-empty value, access logging is enabled. + #[serde(rename = "dapi_logging_access_log_path")] + pub access_log_path: Option, + /// Access log format. 
Currently supports "combined" (Apache Common Log Format) + #[serde(rename = "dapi_logging_access_log_format")] + pub access_log_format: String, +} + +impl Default for LoggingConfig { + fn default() -> Self { + Self { + level: "info".to_string(), + json_format: false, + access_log_path: None, + access_log_format: "combined".to_string(), + } + } +} + +impl Config { + /// Load configuration from environment variables and .env file + pub fn load() -> DAPIResult { + let config = Self::from_env().map_err(|e| { + DapiError::Configuration(format!("Failed to load configuration: {}", e)) + })?; + config.validate()?; + Ok(config) + } + + /// Populate configuration from environment variables using `envy`. + fn from_env() -> Result { + envy::from_env() + } + + /// Load configuration from specific .env file and environment variables + pub fn load_from_dotenv(config_path: Option) -> DAPIResult { + Self::load_with_overrides(config_path, std::iter::empty::<(String, String)>()) + } + + /// Load configuration applying defaults, .env, environment variables, and CLI overrides (in that order). 
+ pub fn load_with_overrides( + config_path: Option, + cli_overrides: I, + ) -> DAPIResult + where + I: IntoIterator, + K: Into, + V: Into, + { + trace!("Loading configuration from .env file, environment, and CLI overrides"); + + // Collect configuration values from layered sources + let mut layered: HashMap = HashMap::new(); + + if let Some(path) = config_path { + match dotenvy::from_path_iter(&path) { + Ok(iter) => { + for entry in iter { + let (key, value) = entry.map_err(|e| { + DapiError::Configuration(format!( + "Cannot parse config file {:?}: {}", + path, e + )) + })?; + layered.insert(key, value); + } + debug!("Loaded .env file from: {:?}", path); + } + Err(e) => { + return Err(DapiError::Configuration(format!( + "Cannot load config file {:?}: {}", + path, e + ))); + } + } + } else { + match dotenvy::dotenv_iter() { + Ok(iter) => { + for entry in iter { + let (key, value) = entry.map_err(|e| { + DapiError::Configuration(format!( + "Cannot parse config file entry: {}", + e + )) + })?; + layered.insert(key, value); + } + debug!("Loaded .env file from default location"); + } + Err(e) => { + if e.not_found() { + warn!("Cannot find any matching .env file"); + } else { + return Err(DapiError::Configuration(format!( + "Cannot load config file: {}", + e + ))); + } + } + } + } + + // Environment variables override .env contents + layered.extend(env::vars()); + + // CLI overrides have the highest priority + for (key, value) in cli_overrides.into_iter() { + layered.insert(key.into(), value.into()); + } + + match envy::from_iter::<_, Self>(layered) { + Ok(config) => { + debug!("Configuration loaded successfully from layered sources"); + config.validate()?; + Ok(config) + } + Err(e) => Err(DapiError::Configuration(format!( + "Failed to load configuration: {}", + e + ))), + } + } + + /// Build the socket address for the unified gRPC endpoint. 
+ pub fn grpc_server_addr(&self) -> DAPIResult { + self.server.address_with_port(self.server.grpc_server_port) + } + + /// Build the socket address for the JSON-RPC endpoint. + pub fn json_rpc_addr(&self) -> DAPIResult { + self.server.address_with_port(self.server.json_rpc_port) + } + + /// Return the configured metrics listener port. + pub fn metrics_port(&self) -> u16 { + self.server.metrics_port + } + + /// Determine whether metrics should be exposed (port non-zero). + pub fn metrics_enabled(&self) -> bool { + self.server.metrics_port != 0 + } + + /// Build the metrics socket address if metrics are enabled. + pub fn metrics_addr(&self) -> DAPIResult> { + if !self.metrics_enabled() { + return Ok(None); + } + + self.server + .address_with_port(self.server.metrics_port) + .map(Some) + } + + /// Validate configuration to ensure dependent subsystems can start successfully. + pub fn validate(&self) -> DAPIResult<()> { + self.grpc_server_addr()?; + self.json_rpc_addr()?; + self.metrics_addr()?; + Ok(()) + } +} + +#[cfg(test)] +mod tests; + +/// Create a `SocketAddr` by combining a bind address string with a port number. +/// Supports IPv4, IPv6, and hostname/FQDN values for the bind address. +fn socket_addr_from_bind(bind_address: &str, port: u16) -> DAPIResult { + let trimmed = bind_address.trim(); + + if trimmed.is_empty() { + return Err(DapiError::Configuration( + "Bind address cannot be empty".to_string(), + )); + } + + // Reject addresses that already contain an explicit port to avoid ambiguity. + if trimmed.parse::().is_ok() { + return Err(DapiError::Configuration(format!( + "Bind address '{}' must not include a port", + trimmed + ))); + } + + // Direct IPv4/IPv6 literal. + if let Ok(ip_addr) = trimmed.parse::() { + return Ok(SocketAddr::new(ip_addr, port)); + } + + // Handle bracketed IPv6 literals like `[::1]`. 
+ if trimmed.starts_with('[') && trimmed.ends_with(']') { + let inner = &trimmed[1..trimmed.len() - 1]; + if let Ok(ip_addr) = inner.parse::() { + return Ok(SocketAddr::new(ip_addr, port)); + } + } + + // Attempt DNS resolution for hostnames/FQDNs and IPv6 literals without brackets. + let address = if trimmed.contains(':') && !trimmed.starts_with('[') && !trimmed.contains(']') { + format!("[{}]:{}", trimmed, port) + } else { + format!("{}:{}", trimmed, port) + }; + + let mut candidates = address.to_socket_addrs().map_err(|e| { + DapiError::Configuration(format!( + "Invalid bind address '{}': failed to resolve ({})", + trimmed, e + )) + })?; + + candidates + .next() + .ok_or_else(|| { + DapiError::Configuration(format!( + "Invalid bind address '{}': no address records found", + trimmed + )) + }) + .map(|resolved| SocketAddr::new(resolved.ip(), port)) +} diff --git a/packages/rs-dapi/src/config/tests.rs b/packages/rs-dapi/src/config/tests.rs new file mode 100644 index 00000000000..62ebe034581 --- /dev/null +++ b/packages/rs-dapi/src/config/tests.rs @@ -0,0 +1,422 @@ +use super::{Config, ServerConfig}; +use serial_test::serial; +use std::fs; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::path::PathBuf; +use tempfile::NamedTempFile; + +/// Helper function to clean up all DAPI environment variables +fn cleanup_env_vars() { + let env_vars = [ + "DAPI_GRPC_SERVER_PORT", + "DAPI_JSON_RPC_PORT", + "DAPI_METRICS_PORT", + "DAPI_BIND_ADDRESS", + "DAPI_DRIVE_URI", + "DAPI_TENDERDASH_URI", + "DAPI_TENDERDASH_WEBSOCKET_URI", + "DAPI_CORE_ZMQ_URL", + "DAPI_STATE_TRANSITION_WAIT_TIMEOUT", + ]; + + for var in &env_vars { + remove_env_var(var); + } +} + +fn set_env_var(key: &str, value: &str) { + // SAFETY: manipulating process environment is inherently unsafe when multiple + // threads are running. Tests using these helpers are serialized to avoid races. 
+ unsafe { + std::env::set_var(key, value); + } +} + +fn remove_env_var(key: &str) { + // SAFETY: see set_env_var comment; tests are serialized. + unsafe { + std::env::remove_var(key); + } +} + +#[test] +fn test_default_config_uses_uris() { + let config = Config::default(); + + // Test that default config uses proper URIs + assert_eq!(config.dapi.drive.uri, "http://127.0.0.1:6000"); + assert_eq!(config.dapi.tenderdash.uri, "http://127.0.0.1:26657"); +} + +#[test] +#[serial] +fn test_config_load_with_uri_env_vars() { + // Set environment variables + set_env_var("DAPI_DRIVE_URI", "http://custom-drive:8000"); + set_env_var("DAPI_TENDERDASH_URI", "http://custom-tenderdash:9000"); + + let config = Config::load().expect("Config should load successfully"); + + // Test that environment variables override defaults + assert_eq!(config.dapi.drive.uri, "http://custom-drive:8000"); + assert_eq!(config.dapi.tenderdash.uri, "http://custom-tenderdash:9000"); + + // Clean up + remove_env_var("DAPI_DRIVE_URI"); + remove_env_var("DAPI_TENDERDASH_URI"); +} + +#[tokio::test] +async fn test_clients_can_be_created_with_uris() { + use crate::clients::{DriveClient, TenderdashClient}; + + let config = Config::default(); + + // Test that clients can be created with URIs from config + // These create lazy connections that may operate in degraded mode until upstreams respond + DriveClient::new(&config.dapi.drive.uri) + .await + .expect("DriveClient should be constructed even if no server is running"); + TenderdashClient::new( + &config.dapi.tenderdash.uri, + &config.dapi.tenderdash.websocket_uri, + ) + .await + .expect("TenderdashClient should be constructed even if no server is running"); +} + +#[test] +#[serial] +fn test_config_load_from_dotenv_file() { + // Clean up any existing environment variables first + cleanup_env_vars(); + + // Create a temporary .env file + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let env_content = r#" +# Test configuration 
+DAPI_GRPC_SERVER_PORT=4005 +DAPI_JSON_RPC_PORT=4004 +DAPI_METRICS_PORT=9091 +DAPI_BIND_ADDRESS=0.0.0.0 +DAPI_DRIVE_URI=http://test-drive:7000 +DAPI_TENDERDASH_URI=http://test-tenderdash:8000 +DAPI_TENDERDASH_WEBSOCKET_URI=ws://test-tenderdash:8000/websocket +DAPI_CORE_ZMQ_URL=tcp://test-core:30000 +DAPI_STATE_TRANSITION_WAIT_TIMEOUT=45000 +"#; + + fs::write(temp_file.path(), env_content).expect("Failed to write temp file"); + + // Load config from the temp file + let config = Config::load_from_dotenv(Some(temp_file.path().to_path_buf())) + .expect("Config should load from dotenv file"); + + // Verify all values were loaded correctly + assert_eq!(config.server.grpc_server_port, 4005); + assert_eq!(config.server.json_rpc_port, 4004); + assert_eq!(config.server.metrics_port, 9091); + assert_eq!(config.server.bind_address, "0.0.0.0"); + assert_eq!(config.dapi.drive.uri, "http://test-drive:7000"); + assert_eq!(config.dapi.tenderdash.uri, "http://test-tenderdash:8000"); + assert_eq!( + config.dapi.tenderdash.websocket_uri, + "ws://test-tenderdash:8000/websocket" + ); + assert_eq!(config.dapi.core.zmq_url, "tcp://test-core:30000"); + assert_eq!(config.dapi.state_transition_wait_timeout, 45000); + + // Cleanup + cleanup_env_vars(); +} + +#[test] +#[serial] +fn test_config_load_from_dotenv_file_partial() { + // Clean up any existing environment variables first + cleanup_env_vars(); + + // Create a temporary .env file with only some values + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let env_content = r#" +# Partial test configuration +DAPI_GRPC_SERVER_PORT=5005 +DAPI_DRIVE_URI=http://partial-drive:8000 +"#; + + fs::write(temp_file.path(), env_content).expect("Failed to write temp file"); + + // Load config from the temp file + let config = Config::load_from_dotenv(Some(temp_file.path().to_path_buf())) + .expect("Config should load from dotenv file"); + + // Verify specified values were loaded + assert_eq!(config.server.grpc_server_port, 
5005); + assert_eq!(config.dapi.drive.uri, "http://partial-drive:8000"); + + // Verify defaults are used for unspecified values + assert_eq!(config.dapi.tenderdash.uri, "http://127.0.0.1:26657"); // default + assert_eq!(config.dapi.state_transition_wait_timeout, 30000); // default + + // Cleanup + cleanup_env_vars(); +} + +#[test] +fn test_config_load_from_nonexistent_dotenv_file() { + let nonexistent_path = PathBuf::from("/nonexistent/path/to/.env"); + + // Should return an error for nonexistent file + let result = Config::load_from_dotenv(Some(nonexistent_path)); + assert!(result.is_err()); + + // Error message should mention the file path + let error_msg = result.unwrap_err().to_string(); + assert!(error_msg.contains("Cannot load config file")); +} + +#[test] +fn test_server_config_address_with_port_ipv4_literal() { + let mut server = ServerConfig::default(); + server.bind_address = "0.0.0.0".to_string(); + + let addr = server + .address_with_port(1234) + .expect("IPv4 bind address should resolve"); + + assert_eq!(addr.ip(), IpAddr::V4(Ipv4Addr::UNSPECIFIED)); + assert_eq!(addr.port(), 1234); +} + +#[test] +fn test_server_config_address_with_port_ipv6_literal() { + let mut server = ServerConfig::default(); + server.bind_address = "::1".to_string(); + + let addr = server + .address_with_port(4321) + .expect("IPv6 bind address should resolve"); + + assert_eq!(addr.ip(), IpAddr::V6(Ipv6Addr::LOCALHOST)); + assert_eq!(addr.port(), 4321); +} + +#[test] +fn test_server_config_address_with_port_hostname() { + let mut server = ServerConfig::default(); + server.bind_address = "localhost".to_string(); + + let addr = server + .address_with_port(8080) + .expect("Hostname bind address should resolve"); + + assert!(addr.ip().is_loopback()); + assert_eq!(addr.port(), 8080); +} + +#[test] +fn test_server_config_rejects_port_in_bind_address() { + let mut server = ServerConfig::default(); + server.bind_address = "127.0.0.1:9000".to_string(); + + let err = server + 
.address_with_port(5000) + .expect_err("Port in bind address should be rejected"); + + assert!( + err.to_string() + .contains("Bind address '127.0.0.1:9000' must not include a port") + ); +} + +#[test] +fn test_server_config_invalid_bind_address() { + let mut server = ServerConfig::default(); + server.bind_address = "invalid host".to_string(); + + let err = server + .address_with_port(6000) + .expect_err("Invalid bind address should fail"); + + assert!( + err.to_string() + .contains("Invalid bind address 'invalid host'") + ); +} + +#[test] +#[serial] +fn test_config_load_from_dotenv_with_env_override() { + // Clean up any existing environment variables first + cleanup_env_vars(); + + // Create a temporary .env file + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let env_content = r#" +DAPI_GRPC_SERVER_PORT=6005 +DAPI_DRIVE_URI=http://dotenv-drive:9000 +"#; + + fs::write(temp_file.path(), env_content).expect("Failed to write temp file"); + + // Set environment variables that should override .env file + set_env_var("DAPI_GRPC_SERVER_PORT", "7005"); + set_env_var("DAPI_TENDERDASH_URI", "http://env-tenderdash:10000"); + + // Load config from the temp file + let config = Config::load_from_dotenv(Some(temp_file.path().to_path_buf())) + .expect("Config should load from dotenv file"); + + // Environment variables should override .env file values + assert_eq!(config.server.grpc_server_port, 7005); // from env, not .env file + assert_eq!(config.dapi.tenderdash.uri, "http://env-tenderdash:10000"); // from env + + // Values only in .env file should still be loaded + assert_eq!(config.dapi.drive.uri, "http://dotenv-drive:9000"); // from .env file + + // Clean up environment variables + cleanup_env_vars(); +} + +#[test] +#[serial] +fn test_config_load_with_cli_overrides() { + // Ensure we start from a clean environment + cleanup_env_vars(); + + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let env_content = r#" 
+DAPI_GRPC_SERVER_PORT=6005 +DAPI_DRIVE_URI=http://dotenv-drive:9000 +"#; + + fs::write(temp_file.path(), env_content).expect("Failed to write temp file"); + + set_env_var("DAPI_GRPC_SERVER_PORT", "7005"); + + let overrides = [ + ("DAPI_GRPC_SERVER_PORT", "8005"), + ("DAPI_TENDERDASH_URI", "http://cli-tenderdash:11000"), + ]; + + let config = Config::load_with_overrides(Some(temp_file.path().to_path_buf()), overrides) + .expect("Config should load with CLI overrides"); + + assert_eq!(config.server.grpc_server_port, 8005); // CLI override wins + assert_eq!(config.dapi.tenderdash.uri, "http://cli-tenderdash:11000"); // CLI override + assert_eq!(config.dapi.drive.uri, "http://dotenv-drive:9000"); // .env retains value for unset keys + + cleanup_env_vars(); +} + +#[test] +#[serial] +fn test_config_load_from_dotenv_invalid_values() { + // Clean up any existing environment variables first + cleanup_env_vars(); + + // Create a temporary .env file with invalid port value + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let env_content = r#" +DAPI_GRPC_SERVER_PORT=not_a_number +DAPI_DRIVE_URI=http://test-drive:8000 +"#; + + fs::write(temp_file.path(), env_content).expect("Failed to write temp file"); + + // Loading should fail due to invalid port value + let result = Config::load_from_dotenv(Some(temp_file.path().to_path_buf())); + + // Should either return error or fallback gracefully (depending on implementation) + // The current implementation should fallback to manual loading which would fail + let error = result.expect_err("valid config").to_string(); + assert!(error.contains("invalid digit found in string")); + + // Cleanup + cleanup_env_vars(); +} + +#[test] +fn test_config_socket_addresses() { + let config = Config::default(); + + // Test that socket addresses are properly formatted + assert_eq!( + config + .grpc_server_addr() + .expect("gRPC address should parse") + .to_string(), + "127.0.0.1:3005" + ); + assert_eq!( + config + 
.json_rpc_addr() + .expect("JSON-RPC address should parse") + .to_string(), + "127.0.0.1:3004" + ); + assert_eq!( + config + .metrics_addr() + .expect("metrics address should parse") + .expect("metrics address should be present") + .to_string(), + "127.0.0.1:9090" + ); +} + +#[test] +fn test_config_socket_addresses_custom_bind() { + let mut config = Config::default(); + config.server.bind_address = "0.0.0.0".to_string(); + config.server.grpc_server_port = 4000; + + // Test that custom bind address and port work + assert_eq!( + config + .grpc_server_addr() + .expect("custom gRPC address should parse") + .to_string(), + "0.0.0.0:4000" + ); +} + +#[test] +fn test_metrics_disabled_when_port_zero() { + let mut config = Config::default(); + config.server.metrics_port = 0; + + assert!(!config.metrics_enabled()); + assert!( + config + .metrics_addr() + .expect("metrics address check should succeed") + .is_none() + ); +} + +#[test] +fn test_validate_default_config_succeeds() { + let config = Config::default(); + config + .validate() + .expect("Default configuration should be valid"); +} + +#[test] +fn test_validate_fails_on_invalid_bind_address() { + let mut config = Config::default(); + config.server.bind_address = "invalid-address".to_string(); + + let error = config + .validate() + .expect_err("Invalid bind address should fail validation"); + + assert!( + error + .to_string() + .contains("Invalid bind address 'invalid-address'"), + "unexpected error message: {}", + error + ); +} diff --git a/packages/rs-dapi/src/config/utils.rs b/packages/rs-dapi/src/config/utils.rs new file mode 100644 index 00000000000..ad00da24558 --- /dev/null +++ b/packages/rs-dapi/src/config/utils.rs @@ -0,0 +1,64 @@ +use crate::utils::deserialize_string_or_number; +use serde::Deserializer; +use serde::de::{Error as DeError, Visitor}; +use std::fmt; +use std::str::FromStr; + +/// Custom deserializer that handles both string and numeric representations +/// This is useful for environment variables 
which are always strings but need to be parsed as numbers
pub fn from_str_or_number<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
    D: Deserializer<'de>,
    T: FromStr,
    <T as FromStr>::Err: std::fmt::Display,
{
    // Delegate to the shared helper so string-or-number handling lives in one place.
    deserialize_string_or_number(deserializer)
}

/// Custom deserializer for boolean values that handles both string and boolean representations
/// Accepts: "true", "false", "1", "0", "yes", "no", "on", "off" (case insensitive)
pub fn from_str_or_bool<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
    D: Deserializer<'de>,
{
    /// Visitor accepting either a native boolean or a boolean-like string.
    struct BoolOrStringVisitor;

    impl<'de> Visitor<'de> for BoolOrStringVisitor {
        type Value = bool;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("a boolean or a string representing a boolean")
        }

        fn visit_bool<E>(self, value: bool) -> Result<bool, E>
        where
            E: DeError,
        {
            Ok(value)
        }

        fn visit_str<E>(self, value: &str) -> Result<bool, E>
        where
            E: DeError,
        {
            parse_bool(value).map_err(E::custom)
        }

        fn visit_string<E>(self, value: String) -> Result<bool, E>
        where
            E: DeError,
        {
            self.visit_str(&value)
        }
    }

    /// Parse a boolean-like string; accepts the common true/false spellings.
    fn parse_bool(input: &str) -> Result<bool, String> {
        let normalized = input.to_lowercase();
        match normalized.as_str() {
            "true" | "1" | "yes" | "on" => Ok(true),
            "false" | "0" | "no" | "off" => Ok(false),
            // NOTE(review): this fallback can never succeed — `str::parse::<bool>`
            // accepts only the exact strings "true"/"false", both of which are
            // already matched above after lowercasing. Kept only so the error
            // message stays identical; safe to replace with a direct Err(...).
            _ => input
                .parse::<bool>()
                .map_err(|err| format!("failed to parse bool '{}': {}", input, err)),
        }
    }

    deserializer.deserialize_any(BoolOrStringVisitor)
}

// ---- packages/rs-dapi/src/error.rs ----
// Custom error types for rs-dapi using thiserror

use dashcore_rpc::{self, jsonrpc};
use serde_json::Value;
use sha2::Digest;
use thiserror::Error;
use tokio::task::JoinError;

use crate::services::platform_service::TenderdashStatus;

/// Result type alias for DAPI operations
pub type DapiResult<T> = std::result::Result<T, DapiError>;

/// Main error type for DAPI operations
+#[derive(Error, Debug)] +pub enum DapiError { + #[error("ZMQ connection error: {0}")] + ZmqConnection(#[from] zeromq::ZmqError), + + #[error("Configuration error: {0}")] + Configuration(String), + + #[error("Streaming service error: {0}")] + StreamingService(String), + + #[error("Client error: {0}")] + Client(String), + + #[error("Cannot connect to server {0}: {1}")] + /// Server unavailable error (URI, detailed message) + ServerUnavailable(String, String), + + #[error("Server error: {0}")] + Server(String), + + #[error("Serialization error: {0}")] + Serialization(#[from] serde_json::Error), + + #[error("Transport error: {0}")] + Transport(#[from] tonic::transport::Error), + + #[error("Status error: {0}")] + Status(#[from] tonic::Status), + + #[error("HTTP error: {0}")] + Http(#[from] axum::http::Error), + + #[error("WebSocket error: {0}")] + WebSocket(#[from] Box), + + #[error("Task join error: {0}")] + TaskJoin(#[from] tokio::task::JoinError), + + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("Request error: {0}")] + Request(#[from] reqwest::Error), + + #[error("URL parse error: {0}")] + UrlParse(#[from] url::ParseError), + + #[error("Base64 decode error: {0}")] + Base64Decode(#[from] base64::DecodeError), + + #[error("Transaction hash not found in event attributes")] + TransactionHashNotFound, + + #[error("Invalid data: {0}")] + InvalidData(String), + + // Standardized categories for RPC-like errors + #[error("Not found: {0}")] + NotFound(String), + + #[error("Already exists: {0}")] + AlreadyExists(String), + + #[error("Invalid request: {0}")] + InvalidRequest(String), + + #[error("Invalid argument: {0}")] + InvalidArgument(String), + + #[error("Resource exhausted: {0}")] + ResourceExhausted(String), + + #[error("Aborted: {0}")] + Aborted(String), + + #[error("Unavailable: {0}")] + Unavailable(String), + + #[error("Failed precondition: {0}")] + FailedPrecondition(String), + + #[error("Service unavailable: {0}")] + 
ServiceUnavailable(String), + + #[error("Timeout error: {0}")] + Timeout(String), + + #[error("Internal error: {0}")] + Internal(String), + + #[error("Connection closed")] + ConnectionClosed, + + #[error("Client is gone: {0}")] + ClientGone(String), + + #[error("No valid proof found for tx: {0}")] + NoValidTxProof(String), + + #[error("{0}")] + MethodNotFound(String), + + #[error("Tenderdash request error: {0:?}")] + TenderdashClientError(TenderdashStatus), +} + +/// Result type alias for DAPI operations +pub type DAPIResult = std::result::Result; + +// Add From implementation for boxed errors +impl From> for DapiError { + /// Collapse boxed dynamic errors into an internal error variant. + fn from(err: Box) -> Self { + Self::Internal(err.to_string()) + } +} + +impl From for DapiError { + /// Wrap tungstenite errors in the WebSocket variant. + fn from(err: tokio_tungstenite::tungstenite::Error) -> Self { + Self::WebSocket(Box::new(err)) + } +} + +impl From for tonic::Status { + /// Convert `DapiError` directly into `tonic::Status` using `to_status`. + fn from(err: DapiError) -> Self { + err.to_status() + } +} + +impl DapiError { + /// Create a tonic::Status from DapiError. + /// + /// Defaults to internal status if status cannot be converted. 
+ pub fn to_status(&self) -> tonic::Status { + match self { + DapiError::Status(status) => status.clone(), + DapiError::NotFound(msg) => tonic::Status::not_found(msg.clone()), + DapiError::AlreadyExists(msg) => tonic::Status::already_exists(msg.clone()), + DapiError::InvalidArgument(msg) => tonic::Status::invalid_argument(msg.clone()), + DapiError::ResourceExhausted(msg) => tonic::Status::resource_exhausted(msg.clone()), + DapiError::Aborted(msg) => tonic::Status::aborted(msg.clone()), + DapiError::Unavailable(msg) | DapiError::ServiceUnavailable(msg) => { + tonic::Status::unavailable(msg.clone()) + } + DapiError::FailedPrecondition(msg) => tonic::Status::failed_precondition(msg.clone()), + DapiError::MethodNotFound(msg) => tonic::Status::unimplemented(msg.clone()), + DapiError::TenderdashClientError(error) => error.to_status(), + _ => tonic::Status::internal(self.to_string()), + } + } + + /// Construct a `DapiError` from a raw Tenderdash JSON status response. + pub fn from_tenderdash_error(value: Value) -> Self { + DapiError::TenderdashClientError(TenderdashStatus::from(value)) + } + + /// Create a no proof error for a transaction. + /// + /// Note that this assumes that if tx is 32 bytes, it is already a hash. + /// If the input has a different size, it will be hashed. + /// It can lead to false positives if a non-hash 32-byte array is passed. 
+ pub fn no_valid_tx_proof(tx: &[u8]) -> Self { + let tx_hash = if tx.len() == sha2::Sha256::output_size() { + // possible false positive if tx is not a hash but still a 32-byte array + hex::encode(tx) + } else { + let digest = sha2::Sha256::digest(tx); + hex::encode(digest) + }; + Self::NoValidTxProof(tx_hash) + } + + /// Create a configuration error + pub fn configuration>(msg: S) -> Self { + Self::Configuration(msg.into()) + } + + /// Create a streaming service error + pub fn streaming_service>(msg: S) -> Self { + Self::StreamingService(msg.into()) + } + + /// Create a client error + pub fn client>(msg: S) -> Self { + Self::Client(msg.into()) + } + + /// Convert this error into a tonic::Status while preserving legacy codes/messages when available. + pub fn into_legacy_status(self) -> tonic::Status { + match self { + DapiError::NotFound(msg) => tonic::Status::new(tonic::Code::NotFound, msg), + DapiError::AlreadyExists(msg) => tonic::Status::new(tonic::Code::AlreadyExists, msg), + DapiError::InvalidArgument(msg) => { + tonic::Status::new(tonic::Code::InvalidArgument, msg) + } + DapiError::ResourceExhausted(msg) => { + tonic::Status::new(tonic::Code::ResourceExhausted, msg) + } + DapiError::FailedPrecondition(msg) => { + tonic::Status::new(tonic::Code::FailedPrecondition, msg) + } + DapiError::Client(msg) => tonic::Status::new(tonic::Code::InvalidArgument, msg), + DapiError::ServiceUnavailable(msg) | DapiError::Unavailable(msg) => { + tonic::Status::new(tonic::Code::Unavailable, msg) + } + DapiError::MethodNotFound(msg) => tonic::Status::new(tonic::Code::Unimplemented, msg), + DapiError::Timeout(msg) => tonic::Status::new(tonic::Code::DeadlineExceeded, msg), + DapiError::Aborted(msg) => tonic::Status::new(tonic::Code::Aborted, msg), + other => other.to_status(), + } + } + + /// Create a connection validation error + pub fn server_unavailable(uri: U, msg: S) -> Self { + Self::ServerUnavailable(uri.to_string(), msg.to_string()) + } + + /// Create a server error + pub 
fn server>(msg: S) -> Self { + Self::Server(msg.into()) + } + + /// Create an invalid data error + pub fn invalid_data>(msg: S) -> Self { + Self::InvalidData(msg.into()) + } + + /// Create a service unavailable error + pub fn service_unavailable>(msg: S) -> Self { + Self::ServiceUnavailable(msg.into()) + } + + /// Create a timeout error + pub fn timeout>(msg: S) -> Self { + Self::Timeout(msg.into()) + } + + /// Create an internal error + pub fn internal>(msg: S) -> Self { + Self::Internal(msg.into()) + } + + /// Handle task join errors + pub fn map_join_result>( + msg: Result, JoinError>, + ) -> Result { + match msg { + Ok(Ok(inner)) => Ok(inner), + Ok(Err(e)) => Err(e.into()), + Err(join_err) => Err(DapiError::TaskJoin(join_err)), + } + } +} + +pub trait MapToDapiResult { + /// Convert nested or join results into `DAPIResult` for convenience APIs. + fn to_dapi_result(self) -> DAPIResult; +} + +impl> MapToDapiResult for Result, JoinError> { + /// Flatten `Result>` from spawned tasks into `DAPIResult`. + fn to_dapi_result(self) -> DAPIResult { + match self { + Ok(Ok(inner)) => Ok(inner), + Ok(Err(e)) => Err(e.into()), + Err(e) => Err(e.into()), + } + } +} + +impl> MapToDapiResult for Result { + /// Flatten `Result>` from spawned tasks into `DAPIResult`. + fn to_dapi_result(self) -> DAPIResult { + match self { + Ok(inner) => Ok(inner), + Err(e) => Err(e.into()), + } + } +} + +// Provide a conversion from dashcore-rpc Error to our DapiError so callers can +// use generic helpers like MapToDapiResult without custom closures. +impl From for DapiError { + /// Map dashcore RPC errors into rich `DapiError` variants for uniform handling. 
+ fn from(e: dashcore_rpc::Error) -> Self { + match e { + dashcore_rpc::Error::JsonRpc(jerr) => match jerr { + jsonrpc::Error::Rpc(rpc) => { + let code = rpc.code; + let msg = rpc.message; + match code { + -5 => DapiError::NotFound(msg), // Invalid address or key / Not found + -8 => DapiError::NotFound(msg), // Block height out of range + -1 => DapiError::InvalidArgument(msg), // Invalid parameter + -27 => DapiError::AlreadyExists(msg), // Already in chain + -26 => DapiError::FailedPrecondition(msg), // RPC_VERIFY_REJECTED + -25 | -22 => DapiError::InvalidArgument(msg), // Deserialization/Verify error + _ => DapiError::Unavailable(format!("Core RPC error {}: {}", code, msg)), + } + } + jsonrpc::Error::Transport(_) => DapiError::Unavailable(jerr.to_string()), + jsonrpc::Error::Json(_) => DapiError::InvalidData(jerr.to_string()), + _ => DapiError::Unavailable(jerr.to_string()), + }, + dashcore_rpc::Error::BitcoinSerialization(e) => DapiError::InvalidData(e.to_string()), + dashcore_rpc::Error::Hex(e) => DapiError::InvalidData(e.to_string()), + dashcore_rpc::Error::Json(e) => DapiError::InvalidData(e.to_string()), + dashcore_rpc::Error::Io(e) => DapiError::Io(e), + dashcore_rpc::Error::InvalidAmount(e) => DapiError::InvalidData(e.to_string()), + dashcore_rpc::Error::Secp256k1(e) => DapiError::InvalidData(e.to_string()), + dashcore_rpc::Error::InvalidCookieFile => { + DapiError::Unavailable("invalid cookie file".to_string()) + } + dashcore_rpc::Error::UnexpectedStructure(s) => DapiError::InvalidData(s), + } + } +} diff --git a/packages/rs-dapi/src/lib.rs b/packages/rs-dapi/src/lib.rs new file mode 100644 index 00000000000..460ce4e6e59 --- /dev/null +++ b/packages/rs-dapi/src/lib.rs @@ -0,0 +1,16 @@ +// lib.rs - rs-dapi library + +pub mod cache; +pub mod clients; +pub mod config; +pub mod error; +pub mod logging; +pub mod metrics; +pub mod protocol; +pub mod server; +pub mod services; +pub mod sync; +pub mod utils; + +// Re-export main error types for convenience +pub 
use error::{DAPIResult, DapiError}; diff --git a/packages/rs-dapi/src/logging/access_log.rs b/packages/rs-dapi/src/logging/access_log.rs new file mode 100644 index 00000000000..b4c807fece0 --- /dev/null +++ b/packages/rs-dapi/src/logging/access_log.rs @@ -0,0 +1,378 @@ +//! Access log entry structures and formatting +//! +//! Supports Apache Combined Log Format for compatibility with standard log analyzers. + +use chrono::{DateTime, Utc}; +use serde_json::{Map, Value}; +use std::net::IpAddr; + +/// An access log entry containing request/response information +#[derive(Debug, Clone)] +pub struct AccessLogEntry { + /// Client IP address + pub remote_addr: Option, + /// Remote user (usually "-" for API servers) + pub remote_user: Option, + /// Request timestamp + pub timestamp: DateTime, + /// HTTP method + pub method: String, + /// Request path/URI + pub uri: String, + /// HTTP version (e.g., "HTTP/1.1") + pub http_version: String, + /// HTTP status code + pub status: u16, + /// Response body size in bytes + pub body_bytes: u64, + /// Referer header value + pub referer: Option, + /// User-Agent header value + pub user_agent: Option, + /// Request processing time in microseconds + pub duration_us: u64, + /// Protocol type (HTTP, gRPC, WebSocket) + pub protocol: String, + /// gRPC service and method (for gRPC requests) + pub grpc_service: Option, + pub grpc_method: Option, + /// gRPC status code (for gRPC requests) + pub grpc_status: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AccessLogFormat { + Combined, + Json, +} + +impl AccessLogEntry { + /// Create a new access log entry for HTTP requests + pub fn new_http( + remote_addr: Option, + method: String, + uri: String, + http_version: String, + status: u16, + body_bytes: u64, + duration_us: u64, + ) -> Self { + Self { + remote_addr, + remote_user: None, + timestamp: Utc::now(), + method, + uri, + http_version, + status, + body_bytes, + referer: None, + user_agent: None, + duration_us, + protocol: 
"HTTP".to_string(), + grpc_service: None, + grpc_method: None, + grpc_status: None, + } + } + + /// Create a new access log entry for gRPC requests + pub fn new_grpc( + remote_addr: Option, + uri: String, + service: String, + method: String, + grpc_status: u32, + body_bytes: u64, + duration_us: u64, + ) -> Self { + Self { + remote_addr, + remote_user: None, + timestamp: Utc::now(), + method: "POST".to_string(), // gRPC always uses POST + uri, + http_version: "HTTP/2.0".to_string(), // gRPC uses HTTP/2 + status: grpc_status_to_http_status(grpc_status), + body_bytes, + referer: None, + user_agent: None, + duration_us, + protocol: "gRPC".to_string(), + grpc_service: Some(service), + grpc_method: Some(method), + grpc_status: Some(grpc_status), + } + } + + /// Set user agent from request headers + pub fn with_user_agent(mut self, user_agent: String) -> Self { + self.user_agent = Some(user_agent); + self + } + + /// Set referer from request headers + pub fn with_referer(mut self, referer: String) -> Self { + self.referer = Some(referer); + self + } + + /// Format as Apache Combined Log Format + /// Format: remote_addr - remote_user [timestamp] "method uri version" status size "referer" "user_agent" duration_us protocol + pub fn to_combined_format(&self) -> String { + let remote_addr = self + .remote_addr + .map(|addr| addr.to_string()) + .unwrap_or_else(|| "-".to_string()); + + let remote_user = self.remote_user.as_deref().unwrap_or("-"); + + let timestamp = self.timestamp.format("%d/%b/%Y:%H:%M:%S %z"); + + let referer = self.referer.as_deref().unwrap_or("-"); + + let user_agent = self.user_agent.as_deref().unwrap_or("-"); + + // Extended format with additional fields + format!( + r#"{} - {} [{}] "{} {} {}" {} {} "{}" "{}" {}us {}"#, + remote_addr, + remote_user, + timestamp, + self.method, + self.uri, + self.http_version, + self.status, + self.body_bytes, + referer, + user_agent, + self.duration_us, + self.protocol + ) + } + + /// Format as JSON string suitable for 
structured logging pipelines + pub fn to_json_string(&self) -> String { + let value = self.to_json_value(); + serde_json::to_string(&value).unwrap_or_else(|_| "{}".to_string()) + } + + /// Convert the log entry into a serde `Value` preserving optional fields. + fn to_json_value(&self) -> Value { + let mut map = Map::new(); + + map.insert( + "remote_addr".to_string(), + self.remote_addr + .map(|addr| Value::String(addr.to_string())) + .unwrap_or(Value::Null), + ); + map.insert( + "remote_user".to_string(), + self.remote_user + .as_ref() + .map(|user| Value::String(user.clone())) + .unwrap_or(Value::Null), + ); + map.insert( + "timestamp".to_string(), + Value::String(self.timestamp.to_rfc3339()), + ); + map.insert("method".to_string(), Value::String(self.method.clone())); + map.insert("uri".to_string(), Value::String(self.uri.clone())); + map.insert( + "http_version".to_string(), + Value::String(self.http_version.clone()), + ); + map.insert("status".to_string(), Value::Number(self.status.into())); + map.insert( + "body_bytes".to_string(), + Value::Number(self.body_bytes.into()), + ); + map.insert( + "referer".to_string(), + self.referer + .as_ref() + .map(|referer| Value::String(referer.clone())) + .unwrap_or(Value::Null), + ); + map.insert( + "user_agent".to_string(), + self.user_agent + .as_ref() + .map(|ua| Value::String(ua.clone())) + .unwrap_or(Value::Null), + ); + map.insert( + "duration_us".to_string(), + Value::Number(self.duration_us.into()), + ); + map.insert("protocol".to_string(), Value::String(self.protocol.clone())); + + if let Some(service) = &self.grpc_service { + map.insert("grpc_service".to_string(), Value::String(service.clone())); + } + + if let Some(method) = &self.grpc_method { + map.insert("grpc_method".to_string(), Value::String(method.clone())); + } + + if let Some(status) = self.grpc_status { + map.insert("grpc_status".to_string(), Value::Number(status.into())); + } + + Value::Object(map) + } +} + +/// Convert gRPC status code to HTTP status 
code for logging +fn grpc_status_to_http_status(grpc_status: u32) -> u16 { + match grpc_status { + 0 => 200, // OK + 1 => 499, // CANCELLED -> Client Closed Request + 2 => 500, // UNKNOWN -> Internal Server Error + 3 => 400, // INVALID_ARGUMENT -> Bad Request + 4 => 504, // DEADLINE_EXCEEDED -> Gateway Timeout + 5 => 404, // NOT_FOUND -> Not Found + 6 => 409, // ALREADY_EXISTS -> Conflict + 7 => 403, // PERMISSION_DENIED -> Forbidden + 8 => 429, // RESOURCE_EXHAUSTED -> Too Many Requests + 9 => 412, // FAILED_PRECONDITION -> Precondition Failed + 10 => 409, // ABORTED -> Conflict + 11 => 400, // OUT_OF_RANGE -> Bad Request + 12 => 501, // UNIMPLEMENTED -> Not Implemented + 13 => 500, // INTERNAL -> Internal Server Error + 14 => 503, // UNAVAILABLE -> Service Unavailable + 15 => 500, // DATA_LOSS -> Internal Server Error + 16 => 401, // UNAUTHENTICATED -> Unauthorized + _ => 500, // Unknown -> Internal Server Error + } +} + +/// Logger for access log entries +#[derive(Debug, Clone)] +pub struct AccessLogger { + writer: std::sync::Arc>>, + format: AccessLogFormat, +} + +impl AccessLogger { + /// Create a new access logger with specified file path + pub async fn new(file_path: String, format: AccessLogFormat) -> Result { + let file = tokio::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&file_path) + .await?; + + Ok(Self { + writer: std::sync::Arc::new(tokio::sync::Mutex::new(Some(file))), + format, + }) + } + + /// Log an access log entry + pub async fn log(&self, entry: &AccessLogEntry) { + let log_line = match self.format { + AccessLogFormat::Combined => entry.to_combined_format(), + AccessLogFormat::Json => entry.to_json_string(), + } + "\n"; + + let mut writer_guard = self.writer.lock().await; + if let Some(ref mut file) = *writer_guard { + use tokio::io::AsyncWriteExt; + if let Err(e) = file.write_all(log_line.as_bytes()).await { + tracing::warn!("Failed to write access log: {}", e); + } + if let Err(e) = file.flush().await { + 
tracing::warn!("Failed to flush access log: {}", e); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::Value; + use std::net::{IpAddr, Ipv4Addr}; + + #[test] + fn test_http_access_log_format() { + let entry = AccessLogEntry::new_http( + Some(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100))), + "GET".to_string(), + "/v1/platform/status".to_string(), + "HTTP/1.1".to_string(), + 200, + 1024, + 5000, + ) + .with_user_agent("Mozilla/5.0".to_string()); + + let log_line = entry.to_combined_format(); + + assert!(log_line.contains("192.168.1.100")); + assert!(log_line.contains("GET /v1/platform/status HTTP/1.1")); + assert!(log_line.contains("200 1024")); + assert!(log_line.contains("Mozilla/5.0")); + assert!(log_line.contains("5000us")); + assert!(log_line.contains("HTTP")); + } + + #[test] + fn test_grpc_access_log_format() { + let entry = AccessLogEntry::new_grpc( + Some(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))), + "/org.dash.platform.dapi.v0.Platform/getStatus".to_string(), + "org.dash.platform.dapi.v0.Platform".to_string(), + "getStatus".to_string(), + 0, // OK + 2048, + 10000, + ); + + let log_line = entry.to_combined_format(); + + assert!(log_line.contains("127.0.0.1")); + assert!(log_line.contains("POST /org.dash.platform.dapi.v0.Platform/getStatus HTTP/2.0")); + assert!(log_line.contains("200 2048")); + assert!(log_line.contains("10000us")); + assert!(log_line.contains("gRPC")); + } + + #[test] + fn test_grpc_status_conversion() { + assert_eq!(grpc_status_to_http_status(0), 200); // OK + assert_eq!(grpc_status_to_http_status(3), 400); // INVALID_ARGUMENT + assert_eq!(grpc_status_to_http_status(5), 404); // NOT_FOUND + assert_eq!(grpc_status_to_http_status(13), 500); // INTERNAL + assert_eq!(grpc_status_to_http_status(16), 401); // UNAUTHENTICATED + } + + #[test] + fn test_access_log_json_format() { + let entry = AccessLogEntry::new_http( + Some(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1))), + "POST".to_string(), + "/rpc".to_string(), + 
"HTTP/1.1".to_string(), + 201, + 256, + 2500, + ) + .with_user_agent("curl/8.0".to_string()) + .with_referer("https://example.net".to_string()); + + let json_line = entry.to_json_string(); + let value: Value = serde_json::from_str(&json_line).expect("valid json"); + assert_eq!(value["method"], "POST"); + assert_eq!(value["status"], 201); + assert_eq!(value["body_bytes"], 256); + assert_eq!(value["duration_us"], 2500); + assert_eq!(value["user_agent"], "curl/8.0"); + assert_eq!(value["referer"], "https://example.net"); + assert_eq!(value["protocol"], "HTTP"); + assert_eq!(value["remote_addr"], "10.0.0.1"); + } +} diff --git a/packages/rs-dapi/src/logging/middleware.rs b/packages/rs-dapi/src/logging/middleware.rs new file mode 100644 index 00000000000..6338386d2bc --- /dev/null +++ b/packages/rs-dapi/src/logging/middleware.rs @@ -0,0 +1,406 @@ +//! Middleware for access logging across different protocols +//! +//! Provides Tower layers for HTTP and gRPC access logging with +//! structured logging. + +use crate::logging::access_log::{AccessLogEntry, AccessLogger}; +use axum::extract::ConnectInfo; +use axum::http::{Request, Response, Version}; +use std::future::Future; +use std::net::{IpAddr, SocketAddr}; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Instant; +use tonic::Status as TonicStatus; +use tonic::transport::server::TcpConnectInfo; +use tower::{Layer, Service}; +use tracing::{Instrument, debug, error, info_span}; + +/// Tower layer for access logging +#[derive(Clone)] +pub struct AccessLogLayer { + access_logger: AccessLogger, +} + +impl AccessLogLayer { + /// Wrap the provided access logger in a Tower layer for HTTP/gRPC services. + pub fn new(access_logger: AccessLogger) -> Self { + Self { access_logger } + } +} + +impl Layer for AccessLogLayer { + type Service = AccessLogService; + + fn layer(&self, service: S) -> Self::Service { + // Wrap the inner service with an access logging capability. 
+ AccessLogService { + inner: service, + access_logger: self.access_logger.clone(), + } + } +} + +#[derive(Clone)] +pub struct AccessLogService { + inner: S, + access_logger: AccessLogger, +} + +impl Service> for AccessLogService +where + S: Service, Response = Response> + Clone + Send + 'static, + S::Future: Send + 'static, + S::Error: Send + 'static, + ReqBody: Send + 'static, + ResBody: Send + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // Delegate readiness checks to the inner service. + self.inner.poll_ready(cx) + } + + /// Capture request metadata, invoke the inner service, and emit access logs. + fn call(&mut self, req: Request) -> Self::Future { + let start_time = Instant::now(); + let method = req.method().to_string(); + let uri = req.uri().clone(); + let uri_display = uri.to_string(); + let request_target = uri + .path_and_query() + .map(|pq| pq.as_str()) + .unwrap_or_else(|| uri.path()) + .to_string(); + let version = format!("{:?}", req.version()); + + // Detect protocol type + let protocol_type = detect_protocol_type(&req); + + // Extract client IP + let remote_addr = extract_remote_ip(&req); + + // Extract user agent + let user_agent = req + .headers() + .get("user-agent") + .and_then(|v| v.to_str().ok()) + .map(|s| s.to_string()); + + // Extract referer + let referer = req + .headers() + .get("referer") + .and_then(|v| v.to_str().ok()) + .map(|s| s.to_string()); + + let mut inner = self.inner.clone(); + let access_logger = self.access_logger.clone(); + + Box::pin(async move { + // Create span for structured logging with protocol info + let span = info_span!( + "request", + method = %method, + uri = %uri_display, + protocol = %protocol_type, + remote_addr = ?remote_addr + ); + + let result = inner.call(req).instrument(span).await; + + match result { + Ok(response) => { + let duration = start_time.elapsed(); + let status = 
response.status().as_u16(); + let grpc_status_code = extract_grpc_status(&response, status); + + // TODO: Get actual response body size + // This would require buffering the response which adds complexity + let body_bytes = 0; + + // Create appropriate access log entry based on protocol + let entry = match protocol_type.as_str() { + "gRPC" => { + let (service, method_name) = parse_grpc_path(&request_target); + AccessLogEntry::new_grpc( + remote_addr, + request_target.clone(), + service, + method_name, + grpc_status_code, + body_bytes, + duration.as_micros() as u64, + ) + } + _ => AccessLogEntry::new_http( + remote_addr, + method.clone(), + request_target.clone(), + version, + status, + body_bytes, + duration.as_micros() as u64, + ), + }; + + let mut entry = entry; + + if let Some(ref ua) = user_agent { + entry = entry.with_user_agent(ua.clone()); + } + + if let Some(ref ref_) = referer { + entry = entry.with_referer(ref_.clone()); + } + + access_logger.log(&entry).await; + + // Log to structured logging + debug!( + method = %method, + uri = %uri_display, + protocol = %protocol_type, + status = status, + duration_us = duration.as_micros() as u64, + "Request completed" + ); + + Ok(response) + } + Err(err) => { + let duration = start_time.elapsed(); + + error!( + method = %method, + uri = %uri_display, + protocol = %protocol_type, + duration_us = duration.as_micros() as u64, + "Request failed" + ); + + Err(err) + } + } + }) + } +} + +/// Detect protocol type from HTTP request +pub(crate) fn detect_protocol_type(req: &Request) -> String { + // Check Content-Type header for JSON-RPC + if let Some(content_type) = req.headers().get("content-type") + && let Ok(ct_str) = content_type.to_str() + && ct_str.contains("application/json") + { + // Could be JSON-RPC, but we need to check the path or method + return "JSON-RPC".to_string(); + } + + // Check if this is a gRPC request + // gRPC requests typically have content-type: application/grpc + // or use HTTP/2 and have specific 
headers + if let Some(content_type) = req.headers().get("content-type") + && let Ok(ct_str) = content_type.to_str() + && ct_str.starts_with("application/grpc") + { + return "gRPC".to_string(); + } + + // Check for gRPC-specific headers + if req.headers().contains_key("grpc-encoding") + || req.headers().contains_key("grpc-accept-encoding") + || req.headers().contains_key("te") + { + return "gRPC".to_string(); + } + + // Check HTTP version - gRPC typically uses HTTP/2 + if req.version() == Version::HTTP_2 { + // Could be gRPC, but let's be more specific + let path = req.uri().path(); + if path.contains('.') && path.matches('/').count() >= 2 { + // Looks like a gRPC service path: /package.service/method + return "gRPC".to_string(); + } + } + + // Default to plain HTTP + "HTTP".to_string() +} + +/// Parse gRPC service and method from request path +/// Path format: /./ +pub(crate) fn parse_grpc_path(path: &str) -> (String, String) { + let path_component = if let Some(scheme_pos) = path.find("://") { + let after_scheme = &path[scheme_pos + 3..]; + if let Some(path_start) = after_scheme.find('/') { + &after_scheme[path_start..] 
+ } else { + "" + } + } else { + path + }; + + let normalized = path_component + .trim_start_matches('/') + .split('?') + .next() + .unwrap_or_default(); + + if normalized.is_empty() { + return ("unknown".to_string(), "unknown".to_string()); + } + + if let Some((service, method)) = normalized.rsplit_once('/') { + if service.is_empty() || method.is_empty() { + ("unknown".to_string(), "unknown".to_string()) + } else { + (service.to_string(), method.to_string()) + } + } else { + ("unknown".to_string(), normalized.to_string()) + } +} + +/// Convert HTTP status code to gRPC status code +pub(crate) fn http_status_to_grpc_status(http_status: u16) -> u32 { + match http_status { + 200 => 0, // OK + 400 => 3, // INVALID_ARGUMENT + 401 => 16, // UNAUTHENTICATED + 403 => 7, // PERMISSION_DENIED + 404 => 5, // NOT_FOUND + 409 => 6, // ALREADY_EXISTS + 412 => 9, // FAILED_PRECONDITION + 429 => 8, // RESOURCE_EXHAUSTED + 499 => 1, // CANCELLED + 500 => 13, // INTERNAL + 501 => 12, // UNIMPLEMENTED + 503 => 14, // UNAVAILABLE + 504 => 4, // DEADLINE_EXCEEDED + _ => 2, // UNKNOWN + } +} + +/// Retrieve the remote IP address from Axum or Tonic connection metadata. +fn extract_remote_ip(req: &Request) -> Option { + if let Some(connect_info) = req.extensions().get::>() { + return Some(connect_info.ip()); + } + + if let Some(connect_info) = req.extensions().get::() + && let Some(addr) = connect_info.remote_addr() + { + return Some(addr.ip()); + } + + None +} + +/// Determine the gRPC status code from response headers, extensions, or fallback mapping. 
+pub(crate) fn extract_grpc_status(response: &Response, http_status: u16) -> u32 { + if let Some(value) = response.headers().get("grpc-status") + && let Ok(as_str) = value.to_str() + && let Ok(code) = as_str.parse::() + { + return code; + } + + if let Some(status) = response.extensions().get::() { + return status.code() as u32; + } + + if http_status == 200 { + 0 + } else { + http_status_to_grpc_status(http_status) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use axum::http::HeaderValue; + use axum::http::{Request, Response}; + use std::net::{Ipv4Addr, SocketAddr}; + use tonic::Code; + + #[test] + fn extract_remote_ip_from_connect_info() { + let mut req: Request<()> = Request::default(); + let addr = SocketAddr::from((Ipv4Addr::new(10, 1, 2, 3), 8080)); + req.extensions_mut().insert(ConnectInfo(addr)); + + assert_eq!(extract_remote_ip(&req), Some(addr.ip())); + } + + #[test] + fn extract_remote_ip_from_tcp_connect_info() { + let mut req: Request<()> = Request::default(); + let addr = SocketAddr::from((Ipv4Addr::new(192, 168, 0, 5), 9000)); + let connect_info = TcpConnectInfo { + local_addr: None, + remote_addr: Some(addr), + }; + req.extensions_mut().insert(connect_info); + + assert_eq!(extract_remote_ip(&req), Some(addr.ip())); + } + + #[test] + fn extract_grpc_status_reads_header() { + let mut response: Response<()> = Response::new(()); + response + .headers_mut() + .insert("grpc-status", HeaderValue::from_static("7")); + + assert_eq!(extract_grpc_status(&response, 200), 7); + } + + #[test] + fn extract_grpc_status_reads_extension() { + let mut response: Response<()> = Response::new(()); + response + .extensions_mut() + .insert(tonic::Status::new(Code::Unavailable, "server unavailable")); + + assert_eq!( + extract_grpc_status(&response, 200), + Code::Unavailable as u32 + ); + } + + #[test] + fn extract_grpc_status_falls_back_to_http_status() { + let response: Response<()> = Response::new(()); + assert_eq!(extract_grpc_status(&response, 503), 14); + } + + 
#[test] + fn parse_grpc_path_handles_standard_path() { + let (service, method) = parse_grpc_path("/org.dash.platform.dapi.v0.Platform/getStatus"); + assert_eq!(service, "org.dash.platform.dapi.v0.Platform"); + assert_eq!(method, "getStatus"); + } + + #[test] + fn parse_grpc_path_handles_absolute_uri() { + let (service, method) = parse_grpc_path( + "http://127.0.0.1:2443/org.dash.platform.dapi.v0.Platform/waitForStateTransitionResult", + ); + assert_eq!(service, "org.dash.platform.dapi.v0.Platform"); + assert_eq!(method, "waitForStateTransitionResult"); + } + + #[test] + fn parse_grpc_path_missing_segments() { + let (service, method) = parse_grpc_path("/"); + assert_eq!(service, "unknown"); + assert_eq!(method, "unknown"); + } +} diff --git a/packages/rs-dapi/src/logging/mod.rs b/packages/rs-dapi/src/logging/mod.rs new file mode 100644 index 00000000000..09c724cb41b --- /dev/null +++ b/packages/rs-dapi/src/logging/mod.rs @@ -0,0 +1,196 @@ +//! Logging infrastructure for rs-dapi +//! +//! This module provides structured logging with access logging in standard formats, +//! and log rotation support. 
+ +use tracing_subscriber::{Registry, layer::SubscriberExt, util::SubscriberInitExt}; + +use crate::config::LoggingConfig; + +pub mod access_log; +pub mod middleware; + +pub use access_log::{AccessLogEntry, AccessLogFormat, AccessLogger}; +pub use middleware::AccessLogLayer; + +/// Initialize logging subsystem with given configuration +/// Returns Some(AccessLogger) if access logging is configured with a non-empty path, None otherwise +pub async fn init_logging( + config: &LoggingConfig, + cli_config: &LoggingCliConfig, +) -> Result, String> { + // Set up the main application logger + setup_application_logging(config, cli_config)?; + + let access_log_format = parse_access_log_format(&config.access_log_format)?; + + // Set up access logging if configured with a non-empty path + let access_logger = if let Some(ref path) = config.access_log_path { + if !path.trim().is_empty() { + Some( + AccessLogger::new(path.clone(), access_log_format) + .await + .map_err(|e| format!("Failed to create access logger {}: {}", path, e))?, + ) + } else { + None + } + } else { + None + }; + + Ok(access_logger) +} + +/// Configure tracing subscribers based on config and CLI overrides, initializing global logging. 
+fn setup_application_logging( + config: &LoggingConfig, + cli_config: &LoggingCliConfig, +) -> Result<(), String> { + use tracing_subscriber::{filter::EnvFilter, fmt}; + + // Determine log level based on verbose flags + let env_filter_value = if cli_config.debug || cli_config.verbose > 0 { + match cli_config.verbose.max(if cli_config.debug { 2 } else { 0 }) { + 1 => "rs_dapi=debug,tower_http::trace=debug,info".to_string(), + 2 => "rs_dapi=trace,tower_http::trace=debug,info".to_string(), + 3 => "rs_dapi=trace,tower_http::trace=trace,h2=info,tower=info,hyper_util=info,debug" + .to_string(), + 4 => "rs_dapi=trace,tower_http::trace=trace,debug".to_string(), + _ => "rs_dapi=trace,trace".to_string(), + } + } else if let Some(filter) = filter_from_logging_config(config) { + filter + } else { + // Use RUST_LOG if set, otherwise default + std::env::var("RUST_LOG").unwrap_or_else(|_| "rs_dapi=info,warn".to_string()) + }; + + let env_filter = EnvFilter::try_new(env_filter_value.clone()) + .map_err(|e| format!("Invalid log filter '{}': {}", env_filter_value, e))?; + + let registry = Registry::default().with(env_filter); + + if config.json_format { + // JSON structured logging + let fmt_layer = fmt::layer() + .json() + .with_current_span(false) + .with_span_list(false) + .with_ansi(cli_config.color.unwrap_or(false)); + + registry.with(fmt_layer).init(); + } else { + // Human-readable logging + let fmt_layer = fmt::layer().with_ansi(cli_config.color.unwrap_or(true)); + + registry.with(fmt_layer).init(); + } + + Ok(()) +} + +// CLI configuration structure for compatibility +pub struct LoggingCliConfig { + pub verbose: u8, + pub debug: bool, + pub color: Option, +} + +/// Derive an EnvFilter specification string from the logging config if provided. 
+fn filter_from_logging_config(config: &LoggingConfig) -> Option { + let raw = config.level.trim(); + + if raw.is_empty() { + return None; + } + + let lower = raw.to_ascii_lowercase(); + + match lower.as_str() { + "error" | "warn" | "info" | "debug" | "trace" => Some(format!("rs_dapi={},warn", lower)), + "off" | "silent" => Some("off".to_string()), + _ => Some(raw.to_string()), + } +} + +/// Normalize the configured access log format value into an enum variant. +fn parse_access_log_format(raw: &str) -> Result { + let normalized = raw.trim().to_ascii_lowercase(); + + match normalized.as_str() { + "" | "combined" => Ok(AccessLogFormat::Combined), + "json" => Ok(AccessLogFormat::Json), + other => Err(format!("Unsupported access log format: {}", other)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn filter_from_logging_config_returns_expected_for_levels() { + let config = LoggingConfig { + level: "Debug".to_string(), + ..LoggingConfig::default() + }; + + assert_eq!( + filter_from_logging_config(&config), + Some("rs_dapi=debug,warn".to_string()) + ); + + let config_off = LoggingConfig { + level: "silent".to_string(), + ..LoggingConfig::default() + }; + + assert_eq!( + filter_from_logging_config(&config_off), + Some("off".to_string()) + ); + } + + #[test] + fn filter_from_logging_config_allows_custom_specs() { + let config = LoggingConfig { + level: "rs_dapi=trace,hyper=warn".to_string(), + ..LoggingConfig::default() + }; + + assert_eq!( + filter_from_logging_config(&config), + Some("rs_dapi=trace,hyper=warn".to_string()) + ); + } + + #[test] + fn filter_from_logging_config_ignores_empty_values() { + let config = LoggingConfig { + level: " ".to_string(), + ..LoggingConfig::default() + }; + + assert_eq!(filter_from_logging_config(&config), None); + } + + #[test] + fn parse_access_log_format_accepts_supported_values() { + assert_eq!( + parse_access_log_format("combined"), + Ok(AccessLogFormat::Combined) + ); + 
assert_eq!(parse_access_log_format("json"), Ok(AccessLogFormat::Json)); + assert_eq!( + parse_access_log_format(" "), + Ok(AccessLogFormat::Combined) + ); + } + + #[test] + fn parse_access_log_format_rejects_unknown_values() { + let err = parse_access_log_format("xml").unwrap_err(); + assert!(err.contains("Unsupported access log format")); + } +} diff --git a/packages/rs-dapi/src/main.rs b/packages/rs-dapi/src/main.rs new file mode 100644 index 00000000000..7508eddb897 --- /dev/null +++ b/packages/rs-dapi/src/main.rs @@ -0,0 +1,281 @@ +use clap::{ArgAction, Parser, Subcommand}; +use rs_dapi::DAPIResult; +use rs_dapi::error::DapiError; +use std::path::PathBuf; +use std::process::ExitCode; +use tracing::{error, info, trace}; + +use rs_dapi::config::Config; +use rs_dapi::logging::{LoggingCliConfig, init_logging}; +use rs_dapi::server::DapiServer; + +#[derive(Debug, Subcommand)] +enum Commands { + /// Start the DAPI server + /// + /// Starts all configured services including gRPC API, gRPC Streams, + /// JSON-RPC, and Health Check/Metrics endpoints. + /// The server will run until interrupted with Ctrl+C. + #[command()] + Start, + /// Display current configuration + /// + /// Shows all configuration variables and their current values from: + /// 1. Environment variables + /// 2. .env file (if specified or found) + /// 3. Default values + /// + /// This is useful for debugging configuration issues and verifying + /// which settings will be used. + /// + /// WARNING: Output may contain sensitive data like API keys or URIs! + #[command()] + Config, + /// Print current software version + /// + /// Display the version information for rs-dapi and exit. + #[command()] + Version, +} + +/// DAPI (Distributed API) server for Dash Platform +/// +/// Provides gRPC and JSON-RPC endpoints for blockchain and platform data. 
+#[derive(Debug, Parser)] +#[command( + name = "rs-dapi", + version, + about = "DAPI (Distributed API) server for Dash Platform", + long_about = include_str!("../README.md") +)] +struct Cli { + #[command(subcommand)] + command: Option, + + /// Path to the config (.env) file + /// + /// If not specified, rs-dapi will look for .env in the current directory. + /// Variables in the environment always override .env file values. + #[arg(short, long, value_hint = clap::ValueHint::FilePath)] + config: Option, + + /// Enable verbose logging. Use multiple times for even more logs + /// + /// Repeat 'v' multiple times to increase log verbosity: + /// + /// * none - default to 'info' level for rs-dapi, 'warn' for libraries + /// * -v - 'debug' level for rs-dapi, 'info' for libraries + /// * -vv - 'trace' level for rs-dapi, 'debug' for libraries + /// * -vvv - 'trace' level for all components + /// + /// Note: Using -v overrides any settings defined in RUST_LOG. + #[arg( + short = 'v', + long = "verbose", + action = ArgAction::Count, + global = true + )] + verbose: u8, + + /// Display colorful logs + /// + /// Controls whether log output includes ANSI color codes. + /// If not specified, color is automatically detected based on terminal capabilities. + #[arg(long)] + color: Option, + + /// Enable debug mode (equivalent to -vv) + /// + /// This is a convenience flag that sets the same log level as -vv: + /// 'trace' level for rs-dapi, 'debug' level for libraries. + #[arg(long)] + debug: bool, +} + +impl Cli { + /// Executes the selected CLI command after loading config and logging. + /// Returns `Ok` on success or an error string suitable for user-facing output. + /// Server failures are mapped to descriptive messages for exit handling. 
+ async fn run(self) -> Result<(), String> { + // Load configuration + let config = load_config(&self.config); + + // Configure logging and access logging + let access_logger = configure_logging(&self, &config.dapi.logging).await?; + + match self.command.unwrap_or(Commands::Start) { + Commands::Start => { + info!( + version = env!("CARGO_PKG_VERSION"), + rust = env!("CARGO_PKG_RUST_VERSION"), + "rs-dapi server initializing", + ); + + let mut server_future = run_server(config, access_logger); + tokio::pin!(server_future); + + let outcome = tokio::select! { + result = &mut server_future => Some(result), + signal = shutdown_signal() => { + match signal { + Ok(()) => { + info!("Shutdown signal received; stopping rs-dapi"); + } + Err(err) => { + error!(error = %err, "Error while awaiting shutdown signal"); + return Err(format!("Signal handling error: {}", err)); + } + } + None + } + }; + + if let Some(result) = outcome { + if let Err(e) = result { + error!("Server error: {}", e); + + // Check if this is a connection-related error and set appropriate exit code + match &e { + DapiError::ServerUnavailable(_, _) => { + error!(error = %e, + "Upstream service connection failed. Check drive-abci and tenderdash and try again." + ); + return Err(format!("Connection error: {}", e)); + } + DapiError::Client(msg) if msg.contains("Failed to connect") => { + error!(error = %msg, + "Client connection failed. Check drive-abci and tenderdash and try again." + ); + return Err(format!("Connection error: {}", e)); + } + DapiError::Transport(_) => { + error!( + error = %e, + "Transport error occurred. Check drive-abci and tenderdash and try again." 
+ ); + return Err(format!("Connection error: {}", e)); + } + _ => { + error!(error = %e, "Cannot start server."); + return Err(e.to_string()); + } + } + } + } + Ok(()) + } + Commands::Config => dump_config(&config), + Commands::Version => { + print_version(); + Ok(()) + } + } + } +} + +/// Load configuration from the optional `.env` path, exiting on failure. +fn load_config(path: &Option) -> Config { + match Config::load_from_dotenv(path.clone()) { + Ok(config) => config, + Err(e) => { + eprintln!("Failed to load configuration: {}", e); + std::process::exit(1); + } + } +} + +/// Initialize structured logging and access logging based on CLI overrides. +async fn configure_logging( + cli: &Cli, + logging_config: &rs_dapi::config::LoggingConfig, +) -> Result, String> { + let cli_config = LoggingCliConfig { + verbose: cli.verbose, + debug: cli.debug, + color: cli.color, + }; + + init_logging(logging_config, &cli_config).await +} + +/// Construct and run the DAPI server until shutdown, wiring configured services. +async fn run_server( + config: Config, + access_logger: Option, +) -> DAPIResult<()> { + trace!("Creating DAPI server instance..."); + + let server = DapiServer::new(std::sync::Arc::new(config), access_logger).await?; + + info!("rs-dapi server starting on configured ports"); + + trace!("Starting server main loop..."); + server.run().await?; + + info!("rs-dapi server shutdown complete"); + Ok(()) +} + +/// Print the current configuration as pretty JSON, warning about sensitive data. +fn dump_config(config: &Config) -> Result<(), String> { + println!("# rs-dapi Configuration"); + println!("# WARNING: This output may contain sensitive data!"); + println!(); + + match serde_json::to_string_pretty(config) { + Ok(json) => { + println!("{}", json); + Ok(()) + } + Err(e) => Err(format!("Failed to serialize configuration: {}", e)), + } +} + +/// Print the rs-dapi and Rust toolchain versions to stdout. 
+fn print_version() { + println!("rs-dapi {}", env!("CARGO_PKG_VERSION")); + println!("Built with Rust {}", env!("CARGO_PKG_RUST_VERSION")); +} + +/// Wait for an OS shutdown signal (SIGTERM/SIGINT on Unix, Ctrl+C elsewhere). +/// Returning Ok indicates a signal was received; errors surface issues with signal handlers. +async fn shutdown_signal() -> std::io::Result<()> { + #[cfg(unix)] + { + use tokio::signal::unix::{SignalKind, signal}; + + let mut sigterm = signal(SignalKind::terminate())?; + let mut sigint = signal(SignalKind::interrupt())?; + + tokio::select! { + _ = sigterm.recv() => {}, + _ = sigint.recv() => {}, + } + + Ok(()) + } + + #[cfg(not(unix))] + { + tokio::signal::ctrl_c().await + } +} + +/// Initialize a Tokio runtime and execute the CLI runner, mapping failures to exit codes. +fn main() -> Result<(), ExitCode> { + let rt = tokio::runtime::Builder::new_multi_thread() + .worker_threads(4) + .enable_all() + .build() + .expect("Failed to create Tokio runtime"); + + let cli = Cli::parse(); + + match rt.block_on(cli.run()) { + Ok(()) => Ok(()), + Err(e) => { + eprintln!("Error: {}", e); + Err(ExitCode::FAILURE) + } + } +} diff --git a/packages/rs-dapi/src/metrics.rs b/packages/rs-dapi/src/metrics.rs new file mode 100644 index 00000000000..7692fd14bb9 --- /dev/null +++ b/packages/rs-dapi/src/metrics.rs @@ -0,0 +1,569 @@ +use axum::http::{Extensions, Request, Response}; +use once_cell::sync::Lazy; +use prometheus::{ + Encoder, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, TextEncoder, + register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge, + register_int_gauge_vec, +}; +use std::any::type_name_of_val; +use std::borrow::Cow; +use std::fmt; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Instant; +use tower::{Layer, Service}; + +use crate::logging::middleware::{ + detect_protocol_type, extract_grpc_status, http_status_to_grpc_status, parse_grpc_path, +}; + 
+#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub struct MethodLabel(Cow<'static, str>); + +impl MethodLabel { + pub fn from_type_name(name: &'static str) -> Self { + Self(Cow::Borrowed(name)) + } + + pub fn from_owned(name: String) -> Self { + Self(Cow::Owned(name)) + } + + pub fn as_str(&self) -> &str { + &self.0 + } +} + +impl fmt::Display for MethodLabel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +pub fn method_label(value: &T) -> MethodLabel { + MethodLabel::from_type_name(type_name_of_val(value)) +} + +pub fn attach_method_label(extensions: &mut Extensions, method: MethodLabel) { + extensions.insert(method); +} + +/// Enum for all metric names used in rs-dapi +#[derive(Copy, Clone, Debug)] +pub enum Metric { + /// Cache events counter: labels [cache, method, outcome] + CacheEvent, + /// Cache memory usage gauge + CacheMemoryUsage, + /// Cache memory capacity gauge + CacheMemoryCapacity, + /// Cache entries gauge + CacheEntries, + /// Requests counter: labels [protocol, endpoint, status] + RequestCount, + /// Request duration histogram: labels [protocol, endpoint, status] + RequestDuration, + /// Platform events: active sessions gauge + PlatformEventsActiveSessions, + /// Platform events: commands processed, labels [op] + PlatformEventsCommands, + /// Platform events: forwarded events counter + PlatformEventsForwardedEvents, + /// Platform events: forwarded acks counter + PlatformEventsForwardedAcks, + /// Platform events: forwarded errors counter + PlatformEventsForwardedErrors, + /// Platform events: upstream streams started counter + PlatformEventsUpstreamStreams, + /// Active worker tasks gauge + WorkersActive, +} + +impl Metric { + /// Return the Prometheus metric name associated with this enum variant. 
+ pub const fn name(self) -> &'static str { + match self { + Metric::CacheEvent => "rsdapi_cache_events_total", + Metric::CacheMemoryUsage => "rsdapi_cache_memory_usage_bytes", + Metric::CacheMemoryCapacity => "rsdapi_cache_memory_capacity_bytes", + Metric::CacheEntries => "rsdapi_cache_entries", + Metric::RequestCount => "rsdapi_requests_total", + Metric::RequestDuration => "rsdapi_request_duration_seconds", + Metric::PlatformEventsActiveSessions => "rsdapi_platform_events_active_sessions", + Metric::PlatformEventsCommands => "rsdapi_platform_events_commands_total", + Metric::PlatformEventsForwardedEvents => { + "rsdapi_platform_events_forwarded_events_total" + } + Metric::PlatformEventsForwardedAcks => "rsdapi_platform_events_forwarded_acks_total", + Metric::PlatformEventsForwardedErrors => { + "rsdapi_platform_events_forwarded_errors_total" + } + Metric::PlatformEventsUpstreamStreams => { + "rsdapi_platform_events_upstream_streams_total" + } + Metric::WorkersActive => "rsdapi_workers_active_tasks", + } + } + + /// Return the human-readable help string for the Prometheus metric. 
+ pub const fn help(self) -> &'static str { + match self { + Metric::CacheEvent => "Cache events by method and outcome (hit|miss)", + Metric::CacheMemoryUsage => "Approximate cache memory usage in bytes", + Metric::CacheMemoryCapacity => "Configured cache memory capacity in bytes", + Metric::CacheEntries => "Number of items currently stored in the cache", + Metric::RequestCount => "Requests received by protocol, endpoint, and status", + Metric::RequestDuration => { + "Request latency in seconds by protocol, endpoint, and status" + } + Metric::PlatformEventsActiveSessions => { + "Current number of active Platform events sessions" + } + Metric::PlatformEventsCommands => "Platform events commands processed by operation", + Metric::PlatformEventsForwardedEvents => "Platform events forwarded to clients", + Metric::PlatformEventsForwardedAcks => "Platform acks forwarded to clients", + Metric::PlatformEventsForwardedErrors => "Platform errors forwarded to clients", + Metric::PlatformEventsUpstreamStreams => { + "Upstream subscribePlatformEvents streams started" + } + Metric::WorkersActive => "Current number of active background worker tasks", + } + } +} + +/// Outcome label values for cache events +#[derive(Copy, Clone, Debug)] +pub enum Outcome { + Hit, + Miss, +} + +impl Outcome { + /// Convert the outcome into a label-friendly string literal. + pub const fn as_str(self) -> &'static str { + match self { + Outcome::Hit => "hit", + Outcome::Miss => "miss", + } + } +} + +/// Label keys used across metrics +#[derive(Copy, Clone, Debug)] +pub enum Label { + Cache, + Method, + Outcome, + Protocol, + // TODO: ensure we have a limited set of endpoints, so that cardinality is controlled and we don't overload Prometheus + Endpoint, + Status, + Op, +} + +impl Label { + /// Return the label key used in Prometheus metrics. 
+ pub const fn name(self) -> &'static str { + match self { + Label::Cache => "cache", + Label::Method => "method", + Label::Outcome => "outcome", + Label::Protocol => "protocol", + Label::Endpoint => "endpoint", + Label::Status => "status", + Label::Op => "op", + } + } +} + +pub static CACHE_EVENTS: Lazy = Lazy::new(|| { + register_int_counter_vec!( + Metric::CacheEvent.name(), + Metric::CacheEvent.help(), + &[ + Label::Cache.name(), + Label::Method.name(), + Label::Outcome.name() + ] + ) + .expect("create counter") +}); + +pub static CACHE_MEMORY_USAGE: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + Metric::CacheMemoryUsage.name(), + Metric::CacheMemoryUsage.help(), + &[Label::Cache.name()] + ) + .expect("create gauge") +}); + +pub static CACHE_MEMORY_CAPACITY: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + Metric::CacheMemoryCapacity.name(), + Metric::CacheMemoryCapacity.help(), + &[Label::Cache.name()] + ) + .expect("create gauge") +}); + +pub static CACHE_ENTRIES: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + Metric::CacheEntries.name(), + Metric::CacheEntries.help(), + &[Label::Cache.name()] + ) + .expect("create gauge") +}); + +pub static PLATFORM_EVENTS_ACTIVE_SESSIONS: Lazy = Lazy::new(|| { + register_int_gauge!( + Metric::PlatformEventsActiveSessions.name(), + Metric::PlatformEventsActiveSessions.help() + ) + .expect("create gauge") +}); + +pub static REQUEST_COUNTER: Lazy = Lazy::new(|| { + register_int_counter_vec!( + Metric::RequestCount.name(), + Metric::RequestCount.help(), + &[ + Label::Protocol.name(), + Label::Endpoint.name(), + Label::Status.name() + ] + ) + .expect("create counter vec") +}); + +pub static REQUEST_DURATION_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( + Metric::RequestDuration.name(), + Metric::RequestDuration.help(), + &[ + Label::Protocol.name(), + Label::Endpoint.name(), + Label::Status.name() + ] + ) + .expect("create histogram vec") +}); + +pub static PLATFORM_EVENTS_COMMANDS: Lazy = Lazy::new(|| { + 
register_int_counter_vec!( + Metric::PlatformEventsCommands.name(), + Metric::PlatformEventsCommands.help(), + &[Label::Op.name()] + ) + .expect("create counter vec") +}); + +pub static PLATFORM_EVENTS_FORWARDED_EVENTS: Lazy = Lazy::new(|| { + register_int_counter!( + Metric::PlatformEventsForwardedEvents.name(), + Metric::PlatformEventsForwardedEvents.help() + ) + .expect("create counter") +}); + +pub static PLATFORM_EVENTS_FORWARDED_ACKS: Lazy = Lazy::new(|| { + register_int_counter!( + Metric::PlatformEventsForwardedAcks.name(), + Metric::PlatformEventsForwardedAcks.help() + ) + .expect("create counter") +}); + +pub static PLATFORM_EVENTS_FORWARDED_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + Metric::PlatformEventsForwardedErrors.name(), + Metric::PlatformEventsForwardedErrors.help() + ) + .expect("create counter") +}); + +pub static PLATFORM_EVENTS_UPSTREAM_STREAMS: Lazy = Lazy::new(|| { + register_int_counter!( + Metric::PlatformEventsUpstreamStreams.name(), + Metric::PlatformEventsUpstreamStreams.help() + ) + .expect("create counter") +}); + +pub static WORKERS_ACTIVE: Lazy = Lazy::new(|| { + register_int_gauge!(Metric::WorkersActive.name(), Metric::WorkersActive.help()) + .expect("create gauge") +}); + +/// Root typed accessor for metrics +pub struct Metrics; + +impl Metrics { + /// Increment cache events counter with explicit outcome + #[inline] + pub fn cache_events_inc(cache: &str, method: &MethodLabel, outcome: Outcome) { + CACHE_EVENTS + .with_label_values(&[cache, method.as_str(), outcome.as_str()]) + .inc(); + } + + /// Mark cache hit for method + #[inline] + pub fn cache_events_hit(cache: &str, method: &MethodLabel) { + Self::cache_events_inc(cache, method, Outcome::Hit); + } + + /// Mark cache miss for method + #[inline] + pub fn cache_events_miss(cache: &str, method: &MethodLabel) { + Self::cache_events_inc(cache, method, Outcome::Miss); + } +} + +#[inline] +pub fn record_cache_event(cache: &str, method: &MethodLabel, outcome: Outcome) 
{ + CACHE_EVENTS + .with_label_values(&[cache, method.as_str(), outcome.as_str()]) + .inc(); +} + +#[inline] +pub fn cache_hit(cache: &str, method: &MethodLabel) { + record_cache_event(cache, method, Outcome::Hit); +} + +#[inline] +pub fn cache_miss(cache: &str, method: &MethodLabel) { + record_cache_event(cache, method, Outcome::Miss); +} + +#[inline] +fn clamp_to_i64(value: u64) -> i64 { + value.min(i64::MAX as u64) as i64 +} + +#[inline] +pub fn cache_memory_usage_bytes(cache: &str, bytes: u64) { + CACHE_MEMORY_USAGE + .with_label_values(&[cache]) + .set(clamp_to_i64(bytes)); +} + +#[inline] +pub fn cache_memory_capacity_bytes(cache: &str, bytes: u64) { + CACHE_MEMORY_CAPACITY + .with_label_values(&[cache]) + .set(clamp_to_i64(bytes)); +} + +#[inline] +pub fn cache_entries(cache: &str, entries: usize) { + CACHE_ENTRIES + .with_label_values(&[cache]) + .set(clamp_to_i64(entries as u64)); +} + +#[inline] +pub fn requests_inc(protocol: &str, endpoint: &str, status: &str) { + REQUEST_COUNTER + .with_label_values(&[protocol, endpoint, status]) + .inc(); +} + +#[inline] +pub fn request_duration_observe(protocol: &str, endpoint: &str, status: &str, seconds: f64) { + REQUEST_DURATION_SECONDS + .with_label_values(&[protocol, endpoint, status]) + .observe(seconds); +} + +/// Gather Prometheus metrics into an encoded buffer and its corresponding content type. 
+pub fn gather_prometheus() -> (Vec, String) { + let metric_families = prometheus::gather(); + let mut buffer = Vec::new(); + let encoder = TextEncoder::new(); + encoder + .encode(&metric_families, &mut buffer) + .unwrap_or_default(); + let content_type = encoder.format_type().to_string(); + (buffer, content_type) +} + +// ---- Request metrics middleware ---- + +#[derive(Clone, Default)] +pub struct MetricsLayer; + +impl MetricsLayer { + pub fn new() -> Self { + Self + } +} + +#[derive(Clone)] +pub struct MetricsService { + inner: S, +} + +impl Layer for MetricsLayer { + type Service = MetricsService; + + fn layer(&self, service: S) -> Self::Service { + MetricsService { inner: service } + } +} + +impl Service> for MetricsService +where + S: Service, Response = Response> + Clone + Send + 'static, + S::Future: Send + 'static, + S::Error: Send + 'static, + ReqBody: Send + 'static, + ResBody: Send + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let start_time = Instant::now(); + let protocol_type = detect_protocol_type(&req); + let path = req.uri().path().to_string(); + let request_method_hint = req.extensions().get::().cloned(); + + let mut inner = self.inner.clone(); + + Box::pin(async move { + let result = inner.call(req).await; + match result { + Ok(response) => { + let duration = start_time.elapsed(); + let status = response.status().as_u16(); + let method_hint = response.extensions().get::(); + let endpoint_label = endpoint_label( + &protocol_type, + &path, + method_hint.or(request_method_hint.as_ref()), + ); + + let status_code = if protocol_type == "gRPC" { + extract_grpc_status(&response, status) + } else { + http_status_to_grpc_status(status) + }; + let status_label = status_code.to_string(); + + requests_inc( + protocol_type.as_str(), + 
endpoint_label.as_str(), + status_label.as_str(), + ); + request_duration_observe( + protocol_type.as_str(), + endpoint_label.as_str(), + status_label.as_str(), + duration.as_secs_f64(), + ); + + Ok(response) + } + Err(err) => { + let duration = start_time.elapsed(); + let endpoint_label = + endpoint_label(&protocol_type, &path, request_method_hint.as_ref()); + let status_label = http_status_to_grpc_status(500).to_string(); + + requests_inc( + protocol_type.as_str(), + endpoint_label.as_str(), + status_label.as_str(), + ); + request_duration_observe( + protocol_type.as_str(), + endpoint_label.as_str(), + status_label.as_str(), + duration.as_secs_f64(), + ); + + Err(err) + } + } + }) + } +} + +#[inline] +fn endpoint_label(protocol: &str, path: &str, method_hint: Option<&MethodLabel>) -> String { + if protocol == "gRPC" { + if let Some(method) = method_hint { + return method.as_str().to_string(); + } + let (service, method) = parse_grpc_path(path); + if service == "unknown" && method == "unknown" { + path.to_string() + } else { + format!("{}/{}", service, method) + } + } else if protocol == "JSON-RPC" { + if let Some(method) = method_hint { + method.as_str().to_string() + } else { + path.to_string() + } + } else { + path.to_string() + } +} + +// ---- Platform events (proxy) helpers ---- + +#[inline] +pub fn platform_events_active_sessions_inc() { + PLATFORM_EVENTS_ACTIVE_SESSIONS.inc(); +} + +#[inline] +pub fn platform_events_active_sessions_dec() { + PLATFORM_EVENTS_ACTIVE_SESSIONS.dec(); +} + +#[inline] +pub fn platform_events_command(op: &str) { + PLATFORM_EVENTS_COMMANDS.with_label_values(&[op]).inc(); +} + +#[inline] +pub fn platform_events_forwarded_event() { + PLATFORM_EVENTS_FORWARDED_EVENTS.inc(); +} + +#[inline] +pub fn platform_events_forwarded_ack() { + PLATFORM_EVENTS_FORWARDED_ACKS.inc(); +} + +#[inline] +pub fn platform_events_forwarded_error() { + PLATFORM_EVENTS_FORWARDED_ERRORS.inc(); +} + +#[inline] +pub fn platform_events_upstream_stream_started() 
{ + PLATFORM_EVENTS_UPSTREAM_STREAMS.inc(); +} + +#[inline] +pub fn workers_active_inc() { + WORKERS_ACTIVE.inc(); +} + +#[inline] +pub fn workers_active_dec() { + WORKERS_ACTIVE.dec(); +} diff --git a/packages/rs-dapi/src/protocol/jsonrpc_translator/error.rs b/packages/rs-dapi/src/protocol/jsonrpc_translator/error.rs new file mode 100644 index 00000000000..2fc57d0a908 --- /dev/null +++ b/packages/rs-dapi/src/protocol/jsonrpc_translator/error.rs @@ -0,0 +1,70 @@ +use serde_json::Value; + +use dapi_grpc::tonic::Code; + +use crate::error::DapiError; + +/// JSON-RPC error code for "not found" errors. +/// +/// For backwards compatibility with existing clients, we use -32602 (Invalid params) for not found errors. +const ERR_NOT_FOUND: i32 = -32602; + +/// Translate a `DapiError` into JSON-RPC error code, message, and optional data payload. +/// Collapses related client-side errors into shared codes and defers gRPC statuses for finer handling. +pub fn map_error(error: &DapiError) -> (i32, String, Option) { + match error { + DapiError::InvalidArgument(msg) + | DapiError::InvalidData(msg) + | DapiError::FailedPrecondition(msg) + | DapiError::AlreadyExists(msg) + | DapiError::NoValidTxProof(msg) + | DapiError::Client(msg) => (-32602, msg.clone(), None), + DapiError::ServiceUnavailable(msg) + | DapiError::Unavailable(msg) + | DapiError::Timeout(msg) => (-32003, msg.clone(), None), + DapiError::MethodNotFound(msg) => (-32601, msg.clone(), None), + DapiError::InvalidRequest(msg) => (-32600, msg.clone(), None), + DapiError::NotFound(msg) => (ERR_NOT_FOUND, msg.clone(), None), + DapiError::Status(status) => map_status(status), + _ => ( + -32603, + "Internal error".to_string(), + Some(Value::String(error.to_string())), + ), + } +} + +/// Map a gRPC `Status` into JSON-RPC semantics with fallback messaging. +/// Normalizes empty status messages and groups transport vs validation failures. 
+fn map_status(status: &dapi_grpc::tonic::Status) -> (i32, String, Option) { + let raw_message = status.message().to_string(); + let normalized = if raw_message.is_empty() { + match status.code() { + Code::InvalidArgument => "Invalid params".to_string(), + Code::FailedPrecondition => "Failed precondition".to_string(), + Code::AlreadyExists => "Already exists".to_string(), + Code::NotFound => "Not found".to_string(), + Code::Aborted => "Aborted".to_string(), + Code::ResourceExhausted => "Resource exhausted".to_string(), + Code::Unavailable => "Service unavailable".to_string(), + _ => "Internal error".to_string(), + } + } else { + raw_message + }; + + match status.code() { + Code::InvalidArgument + | Code::FailedPrecondition + | Code::AlreadyExists + | Code::Aborted + | Code::ResourceExhausted => (-32602, normalized, None), + Code::NotFound => (ERR_NOT_FOUND, normalized, None), + Code::Unavailable | Code::DeadlineExceeded | Code::Cancelled => (-32003, normalized, None), + _ => ( + -32603, + "Internal error".to_string(), + Some(Value::String(status.to_string())), + ), + } +} diff --git a/packages/rs-dapi/src/protocol/jsonrpc_translator/mod.rs b/packages/rs-dapi/src/protocol/jsonrpc_translator/mod.rs new file mode 100644 index 00000000000..b98a4f0ab75 --- /dev/null +++ b/packages/rs-dapi/src/protocol/jsonrpc_translator/mod.rs @@ -0,0 +1,269 @@ +mod error; +mod params; +mod types; + +use dapi_grpc::core::v0::BroadcastTransactionRequest; +use dapi_grpc::platform::v0::{GetStatusRequest, GetStatusResponse}; +use serde_json::Value; + +use crate::error::{DapiError, DapiResult}; + +pub use types::{JsonRpcError, JsonRpcRequest, JsonRpcResponse}; + +#[derive(Debug, Default, Clone)] +pub struct JsonRpcTranslator; + +/// Supported JSON-RPC calls handled by the gateway +#[derive(Debug)] +pub enum JsonRpcCall { + PlatformGetStatus(GetStatusRequest), + CoreGetBestBlockHash, + CoreGetBlockHash { height: u32 }, + CoreBroadcastTransaction(BroadcastTransactionRequest), +} + +impl 
JsonRpcTranslator { + /// Create a new translator that maps between JSON-RPC payloads and gRPC requests. + pub fn new() -> Self { + Self + } + + /// Interpret an incoming JSON-RPC request and produce the corresponding gRPC call marker. + /// Validates parameters and converts them into typed messages or structured errors. + pub async fn translate_request(&self, json_rpc: JsonRpcRequest) -> DapiResult { + if json_rpc.jsonrpc != "2.0" { + return Err(DapiError::InvalidRequest("jsonrpc must be \"2.0\"".into())); + } + + match json_rpc.method.as_str() { + "getStatus" => Ok(self.translate_platform_status()), + "getBestBlockHash" => Ok(JsonRpcCall::CoreGetBestBlockHash), + "getBlockHash" => { + let height = params::parse_first_u32_param(json_rpc.params) + .map_err(DapiError::InvalidArgument)?; + Ok(JsonRpcCall::CoreGetBlockHash { height }) + } + "sendRawTransaction" => { + let (tx, allow_high_fees, bypass_limits) = + params::parse_send_raw_tx_params(json_rpc.params) + .map_err(DapiError::InvalidArgument)?; + let req = BroadcastTransactionRequest { + transaction: tx, + allow_high_fees, + bypass_limits, + }; + Ok(JsonRpcCall::CoreBroadcastTransaction(req)) + } + _ => Err(DapiError::MethodNotFound("Method not found".to_string())), + } + } + + /// Convert a gRPC Platform status response into a JSON-RPC success envelope. + /// Serializes the message to JSON, wrapping serialization failures as internal errors. + /// Propagates the original request id. + pub async fn translate_response( + &self, + response: GetStatusResponse, + id: Option, + ) -> DapiResult { + let result = serde_json::to_value(&response) + .map_err(|e| DapiError::Internal(format!("Failed to serialize response: {}", e)))?; + Ok(JsonRpcResponse::ok(result, id)) + } + + /// Build a JSON-RPC error response from a rich `DapiError` using protocol mappings. 
+ pub fn error_response>( + &self, + error: E, + id: Option, + ) -> JsonRpcResponse { + let (code, message, data) = error::map_error(&error.into()); + JsonRpcResponse::error(code, message, data, id) + } + + /// Build a JSON-RPC success response with the provided JSON result payload. + pub fn ok_response(&self, result: Value, id: Option) -> JsonRpcResponse { + JsonRpcResponse::ok(result, id) + } + + /// Construct the gRPC request variant for the `getStatus` Platform call. + fn translate_platform_status(&self) -> JsonRpcCall { + use dapi_grpc::platform::v0::get_status_request::GetStatusRequestV0; + + let request_v0 = GetStatusRequestV0 {}; + let grpc_request = GetStatusRequest { + version: Some(dapi_grpc::platform::v0::get_status_request::Version::V0( + request_v0, + )), + }; + JsonRpcCall::PlatformGetStatus(grpc_request) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[tokio::test] + async fn translate_get_status_request() { + let t = JsonRpcTranslator::new(); + let req = JsonRpcRequest { + jsonrpc: "2.0".to_string(), + method: "getStatus".to_string(), + params: None, + id: Some(json!(1)), + }; + let call = t.translate_request(req).await.expect("translate ok"); + match call { + JsonRpcCall::PlatformGetStatus(_) => {} + _ => panic!("expected PlatformGetStatus"), + } + } + + #[tokio::test] + async fn translate_get_best_block_hash_request() { + let t = JsonRpcTranslator::new(); + let req = JsonRpcRequest { + jsonrpc: "2.0".to_string(), + method: "getBestBlockHash".to_string(), + params: None, + id: Some(json!(2)), + }; + let call = t.translate_request(req).await.expect("translate ok"); + match call { + JsonRpcCall::CoreGetBestBlockHash => {} + _ => panic!("expected CoreGetBestBlockHash"), + } + } + + #[tokio::test] + async fn translate_get_block_hash_with_height() { + let t = JsonRpcTranslator::new(); + let req = JsonRpcRequest { + jsonrpc: "2.0".to_string(), + method: "getBlockHash".to_string(), + params: Some(json!({"height": 
12345})), + id: Some(json!(3)), + }; + let call = t.translate_request(req).await.expect("translate ok"); + match call { + JsonRpcCall::CoreGetBlockHash { height } => assert_eq!(height, 12345), + _ => panic!("expected CoreGetBlockHash"), + } + } + + #[tokio::test] + async fn translate_get_block_hash_missing_param_errors() { + let t = JsonRpcTranslator::new(); + let req = JsonRpcRequest { + jsonrpc: "2.0".to_string(), + method: "getBlockHash".to_string(), + params: Some(json!({})), + id: Some(json!(4)), + }; + let err = t.translate_request(req).await.unwrap_err(); + match err { + DapiError::InvalidArgument(msg) => assert!(msg.contains("required property")), + _ => panic!("expected InvalidArgument"), + } + } + + #[test] + fn parse_first_param_validates_types() { + use super::params::parse_first_u32_param; + + assert_eq!( + parse_first_u32_param(Some(json!({"height": 0}))).unwrap(), + 0 + ); + assert!( + parse_first_u32_param(Some(json!(null))) + .unwrap_err() + .contains("params must be object") + ); + assert!( + parse_first_u32_param(Some(json!({}))) + .unwrap_err() + .contains("required property") + ); + assert!( + parse_first_u32_param(Some(json!({"height": -1}))) + .unwrap_err() + .contains(">= 0") + ); + assert!( + parse_first_u32_param(Some(json!({"height": 0.5}))) + .unwrap_err() + .contains("integer") + ); + assert!( + parse_first_u32_param(Some(json!({"height": (u32::MAX as u64) + 1}))) + .unwrap_err() + .contains("<= 4294967295") + ); + } + + #[tokio::test] + async fn translate_response_wraps_result() { + let t = JsonRpcTranslator::new(); + let resp = GetStatusResponse { version: None }; + let out = t + .translate_response(resp, Some(json!(5))) + .await + .expect("serialize ok"); + assert_eq!(out.jsonrpc, "2.0"); + assert_eq!(out.id, Some(json!(5))); + assert!(out.error.is_none()); + assert!(out.result.is_some()); + } + + #[test] + fn error_response_codes_match() { + let t = JsonRpcTranslator::new(); + let r = 
t.error_response(DapiError::InvalidArgument("bad".into()), Some(json!(1))); + assert_eq!(r.error.as_ref().unwrap().code, -32602); + let r = t.error_response(DapiError::NotFound("nope".into()), None); + assert_eq!(r.error.as_ref().unwrap().code, -32602); + let r = t.error_response(DapiError::ServiceUnavailable("x".into()), None); + assert_eq!(r.error.as_ref().unwrap().code, -32003); + let r = t.error_response(DapiError::Internal("x".into()), None); + assert_eq!(r.error.as_ref().unwrap().code, -32603); + } + + #[tokio::test] + async fn translate_send_raw_transaction_basic() { + let t = JsonRpcTranslator::new(); + let req = JsonRpcRequest { + jsonrpc: "2.0".to_string(), + method: "sendRawTransaction".to_string(), + params: Some(json!(["deadbeef"])), + id: Some(json!(7)), + }; + let call = t.translate_request(req).await.expect("translate ok"); + match call { + JsonRpcCall::CoreBroadcastTransaction(r) => { + assert_eq!(r.transaction, hex::decode("deadbeef").unwrap()); + assert!(!r.allow_high_fees); + assert!(!r.bypass_limits); + } + _ => panic!("expected CoreBroadcastTransaction"), + } + } + + #[test] + fn parse_send_raw_tx_params_variants() { + use super::params::parse_send_raw_tx_params; + + let (tx, a, b) = parse_send_raw_tx_params(Some(json!("ff"))).unwrap(); + assert_eq!(tx, vec![0xff]); + assert!(!a && !b); + + let (tx, a, b) = parse_send_raw_tx_params(Some(json!(["ff", true, true]))).unwrap(); + assert_eq!(tx, vec![0xff]); + assert!(a && b); + + assert!(parse_send_raw_tx_params(Some(json!([]))).is_err()); + assert!(parse_send_raw_tx_params(Some(json!([123]))).is_err()); + } +} diff --git a/packages/rs-dapi/src/protocol/jsonrpc_translator/params.rs b/packages/rs-dapi/src/protocol/jsonrpc_translator/params.rs new file mode 100644 index 00000000000..b2353b43864 --- /dev/null +++ b/packages/rs-dapi/src/protocol/jsonrpc_translator/params.rs @@ -0,0 +1,73 @@ +use serde_json::Value; + +fn parse_bool_flag(value: Option<&Value>, name: &str) -> Result { + match value { + 
Some(Value::Bool(b)) => Ok(*b), + Some(Value::String(s)) if s == "true" => Ok(true), + Some(Value::String(s)) if s == "false" => Ok(false), + None | Some(Value::Null) => Ok(false), + _ => Err(format!("{name} must be boolean")), + } +} + +/// Extract the `height` field from JSON-RPC params, validating numeric bounds. +/// Accepts object-based params and returns friendly error strings for schema issues. +pub fn parse_first_u32_param(params: Option) -> Result { + let map = match params { + Some(Value::Object(map)) => map, + _ => return Err("params must be object".to_string()), + }; + + let value = map + .get("height") + .ok_or_else(|| "must have required property 'height'".to_string())?; + match value { + Value::Number(num) => { + if let Some(raw) = num.as_i64() { + if raw < 0 { + return Err("params/height must be >= 0".to_string()); + } + if raw > i64::from(u32::MAX) { + return Err("params/height must be <= 4294967295".to_string()); + } + Ok(raw as u32) + } else if let Some(raw) = num.as_u64() { + if raw > u32::MAX as u64 { + return Err("params/height must be <= 4294967295".to_string()); + } + Ok(raw as u32) + } else { + Err("params/height must be integer".to_string()) + } + } + _ => Err("params/height must be integer".to_string()), + } +} + +/// Parse raw transaction parameters, supporting string or array forms with fee flags. +/// Returns the decoded bytes plus `allow_high_fees` and `bypass_limits` toggles. 
+pub fn parse_send_raw_tx_params(params: Option) -> Result<(Vec, bool, bool), String> { + match params { + Some(Value::Array(a)) => { + if a.is_empty() { + return Err("missing raw transaction parameter".to_string()); + } + let raw_hex = a[0] + .as_str() + .ok_or_else(|| "raw transaction must be a hex string".to_string())?; + let tx = hex::decode(raw_hex) + .map_err(|_| "raw transaction must be valid hex".to_string())?; + + let allow_high_fees = parse_bool_flag(a.get(1), "allow_high_fees")?; + let bypass_limits = parse_bool_flag(a.get(2), "bypass_limits")?; + + Ok((tx, allow_high_fees, bypass_limits)) + } + Some(Value::String(s)) => { + let tx = + hex::decode(&s).map_err(|_| "raw transaction must be valid hex".to_string())?; + Ok((tx, false, false)) + } + _ => Err("params must be an array or hex string".to_string()), + } +} diff --git a/packages/rs-dapi/src/protocol/jsonrpc_translator/types.rs b/packages/rs-dapi/src/protocol/jsonrpc_translator/types.rs new file mode 100644 index 00000000000..2f3f32f89a7 --- /dev/null +++ b/packages/rs-dapi/src/protocol/jsonrpc_translator/types.rs @@ -0,0 +1,53 @@ +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +#[derive(Debug, Deserialize)] +pub struct JsonRpcRequest { + pub jsonrpc: String, + pub method: String, + pub params: Option, + pub id: Option, +} + +#[derive(Debug, Serialize)] +pub struct JsonRpcResponse { + pub jsonrpc: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub result: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option, + pub id: Option, +} + +#[derive(Debug, Serialize)] +pub struct JsonRpcError { + pub code: i32, + pub message: String, + pub data: Option, +} + +impl JsonRpcResponse { + /// Create a JSON-RPC 2.0 success envelope with the provided result payload. 
+ pub fn ok(result: Value, id: Option) -> Self { + Self { + jsonrpc: "2.0".to_string(), + result: Some(result), + error: None, + id, + } + } + + /// Create a JSON-RPC 2.0 error envelope with code, message, optional data, and id. + pub fn error(code: i32, message: String, data: Option, id: Option) -> Self { + Self { + jsonrpc: "2.0".to_string(), + result: None, + error: Some(JsonRpcError { + code, + message, + data, + }), + id, + } + } +} diff --git a/packages/rs-dapi/src/protocol/mod.rs b/packages/rs-dapi/src/protocol/mod.rs new file mode 100644 index 00000000000..29d45efd2c3 --- /dev/null +++ b/packages/rs-dapi/src/protocol/mod.rs @@ -0,0 +1,2 @@ +pub mod jsonrpc_translator; +pub use jsonrpc_translator::*; diff --git a/packages/rs-dapi/src/server/grpc.rs b/packages/rs-dapi/src/server/grpc.rs new file mode 100644 index 00000000000..0bc5b5503e4 --- /dev/null +++ b/packages/rs-dapi/src/server/grpc.rs @@ -0,0 +1,275 @@ +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; +use tracing::{info, trace}; + +use crate::error::DAPIResult; +use crate::logging::AccessLogLayer; +use crate::metrics::MetricsLayer; +use axum::http::{HeaderMap, Request, Response}; +use dapi_grpc::core::v0::core_server::CoreServer; +use dapi_grpc::platform::v0::platform_server::PlatformServer; +use dapi_grpc::tonic::Status; +use tower::layer::util::{Identity, Stack}; +use tower::util::Either; +use tower::{Layer, Service}; + +use super::DapiServer; + +/// Timeouts for regular requests - sync with envoy config if changed there +const UNARY_TIMEOUT_SECS: u64 = 15; +/// Timeouts for streaming requests - sync with envoy config if changed there +const STREAMING_TIMEOUT_SECS: u64 = 600; +/// Safety margin to ensure we respond before client-side gRPC deadlines fire +const GRPC_REQUEST_TIME_SAFETY_MARGIN: Duration = Duration::from_millis(50); + +impl DapiServer { + /// Start the unified gRPC server that exposes both Platform and Core services. 
+ /// Configures timeouts, message limits, optional access logging, and then awaits completion. + /// Returns when the server stops serving. + pub(super) async fn start_unified_grpc_server(&self) -> DAPIResult<()> { + let addr = self.config.grpc_server_addr()?; + info!( + "Starting unified gRPC server on {} (Core + Platform services)", + addr + ); + + let platform_service = self.platform_service.clone(); + let core_service = self.core_service.clone(); + + const MAX_DECODING_BYTES: usize = 64 * 1024 * 1024; // 64 MiB + const MAX_ENCODING_BYTES: usize = 32 * 1024 * 1024; // 32 MiB + + let builder = dapi_grpc::tonic::transport::Server::builder() + .tcp_keepalive(Some(Duration::from_secs(25))) + .timeout(Duration::from_secs( + STREAMING_TIMEOUT_SECS.max(UNARY_TIMEOUT_SECS) + 5, + )); // failsafe timeout - we handle timeouts in the timeout_layer + + // Create timeout layer with different timeouts for unary vs streaming + let timeout_layer = TimeoutLayer::new( + Duration::from_secs(UNARY_TIMEOUT_SECS), + Duration::from_secs(STREAMING_TIMEOUT_SECS), + ); + + let metrics_layer = MetricsLayer::new(); + let access_layer = if let Some(ref access_logger) = self.access_logger { + Either::Left(AccessLogLayer::new(access_logger.clone())) + } else { + Either::Right(Identity::new()) + }; + + // Stack layers (execution order: metrics -> access log -> timeout) + let combined_layer = Stack::new(Stack::new(timeout_layer, access_layer), metrics_layer); + let mut builder = builder.layer(combined_layer); + + builder + .add_service( + PlatformServer::new(platform_service) + .max_decoding_message_size(MAX_DECODING_BYTES) + .max_encoding_message_size(MAX_ENCODING_BYTES), + ) + .add_service( + CoreServer::new(core_service) + .max_decoding_message_size(MAX_DECODING_BYTES) + .max_encoding_message_size(MAX_ENCODING_BYTES), + ) + .serve(addr) + .await?; + + Ok(()) + } +} + +/// Middleware layer to apply different timeouts based on gRPC method type. 
+/// +/// Streaming methods (subscriptions) get longer timeouts to support long-lived connections, +/// while unary methods get shorter timeouts to prevent resource exhaustion. +#[derive(Clone)] +struct TimeoutLayer { + unary_timeout: Duration, + streaming_timeout: Duration, +} + +impl TimeoutLayer { + fn new(unary_timeout: Duration, streaming_timeout: Duration) -> Self { + Self { + unary_timeout, + streaming_timeout, + } + } + + /// Determine the appropriate timeout for a given gRPC method path. + fn timeout_for_method(&self, path: &str) -> Duration { + // All known streaming methods in Core service (all use "stream" return type) + const STREAMING_METHODS: &[&str] = &[ + "/org.dash.platform.dapi.v0.Core/subscribeToBlockHeadersWithChainLocks", + "/org.dash.platform.dapi.v0.Core/subscribeToTransactionsWithProofs", + "/org.dash.platform.dapi.v0.Core/subscribeToMasternodeList", + "/org.dash.platform.dapi.v0.Platform/waitForStateTransitionResult", + "/org.dash.platform.dapi.v0.Platform/subscribePlatformEvents", + ]; + + // Check if this is a known streaming method + if STREAMING_METHODS.contains(&path) { + tracing::trace!( + path, + "Detected streaming gRPC method, applying streaming timeout" + ); + self.streaming_timeout + } else { + self.unary_timeout + } + } +} + +impl Layer for TimeoutLayer { + type Service = TimeoutService; + + fn layer(&self, inner: S) -> Self::Service { + TimeoutService { + inner, + config: self.clone(), + } + } +} + +/// Service wrapper that applies per-method timeouts. 
+#[derive(Clone)] +struct TimeoutService { + inner: S, + config: TimeoutLayer, +} + +impl Service> for TimeoutService +where + S: Service, Response = Response> + Clone + Send + 'static, + S::Future: Send + 'static, + S::Error: Into> + Send + 'static, + ReqBody: Send + 'static, + ResBody: Default + Send + 'static, +{ + type Response = S::Response; + type Error = Box; + type Future = + Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: Request) -> Self::Future { + let path = req.uri().path().to_owned(); + let default_timeout = self.config.timeout_for_method(&path); + let timeout_from_header = parse_grpc_timeout_header(req.headers()); + let effective_timeout = timeout_from_header + .and_then(|d| d.checked_sub(GRPC_REQUEST_TIME_SAFETY_MARGIN)) + .unwrap_or(default_timeout) + .min(default_timeout); + + if timeout_from_header.is_some() { + trace!( + path, + header_timeout = timeout_from_header.unwrap_or_default().as_secs_f32(), + timeout = effective_timeout.as_secs_f32(), + "Applying gRPC timeout from header" + ); + } else { + tracing::trace!( + path, + timeout = effective_timeout.as_secs_f32(), + "Applying default gRPC timeout" + ); + } + let timeout_duration = effective_timeout; + let timeout_secs = timeout_duration.as_secs_f64(); + let fut = tower::timeout::Timeout::new(self.inner.clone(), timeout_duration).call(req); + + Box::pin(async move { + fut.await.map_err(|err| { + if err.is::() { + // timeout from TimeoutLayer + Status::deadline_exceeded(format!( + "request timed out after {:.3}s: {err}", + timeout_secs + )) + .into() + } else { + err + } + }) + }) + } +} + +/// Parse inbound grpc-timeout header into Duration (RFC 8681 style units) +fn parse_grpc_timeout_header(headers: &HeaderMap) -> Option { + let value = headers.get("grpc-timeout")?; + let as_str = value.to_str().ok()?; + if as_str.is_empty() { + return None; + } + let (num_part, unit_part) = 
as_str.split_at(as_str.len().saturating_sub(1)); + let amount: u64 = num_part.parse().ok()?; + match unit_part { + "H" => Some(Duration::from_secs(amount.saturating_mul(60 * 60))), + "M" => Some(Duration::from_secs(amount.saturating_mul(60))), + "S" => Some(Duration::from_secs(amount)), + "m" => Some(Duration::from_millis(amount)), + "u" => Some(Duration::from_micros(amount)), + "n" => Some(Duration::from_nanos(amount)), + _ => None, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::future::Future; + use std::task::{Context, Poll}; + + #[derive(Clone)] + struct SlowService; + + impl Service> for SlowService { + type Response = Response<()>; + type Error = Box; + type Future = + Pin> + Send + 'static>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: Request<()>) -> Self::Future { + Box::pin(async { + tokio::time::sleep(Duration::from_millis(50)).await; + Ok(Response::new(())) + }) + } + } + + #[tokio::test] + async fn timeout_service_returns_deadline_exceeded_status() { + let timeout_layer = TimeoutLayer::new(Duration::from_millis(5), Duration::from_secs(1)); + let mut service = timeout_layer.layer(SlowService); + + let request = Request::builder().uri("/test").body(()).unwrap(); + + let err = service + .call(request) + .await + .expect_err("expected timeout error"); + + let status = err + .downcast::() + .expect("expected tonic status error"); + + assert_eq!(status.code(), dapi_grpc::tonic::Code::DeadlineExceeded); + assert!( + status.message().contains("0.005"), + "status message should include timeout value, got '{}'", + status.message() + ); + } +} diff --git a/packages/rs-dapi/src/server/jsonrpc.rs b/packages/rs-dapi/src/server/jsonrpc.rs new file mode 100644 index 00000000000..2dd5b64bc3f --- /dev/null +++ b/packages/rs-dapi/src/server/jsonrpc.rs @@ -0,0 +1,212 @@ +use axum::{ + Router, extract::State, response::IntoResponse, response::Json, response::Response, + routing::post, 
+}; +use serde_json::Value; +use tokio::net::TcpListener; +use tower_http::cors::CorsLayer; +use tracing::info; + +use crate::error::DAPIResult; +use crate::logging::middleware::AccessLogLayer; +use crate::metrics::MetricsLayer; +use crate::protocol::{JsonRpcCall, JsonRpcRequest}; + +use dapi_grpc::core::v0::core_server::Core; +use dapi_grpc::platform::v0::platform_server::Platform; + +use super::DapiServer; +use super::state::JsonRpcAppState; + +impl DapiServer { + /// Start the JSON-RPC HTTP server, configuring state, CORS, and access logging. + /// Extracts shared services for request handling and binds the listener on the configured address. + /// Returns when the server stops serving. + pub(super) async fn start_jsonrpc_server(&self) -> DAPIResult<()> { + let addr = self.config.json_rpc_addr()?; + info!("Starting JSON-RPC server on {}", addr); + + let app_state = JsonRpcAppState { + platform_service: self.platform_service.clone(), + core_service: self.core_service.clone(), + translator: self.jsonrpc_translator.clone(), + }; + + let mut app = Router::new() + .route("/", post(handle_jsonrpc_request)) + .with_state(app_state); + + app = app.layer(MetricsLayer::new()); + + if let Some(ref access_logger) = self.access_logger { + app = app.layer(AccessLogLayer::new(access_logger.clone())); + } + + app = app.layer(CorsLayer::permissive()); + + let listener = TcpListener::bind(addr).await?; + axum::serve(listener, app).await?; + + Ok(()) + } +} + +/// Handle a JSON-RPC request by translating it and delegating to the appropriate gRPC service. +/// Maps service responses and errors back into JSON-RPC payloads while preserving request ids. +/// Returns JSON suitable for Axum's response wrapper. 
+async fn handle_jsonrpc_request( + State(state): State, + Json(json_rpc): Json, +) -> Response { + let id = json_rpc.id.clone(); + let requested_method = json_rpc.method.clone(); + + let call = match state.translator.translate_request(json_rpc).await { + Ok(req) => req, + Err(e) => { + let error_response = state.translator.error_response(e, id.clone()); + return respond_with_method( + crate::metrics::MethodLabel::from_owned(requested_method), + Json(serde_json::to_value(error_response).unwrap_or_default()), + ); + } + }; + + match call { + JsonRpcCall::PlatformGetStatus(grpc_request) => { + let method_label = crate::metrics::method_label(&grpc_request); + let mut tonic_request = dapi_grpc::tonic::Request::new(grpc_request); + crate::metrics::attach_method_label( + tonic_request.extensions_mut(), + method_label.clone(), + ); + + let grpc_response = match state.platform_service.get_status(tonic_request).await { + Ok(resp) => resp.into_inner(), + Err(e) => { + let error_response = state.translator.error_response(e, id.clone()); + return respond_with_method( + method_label, + Json(serde_json::to_value(error_response).unwrap_or_default()), + ); + } + }; + + match state + .translator + .translate_response(grpc_response, id.clone()) + .await + { + Ok(json_rpc_response) => respond_with_method( + method_label.clone(), + Json(serde_json::to_value(json_rpc_response).unwrap_or_default()), + ), + Err(e) => { + let error_response = state.translator.error_response(e, id.clone()); + respond_with_method( + method_label, + Json(serde_json::to_value(error_response).unwrap_or_default()), + ) + } + } + } + JsonRpcCall::CoreBroadcastTransaction(req_broadcast) => { + let method_label = crate::metrics::method_label(&req_broadcast); + let mut tonic_request = dapi_grpc::tonic::Request::new(req_broadcast); + crate::metrics::attach_method_label( + tonic_request.extensions_mut(), + method_label.clone(), + ); + + let result = state + .core_service + .broadcast_transaction(tonic_request) + 
.await; + match result { + Ok(resp) => { + let txid = resp.into_inner().transaction_id; + let ok = state + .translator + .ok_response(serde_json::json!(txid), id.clone()); + respond_with_method( + method_label, + Json(serde_json::to_value(ok).unwrap_or_default()), + ) + } + Err(e) => { + let error_response = state.translator.error_response(e, id.clone()); + respond_with_method( + method_label, + Json(serde_json::to_value(error_response).unwrap_or_default()), + ) + } + } + } + JsonRpcCall::CoreGetBestBlockHash => { + use dapi_grpc::core::v0::GetBlockchainStatusRequest; + let request = GetBlockchainStatusRequest {}; + let method_label = crate::metrics::method_label(&request); + let mut tonic_request = dapi_grpc::tonic::Request::new(request); + crate::metrics::attach_method_label( + tonic_request.extensions_mut(), + method_label.clone(), + ); + let resp = match state + .core_service + .get_blockchain_status(tonic_request) + .await + { + Ok(r) => r.into_inner(), + Err(e) => { + let error_response = state.translator.error_response(e, id.clone()); + return respond_with_method( + method_label, + Json(serde_json::to_value(error_response).unwrap_or_default()), + ); + } + }; + let best_block_hash_hex = resp + .chain + .map(|c| hex::encode(c.best_block_hash)) + .unwrap_or_default(); + let ok = state + .translator + .ok_response(serde_json::json!(best_block_hash_hex), id.clone()); + respond_with_method( + method_label, + Json(serde_json::to_value(ok).unwrap_or_default()), + ) + } + JsonRpcCall::CoreGetBlockHash { height } => { + let result = state.core_service.core_client.get_block_hash(height).await; + match result { + Ok(hash) => { + let ok = state + .translator + .ok_response(serde_json::json!(hash.to_string()), id.clone()); + respond_with_method( + crate::metrics::MethodLabel::from_owned( + "CoreClient::get_block_hash".to_string(), + ), + Json(serde_json::to_value(ok).unwrap_or_default()), + ) + } + Err(e) => { + let error_response = state.translator.error_response(e, 
id.clone()); + respond_with_method( + crate::metrics::MethodLabel::from_owned( + "CoreClient::get_block_hash".to_string(), + ), + Json(serde_json::to_value(error_response).unwrap_or_default()), + ) + } + } + } + } +} + +fn respond_with_method(method: crate::metrics::MethodLabel, body: Json) -> Response { + let mut response = body.into_response(); + crate::metrics::attach_method_label(response.extensions_mut(), method); + response +} diff --git a/packages/rs-dapi/src/server/metrics.rs b/packages/rs-dapi/src/server/metrics.rs new file mode 100644 index 00000000000..124163acad1 --- /dev/null +++ b/packages/rs-dapi/src/server/metrics.rs @@ -0,0 +1,306 @@ +use axum::{Router, extract::State, http::StatusCode, response::Json, routing::get}; +use serde::Serialize; +use tokio::net::TcpListener; +use tokio::time::{Duration, timeout}; +use tracing::{error, info}; + +use crate::error::{DAPIResult, DapiError}; +use crate::logging::middleware::AccessLogLayer; + +use super::{DapiServer, state::MetricsAppState}; + +impl DapiServer { + /// Launch the health and Prometheus metrics server if configured. + /// Binds Axum routes and wraps them with access logging when available. + /// Returns early when metrics are disabled. + pub(super) async fn start_metrics_server(&self) -> DAPIResult<()> { + let Some(addr) = self.config.metrics_addr()? 
else { + info!("Metrics server disabled; skipping startup"); + return Ok(()); + }; + + info!("Starting metrics server (health + Prometheus) on {}", addr); + + let app_state = MetricsAppState { + platform_service: self.platform_service.clone(), + core_service: self.core_service.clone(), + }; + + let mut app = Router::new() + .route("/health", get(handle_health)) + .route("/metrics", get(handle_metrics)) + .with_state(app_state); + + if let Some(ref access_logger) = self.access_logger { + app = app.layer(AccessLogLayer::new(access_logger.clone())); + } + + let listener = TcpListener::bind(addr).await?; + axum::serve(listener, app).await?; + + Ok(()) + } +} + +/// Run health checks against upstream dependencies and expose consolidated status. +async fn handle_health(State(state): State) -> impl axum::response::IntoResponse { + const HEALTH_CHECK_TIMEOUT: Duration = Duration::from_secs(3); + + let platform_service = state.platform_service.clone(); + let websocket_connected = platform_service.websocket_client.is_connected(); + let core_client = state.core_service.core_client.clone(); + + let platform_result = timeout(HEALTH_CHECK_TIMEOUT, async move { + platform_service + .build_status_response_with_health() + .await + .map(|(_, health)| health) + }); + + let core_result = timeout(HEALTH_CHECK_TIMEOUT, async move { + core_client.get_block_count().await + }); + + let (platform_result, core_result) = tokio::join!(platform_result, core_result); + + let (platform_ok, platform_payload) = match platform_result { + Ok(Ok(health)) => { + let is_healthy = health.is_healthy(); + let payload = PlatformChecks { + status: if is_healthy { + "ok".into() + } else { + "degraded".into() + }, + error: None, + drive: Some(health.drive_error.as_ref().into()), + tenderdash_websocket: Some(ComponentCheck::from(websocket_connected)), + tenderdash_status: Some(health.tenderdash_status_error.as_ref().into()), + tenderdash_net_info: Some(health.tenderdash_netinfo_error.as_ref().into()), + }; + 
(is_healthy, payload) + } + Ok(Err(err)) => { + error!(error = %err, "Platform health check failed"); + ( + false, + PlatformChecks { + status: "error".into(), + error: Some(health_error_label(&err.into()).to_string()), + drive: None, + tenderdash_websocket: Some(ComponentCheck::from(websocket_connected)), + tenderdash_status: None, + tenderdash_net_info: None, + }, + ) + } + Err(_) => ( + false, + PlatformChecks { + status: "error".into(), + error: Some("timeout".into()), + drive: None, + tenderdash_websocket: Some(ComponentCheck::from(websocket_connected)), + tenderdash_status: None, + tenderdash_net_info: None, + }, + ), + }; + + let (core_ok, core_payload) = match core_result { + Ok(Ok(height)) => ( + true, + CoreRpcCheck { + status: "ok".into(), + latest_block_height: Some(height), + error: None, + }, + ), + Ok(Err(err)) => { + error!(error = %err, "Core RPC health check failed"); + ( + false, + CoreRpcCheck { + status: "error".into(), + latest_block_height: None, + error: Some(health_error_label(&err).to_string()), + }, + ) + } + Err(_) => ( + false, + CoreRpcCheck { + status: "error".into(), + latest_block_height: None, + error: Some("timeout".into()), + }, + ), + }; + + let websocket_ok = websocket_connected; + let failures = u8::from(!platform_ok) + u8::from(!core_ok) + u8::from(!websocket_ok); + + let overall_status = if failures == 0 { + "ok" + } else if failures == 1 { + "degraded" + } else { + "error" + }; + + let http_status = if overall_status == "ok" { + StatusCode::OK + } else { + StatusCode::SERVICE_UNAVAILABLE + }; + + let body = HealthResponse { + status: overall_status.to_string(), + timestamp: chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::AutoSi, false), + version: env!("CARGO_PKG_VERSION"), + checks: Checks { + platform: platform_payload, + core_rpc: core_payload, + }, + }; + + (http_status, Json(body)) +} + +/// Expose Prometheus-formatted metrics gathered from the registry. 
+async fn handle_metrics() -> axum::response::Response { + let (body, content_type) = crate::metrics::gather_prometheus(); + axum::response::Response::builder() + .status(200) + .header(axum::http::header::CONTENT_TYPE, content_type) + .body(axum::body::Body::from(body)) + .unwrap_or_else(|_| axum::response::Response::new(axum::body::Body::from(""))) +} + +#[derive(Serialize)] +struct HealthResponse { + status: String, + timestamp: String, + version: &'static str, + checks: Checks, +} + +#[derive(Serialize)] +struct Checks { + platform: PlatformChecks, + #[serde(rename = "coreRpc")] + core_rpc: CoreRpcCheck, +} + +#[derive(Serialize)] +struct PlatformChecks { + status: String, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, + #[serde(skip_serializing_if = "Option::is_none")] + drive: Option, + #[serde( + rename = "tenderdashWebSocket", + skip_serializing_if = "Option::is_none" + )] + tenderdash_websocket: Option, + #[serde(rename = "tenderdashStatus", skip_serializing_if = "Option::is_none")] + tenderdash_status: Option, + #[serde(rename = "tenderdashNetInfo", skip_serializing_if = "Option::is_none")] + tenderdash_net_info: Option, +} + +#[derive(Serialize)] +struct CoreRpcCheck { + status: String, + #[serde(rename = "latestBlockHeight", skip_serializing_if = "Option::is_none")] + latest_block_height: Option, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, +} + +#[derive(Serialize)] +struct ComponentCheck { + status: String, + #[serde(skip_serializing_if = "Option::is_none")] + error: Option, +} + +/// Produce a redacted error label suitable for public health endpoints. +/// This keeps logs detailed while preventing information leakage over HTTP. 
+fn health_error_label(err: &DapiError) -> &'static str { + use DapiError::*; + + let label = match err { + Configuration(_) => "configuration error", + StreamingService(_) => "streaming service error", + Client(_) | ClientGone(_) => "client error", + ServerUnavailable(_, _) | Unavailable(_) | ServiceUnavailable(_) => "service unavailable", + Server(_) => "server error", + Serialization(_) | InvalidData(_) | NoValidTxProof(_) => "invalid data", + Transport(_) | Http(_) | WebSocket(_) | Request(_) => "transport error", + TenderdashClientError(_) => "tenderdash error", + Status(_) => "upstream returned error", + TaskJoin(_) => "internal task error", + Io(_) => "io error", + UrlParse(_) => "invalid url", + Base64Decode(_) => "invalid base64 data", + TransactionHashNotFound => "transaction hash missing", + NotFound(_) => "not found", + AlreadyExists(_) => "already exists", + InvalidRequest(_) => "invalid request", + InvalidArgument(_) => "invalid argument", + ResourceExhausted(_) => "resource exhausted", + Aborted(_) => "aborted", + Timeout(_) => "timeout", + Internal(_) => "internal error", + ConnectionClosed => "connection closed", + MethodNotFound(_) => "method not found", + ZmqConnection(_) => "zmq connection error", + FailedPrecondition(_) => "failed precondition", + // no default to ensure new errors are handled explicitly + }; + tracing::trace!(error = ?err, label, "Mapping DapiError to health error label"); + + label +} + +impl From> for ComponentCheck +where + T: Into, +{ + fn from(option: Option) -> Self { + match option { + Some(value) => value.into(), + None => Self { + status: "ok".into(), + error: None, + }, + } + } +} + +impl From<&DapiError> for ComponentCheck { + fn from(err: &DapiError) -> Self { + Self { + status: "error".into(), + error: Some(health_error_label(err).to_string()), + } + } +} + +impl From for ComponentCheck { + fn from(is_ok: bool) -> Self { + if is_ok { + Self { + status: "ok".into(), + error: None, + } + } else { + Self { + status: 
"error".into(), + error: Some("failed".into()), + } + } + } +} diff --git a/packages/rs-dapi/src/server/mod.rs b/packages/rs-dapi/src/server/mod.rs new file mode 100644 index 00000000000..0cb0023d787 --- /dev/null +++ b/packages/rs-dapi/src/server/mod.rs @@ -0,0 +1,110 @@ +mod grpc; +mod jsonrpc; +mod metrics; +mod state; + +use futures::FutureExt; +use std::sync::Arc; +use tracing::{error, info}; + +use crate::clients::{CoreClient, DriveClient, TenderdashClient}; +use crate::config::Config; +use crate::error::{DAPIResult, DapiError}; +use crate::logging::AccessLogger; +use crate::protocol::JsonRpcTranslator; +use crate::services::{CoreServiceImpl, PlatformServiceImpl, StreamingServiceImpl}; + +pub struct DapiServer { + config: Arc, + core_service: CoreServiceImpl, + platform_service: PlatformServiceImpl, + jsonrpc_translator: JsonRpcTranslator, + access_logger: Option, +} + +impl DapiServer { + /// Construct the DAPI server by wiring clients, services, and translators from config. + /// Establishes Drive, Tenderdash, and Core connections while building streaming support. + /// Returns an error with context when dependencies cannot be initialized. 
+ pub async fn new(config: Arc, access_logger: Option) -> DAPIResult { + let drive_client = DriveClient::new(&config.dapi.drive.uri) + .await + .map_err(|e| DapiError::Client(format!("Failed to create Drive client: {}", e)))?; + + let tenderdash_client = Arc::new( + TenderdashClient::new( + &config.dapi.tenderdash.uri, + &config.dapi.tenderdash.websocket_uri, + ) + .await?, + ); + + let core_client = CoreClient::new( + config.dapi.core.rpc_url.clone(), + config.dapi.core.rpc_user.clone(), + config.dapi.core.rpc_pass.clone().into(), + config.dapi.core.cache_bytes, + ) + .map_err(|e| DapiError::Client(format!("Failed to create Core RPC client: {}", e)))?; + + let streaming_service = Arc::new(StreamingServiceImpl::new( + drive_client.clone(), + tenderdash_client.clone(), + core_client.clone(), + config.clone(), + )?); + + let platform_service = PlatformServiceImpl::new( + drive_client.clone(), + tenderdash_client.clone(), + config.clone(), + streaming_service.subscriber_manager.clone(), + ) + .await; + + let core_service = + CoreServiceImpl::new(streaming_service, config.clone(), core_client).await; + + let jsonrpc_translator = JsonRpcTranslator::new(); + + Ok(Self { + config, + platform_service, + core_service, + jsonrpc_translator, + access_logger, + }) + } + + /// Run all configured server endpoints and await until one terminates. + /// gRPC, JSON-RPC, and optional metrics servers are started concurrently. + /// The first server to exit determines the result returned to the caller. + pub async fn run(self) -> DAPIResult<()> { + info!("Starting DAPI server..."); + + let grpc_server = self.start_unified_grpc_server(); + let jsonrpc_server = self.start_jsonrpc_server(); + + let metrics_server = if self.config.metrics_enabled() { + self.start_metrics_server().boxed() + } else { + futures::future::pending().map(|_: ()| Ok(())).boxed() // Never completes + }; + + // when any of the servers stop, log and return its result + tokio::select! 
{ + result = grpc_server => { + error!("gRPC server stopped: {:?}", result); + result + }, + result = jsonrpc_server => { + error!("JSON-RPC server stopped: {:?}", result); + result + }, + result = metrics_server => { + error!("Metrics server stopped: {:?}", result); + result + }, + } + } +} diff --git a/packages/rs-dapi/src/server/state.rs b/packages/rs-dapi/src/server/state.rs new file mode 100644 index 00000000000..6df036de614 --- /dev/null +++ b/packages/rs-dapi/src/server/state.rs @@ -0,0 +1,15 @@ +use crate::protocol::JsonRpcTranslator; +use crate::services::{CoreServiceImpl, PlatformServiceImpl}; + +#[derive(Clone)] +pub(super) struct JsonRpcAppState { + pub platform_service: PlatformServiceImpl, + pub core_service: CoreServiceImpl, + pub translator: JsonRpcTranslator, +} + +#[derive(Clone)] +pub(super) struct MetricsAppState { + pub platform_service: PlatformServiceImpl, + pub core_service: CoreServiceImpl, +} diff --git a/packages/rs-dapi/src/services/core_service.rs b/packages/rs-dapi/src/services/core_service.rs new file mode 100644 index 00000000000..eb9137fd57a --- /dev/null +++ b/packages/rs-dapi/src/services/core_service.rs @@ -0,0 +1,602 @@ +// Core service implementation + +use crate::DapiError; +use crate::clients::CoreClient; +use crate::config::Config; +use crate::services::streaming_service::StreamingServiceImpl; +use dapi_grpc::core::v0::{ + BlockHeadersWithChainLocksRequest, BlockHeadersWithChainLocksResponse, + BroadcastTransactionRequest, BroadcastTransactionResponse, GetBestBlockHeightRequest, + GetBestBlockHeightResponse, GetBlockRequest, GetBlockResponse, GetBlockchainStatusRequest, + GetBlockchainStatusResponse, GetEstimatedTransactionFeeRequest, + GetEstimatedTransactionFeeResponse, GetMasternodeStatusRequest, GetMasternodeStatusResponse, + GetTransactionRequest, GetTransactionResponse, MasternodeListRequest, MasternodeListResponse, + TransactionsWithProofsRequest, TransactionsWithProofsResponse, core_server::Core, +}; +use 
dapi_grpc::tonic::{Request, Response, Status}; +use dashcore_rpc::dashcore::consensus::encode::deserialize as deserialize_tx; +use dashcore_rpc::dashcore::hashes::Hash; +use std::any::type_name_of_val; +use std::sync::Arc; +use tokio_stream::wrappers::ReceiverStream; +use tracing::{debug, info, trace, warn}; + +/// Core service implementation that handles blockchain and streaming operations. +/// +/// Supports cheap Clone operation, no need to put it into Arc. +#[derive(Clone)] +pub struct CoreServiceImpl { + pub streaming_service: Arc, + pub config: Arc, + pub core_client: CoreClient, +} + +impl CoreServiceImpl { + /// Build the Core service by wiring the streaming service, config, and RPC client. + /// Used by server startup to prepare gRPC handlers. + pub async fn new( + streaming_service: Arc, + config: Arc, + core_client: CoreClient, + ) -> Self { + Self { + streaming_service, + config, + core_client, + } + } +} + +#[dapi_grpc::tonic::async_trait] +impl Core for CoreServiceImpl { + type subscribeToBlockHeadersWithChainLocksStream = + ReceiverStream>; + type subscribeToTransactionsWithProofsStream = + ReceiverStream>; + type subscribeToMasternodeListStream = ReceiverStream>; + + /// Fetch a block by height or hash, translating Core errors into gRPC statuses. 
+ async fn get_block( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received get_block request"); + let method = type_name_of_val(request.get_ref()); + let req = request.into_inner(); + let request_target = match req.block.as_ref() { + Some(dapi_grpc::core::v0::get_block_request::Block::Height(height)) => { + format!("height:{height}") + } + Some(dapi_grpc::core::v0::get_block_request::Block::Hash(hash)) => { + format!("hash:{}", hash.trim()) + } + None => "unspecified".to_string(), + }; + + let this = self; + let result: Result, Status> = async move { + let block_bytes = match req.block { + Some(dapi_grpc::core::v0::get_block_request::Block::Height(height)) => { + let hash = this + .core_client + .get_block_hash(height) + .await + .map_err(|err| match err { + DapiError::NotFound(_) => { + DapiError::InvalidArgument("Invalid block height".to_string()) + .into_legacy_status() + } + DapiError::InvalidArgument(msg) => { + DapiError::InvalidArgument(msg).into_legacy_status() + } + other => other.to_status(), + })?; + this.core_client + .get_block_bytes_by_hash(hash) + .await + .map_err(|err| match err { + DapiError::NotFound(_) => { + DapiError::NotFound("Block not found".to_string()) + .into_legacy_status() + } + other => other.to_status(), + })? + } + Some(dapi_grpc::core::v0::get_block_request::Block::Hash(hash_hex)) => { + if hash_hex.trim().is_empty() { + return Err(Status::invalid_argument("hash or height is not specified")); + } + + this.core_client + .get_block_bytes_by_hash_hex(&hash_hex) + .await + .map_err(|err| match err { + DapiError::InvalidArgument(msg) => { + DapiError::InvalidArgument(msg).into_legacy_status() + } + DapiError::NotFound(_) => { + DapiError::NotFound("Block not found".to_string()) + .into_legacy_status() + } + other => other.to_status(), + })? 
+ } + None => { + return Err(Status::invalid_argument("hash or height is not specified")); + } + }; + + Ok(Response::new(GetBlockResponse { block: block_bytes })) + } + .await; + + match &result { + Ok(_) => info!(method, %request_target, "request succeeded"), + Err(status) => warn!(method, %request_target, error = %status, "request failed"), + } + + result + } + + /// Retrieve transaction details including confirmations and lock states. + async fn get_transaction( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received get_transaction request"); + let method = type_name_of_val(request.get_ref()); + let txid = request.into_inner().id; + let log_txid = txid.trim().to_owned(); + + let result: Result, Status> = async move { + if txid.trim().is_empty() { + return Err(Status::invalid_argument("id is not specified")); + } + + let info = + self.core_client + .get_transaction_info(&txid) + .await + .map_err(|err| match err { + DapiError::NotFound(_) => { + DapiError::NotFound("Transaction not found".to_string()) + .into_legacy_status() + } + DapiError::InvalidArgument(msg) => { + DapiError::InvalidArgument(msg).into_legacy_status() + } + DapiError::Client(msg) => DapiError::Client(msg).into_legacy_status(), + other => other.to_status(), + })?; + + let transaction = info.hex.clone(); + let block_hash = info + .blockhash + .map(|h| hex::decode(h.to_string()).unwrap_or_default()) + .unwrap_or_default(); + let height = match info.height { + Some(h) if h >= 0 => h as u32, + _ => 0, + }; + let confirmations = info.confirmations.unwrap_or(0); + let is_instant_locked = info.instantlock_internal; + let is_chain_locked = info.chainlock; + + let response = GetTransactionResponse { + transaction, + block_hash, + height, + confirmations, + is_instant_locked, + is_chain_locked, + }; + Ok(Response::new(response)) + } + .await; + + match &result { + Ok(_) => info!(method, txid = log_txid.as_str(), "request succeeded"), + Err(status) => { + warn!(method, txid = 
log_txid.as_str(), error = %status, "request failed") + } + } + + result + } + + /// Return the best block height from Dash Core for legacy clients. + async fn get_best_block_height( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received get_best_block_height request"); + let method = type_name_of_val(request.get_ref()); + let _ = request; + let result: Result, Status> = async { + let height = self + .core_client + .get_block_count() + .await + .map_err(tonic::Status::from)?; + + Ok(Response::new(GetBestBlockHeightResponse { height })) + } + .await; + + match &result { + Ok(response) => info!( + method, + height = response.get_ref().height, + "request succeeded" + ), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + /// Validate and broadcast a transaction to Dash Core, returning its txid. + async fn broadcast_transaction( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received broadcast_transaction request"); + let method = type_name_of_val(request.get_ref()); + let req = request.into_inner(); + let _allow_high_fees = req.allow_high_fees; + let _bypass_limits = req.bypass_limits; + + let result: Result, Status> = async { + if req.transaction.is_empty() { + return Err(Status::invalid_argument("transaction is not specified")); + } + + if let Err(err) = + deserialize_tx::(&req.transaction) + { + return Err(Status::invalid_argument(format!( + "invalid transaction: {}", + err + ))); + } + + // NOTE: dashcore-rpc Client does not expose options for allowhighfees/bypasslimits. + // We broadcast as-is. Future: add support if library exposes those options. 
+ let txid = self + .core_client + .send_raw_transaction(&req.transaction) + .await + .map_err(|err| match err { + DapiError::InvalidArgument(msg) => { + DapiError::InvalidArgument(format!("invalid transaction: {}", msg)) + .into_legacy_status() + } + DapiError::FailedPrecondition(msg) => { + DapiError::FailedPrecondition(format!("Transaction is rejected: {}", msg)) + .into_legacy_status() + } + DapiError::AlreadyExists(msg) => { + DapiError::AlreadyExists(format!("Transaction already in chain: {}", msg)) + .into_legacy_status() + } + DapiError::Client(msg) => DapiError::Client(msg).into_legacy_status(), + other => other.to_status(), + })?; + + Ok(Response::new(BroadcastTransactionResponse { + transaction_id: txid, + })) + } + .await; + + match &result { + Ok(resp) => info!( + method, + txid = resp.get_ref().transaction_id, + "request succeeded" + ), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + /// Fetch blockchain status metrics (similar to `getblockchaininfo`). 
+ async fn get_blockchain_status( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received get_blockchain_status request"); + let method = type_name_of_val(request.get_ref()); + let _ = request; + let result: Result, Status> = async { + trace!("Fetching blockchain_info and network_info from Core"); + let (bc_info_res, net_info_res) = tokio::join!( + self.core_client.get_blockchain_info(), + self.core_client.get_network_info() + ); + + if let Err(ref err) = bc_info_res { + debug!(error = ?err, "Failed to retrieve blockchain info from Core RPC"); + } + if let Err(ref err) = net_info_res { + debug!(error = ?err, "Failed to retrieve network info from Core RPC"); + } + + let bc_info = bc_info_res.ok(); + let net_info = net_info_res.ok(); + + trace!(?bc_info, "Core blockchain info retrieved"); + trace!(?net_info, "Core network info retrieved"); + + use dapi_grpc::core::v0::get_blockchain_status_response as respmod; + + // Version + let version = net_info.as_ref().map(|info| respmod::Version { + protocol: info.protocol_version as u32, + software: info.version as u32, + agent: info.subversion.clone(), + }); + + // Time + let time = if let Some(bc) = &bc_info + && let Some(net) = &net_info + { + let now = chrono::Utc::now().timestamp() as u32; + let offset = net.time_offset as i32; + let median = bc.median_time as u32; + Some(respmod::Time { + now, + offset, + median, + }) + } else { + None + }; + + let (chain, status) = if let Some(info) = &bc_info { + // Status and sync progress + let sync_progress = info.verification_progress; + let status = if !info.warnings.is_empty() { + respmod::Status::Error as i32 + } else if sync_progress >= 0.9999 { + respmod::Status::Ready as i32 + } else { + respmod::Status::Syncing as i32 + }; + + // Chain + let best_block_hash_bytes = info.best_block_hash.to_byte_array().to_vec(); + let chain_work_bytes = info.chainwork.clone(); + let chain = respmod::Chain { + name: info.chain.clone(), + headers_count: info.headers as u32, + 
blocks_count: info.blocks as u32, + best_block_hash: best_block_hash_bytes, + difficulty: info.difficulty, + chain_work: chain_work_bytes, + is_synced: status == respmod::Status::Ready as i32, + sync_progress, + }; + (Some(chain), Some(status)) + } else { + (None, None) + }; + + // Network + let network = net_info.as_ref().map(|info| respmod::Network { + peers_count: info.connections as u32, + fee: Some(respmod::NetworkFee { + relay: info.relay_fee.to_dash(), + incremental: info.incremental_fee.to_dash(), + }), + }); + + let response = GetBlockchainStatusResponse { + version, + time, + status: status.unwrap_or(respmod::Status::Error as i32), + sync_progress: chain.as_ref().map(|c| c.sync_progress).unwrap_or(0.0), + chain, + network, + }; + + trace!( + status = status, + sync_progress = response.sync_progress, + "Prepared get_blockchain_status response" + ); + + Ok(Response::new(response)) + } + .await; + + match &result { + Ok(resp) => info!( + method, + status = resp.get_ref().status, + sync_progress = resp.get_ref().sync_progress, + "request succeeded" + ), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + /// Return the masternode status for the current node via Dash Core. 
+ async fn get_masternode_status( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received get_masternode_status request"); + let method = type_name_of_val(request.get_ref()); + let _ = request; + use dapi_grpc::core::v0::get_masternode_status_response::Status as MnStatus; + use dashcore_rpc::json::MasternodeState as CoreStatus; + + let result: Result, Status> = async { + // Query core for masternode status and overall sync status + let (mn_status_res, mnsync_res) = tokio::join!( + self.core_client.get_masternode_status(), + self.core_client.mnsync_status() + ); + + let mn_status = mn_status_res.map_err(tonic::Status::from)?; + let mnsync = mnsync_res.map_err(tonic::Status::from)?; + + // Map masternode state to gRPC enum + let status_enum = match mn_status.state { + CoreStatus::MasternodeWaitingForProtx => MnStatus::WaitingForProtx as i32, + CoreStatus::MasternodePoseBanned => MnStatus::PoseBanned as i32, + CoreStatus::MasternodeRemoved => MnStatus::Removed as i32, + CoreStatus::MasternodeOperatorKeyChanged => MnStatus::OperatorKeyChanged as i32, + CoreStatus::MasternodeProtxIpChanged => MnStatus::ProtxIpChanged as i32, + CoreStatus::MasternodeReady => MnStatus::Ready as i32, + CoreStatus::MasternodeError => MnStatus::Error as i32, + CoreStatus::Nonrecognised | CoreStatus::Unknown => MnStatus::Unknown as i32, + }; + + // pro_tx_hash bytes + let pro_tx_hash_hex = mn_status.pro_tx_hash.to_string(); + let pro_tx_hash_bytes = hex::decode(&pro_tx_hash_hex).unwrap_or_default(); + + // Get PoSe penalty via masternode list filtered by protx hash + let pose_penalty = match self + .core_client + .get_masternode_pos_penalty(&pro_tx_hash_hex) + .await + { + Ok(Some(score)) => score, + _ => 0, + }; + + // Sync flags and progress computed from AssetID (JS parity) + let is_synced = mnsync.is_synced; + let sync_progress = match mnsync.asset_id { + 999 => 1.0, // FINISHED + 0 => 0.0, // INITIAL + 1 => 1.0 / 3.0, // BLOCKCHAIN + 4 => 2.0 / 3.0, // GOVERNANCE (legacy 
numeric value) + _ => 0.0, + }; + + let response = GetMasternodeStatusResponse { + status: status_enum, + pro_tx_hash: pro_tx_hash_bytes, + pose_penalty, + is_synced, + sync_progress, + }; + + Ok(Response::new(response)) + } + .await; + + match &result { + Ok(resp) => info!( + method, + status = resp.get_ref().status, + synced = resp.get_ref().is_synced, + "request succeeded" + ), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + /// Estimate smart fee rate for a confirmation target, preserving legacy units. + async fn get_estimated_transaction_fee( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received get_estimated_transaction_fee request"); + let method = type_name_of_val(request.get_ref()); + let blocks = request.into_inner().blocks.clamp(1, 1000) as u16; + + let result: Result, Status> = async { + let fee = self + .core_client + .estimate_smart_fee_btc_per_kb(blocks) + .await + .map_err(tonic::Status::from)? + .unwrap_or(0.0); + + Ok(Response::new(GetEstimatedTransactionFeeResponse { fee })) + } + .await; + + match &result { + Ok(resp) => info!( + method, + blocks, + fee = resp.get_ref().fee, + "request succeeded" + ), + Err(status) => warn!(method, blocks, error = %status, "request failed"), + } + + result + } + + /// Stream block headers with optional chain locks, selecting optimal delivery mode. 
+ async fn subscribe_to_block_headers_with_chain_locks( + &self, + request: Request, + ) -> Result::subscribeToBlockHeadersWithChainLocksStream>, Status> { + trace!("Received subscribe_to_block_headers_with_chain_locks request"); + let method = type_name_of_val(request.get_ref()); + let result = self + .streaming_service + .subscribe_to_block_headers_with_chain_locks_impl(request) + .await; + + match &result { + Ok(_) => info!(method, "request succeeded"), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + /// Stream transactions accompanied by proofs via the streaming service. + async fn subscribe_to_transactions_with_proofs( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received subscribe_to_transactions_with_proofs request"); + let method = type_name_of_val(request.get_ref()); + let result = self + .streaming_service + .subscribe_to_transactions_with_proofs_impl(request) + .await; + + match &result { + Ok(_) => info!(method, "request succeeded"), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + /// Stream masternode list diffs using the masternode sync helper. 
+ async fn subscribe_to_masternode_list( + &self, + request: Request, + ) -> Result, Status> { + trace!("Received subscribe_to_masternode_list request"); + let method = type_name_of_val(request.get_ref()); + let result = self + .streaming_service + .subscribe_to_masternode_list_impl(request) + .await; + + match &result { + Ok(_) => info!(method, "request succeeded"), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } +} diff --git a/packages/rs-dapi/src/services/mod.rs b/packages/rs-dapi/src/services/mod.rs new file mode 100644 index 00000000000..2761f9bdd17 --- /dev/null +++ b/packages/rs-dapi/src/services/mod.rs @@ -0,0 +1,7 @@ +pub mod core_service; +pub mod platform_service; +pub mod streaming_service; + +pub use core_service::CoreServiceImpl; +pub use platform_service::PlatformServiceImpl; +pub use streaming_service::StreamingServiceImpl; diff --git a/packages/rs-dapi/src/services/platform_service/broadcast_state_transition.rs b/packages/rs-dapi/src/services/platform_service/broadcast_state_transition.rs new file mode 100644 index 00000000000..a0bb9317fc7 --- /dev/null +++ b/packages/rs-dapi/src/services/platform_service/broadcast_state_transition.rs @@ -0,0 +1,259 @@ +/*! + * Complex implementation of broadcastStateTransition + * + * This module implements the full logic for broadcasting state transitions + * to the Tenderdash network, including validation, error handling, and + * duplicate detection, following the JavaScript DAPI implementation. 
+ */ + +use crate::error::DapiError; +use crate::services::PlatformServiceImpl; +use crate::services::platform_service::TenderdashStatus; +use crate::services::platform_service::error_mapping::decode_consensus_error; +use base64::prelude::*; +use dapi_grpc::platform::v0::{BroadcastStateTransitionRequest, BroadcastStateTransitionResponse}; +use sha2::{Digest, Sha256}; +use tonic::Request; +use tracing::{Instrument, debug, trace}; + +impl PlatformServiceImpl { + /// Complex implementation of broadcastStateTransition + /// + /// This method: + /// 1. Validates the state transition request + /// 2. Converts the state transition to base64 for Tenderdash + /// 3. Broadcasts via Tenderdash RPC + /// 4. Handles complex error scenarios including duplicates + /// 5. Returns appropriate gRPC responses + /// + /// ## Returned Values + /// + /// code: non-zero on error + /// data: string error message or null + /// info: base64-encoded CBOR with error details or null + /// hash: base64-encoded hash of the state transition or null + pub async fn broadcast_state_transition_impl( + &self, + request: Request, + ) -> Result { + let tx = request.get_ref().state_transition.clone(); + + // Validate that state transition is provided + if tx.is_empty() { + debug!("State transition is empty"); + return Err(DapiError::InvalidArgument( + "State Transition is not specified".to_string(), + )); + } + + let txid = Sha256::digest(&tx).to_vec(); + let txid_hex = hex::encode(&txid); + + let span = tracing::trace_span!("broadcast_state_transition_impl", tx = %txid_hex); + + async move { + // Convert to base64 for Tenderdash RPC + let tx_base64 = BASE64_STANDARD.encode(&tx); + + // Attempt to broadcast the transaction; note that both Ok and Err can contain + // information about the broadcast result, so we need to handle both. 
+ let error_result = match self.tenderdash_client.broadcast_tx(tx_base64.clone()).await { + Ok(broadcast_result) => { + if broadcast_result.code == 0 { + trace!( + st_hash = %txid_hex, + "broadcast_state_transition: state transition broadcasted successfully" + ); + // we are good, no need to return anything specific + return Ok(BroadcastStateTransitionResponse {}); + } else { + debug!( + code = broadcast_result.code, + info = ?broadcast_result.info, + data = ?broadcast_result.data, + tx = %txid_hex, + "broadcast_state_transition: State transition broadcast failed - service error" + ); + + // Prefer detailed error message if provided in `data`, otherwise fallback to `info`. + let error_message = if broadcast_result.data.is_empty() { + broadcast_result.info.clone() + } else { + broadcast_result.data.clone() + }; + + let info = if broadcast_result.info.is_empty() { + None + } else { + Some(broadcast_result.info.as_str()) + }; + + map_broadcast_error(broadcast_result.code, &error_message, info) + } + } + Err(DapiError::TenderdashClientError(e)) => DapiError::TenderdashClientError(e), + Err(error) => { + tracing::debug!( + error = %error, + tx = %txid_hex, + "broadcast_state_transition: Error broadcasting state transition to Tenderdash" + ); + return Err(error); + } + }; + + let response: Result = match error_result { + DapiError::AlreadyExists(_) => self.handle_duplicate_transaction(&tx, &txid).await, + e => Err(e), + }; + + response.inspect_err(|e| { + debug!( + error = %e, + st_hash = %txid_hex, + "broadcast_state_transition: failed to broadcast state transition to Tenderdash" + ); + }) + } + .instrument(span) + .await + } + + /// Handle duplicate transaction scenarios + async fn handle_duplicate_transaction( + &self, + st_bytes: &[u8], + txid: &[u8], + ) -> Result { + let txid_base64 = BASE64_STANDARD.encode(txid); + + debug!(tx = txid_base64, "Checking duplicate state transition",); + + // Check if the ST is in the mempool + match 
self.tenderdash_client.unconfirmed_tx(&txid_base64).await { + Ok(_) => { + return Err(DapiError::AlreadyExists( + "state transition already in mempool".to_string(), + )); + } + Err(DapiError::TenderdashClientError(status)) => { + let is_not_found = status + .message + .as_deref() + .map(|message| message.contains("not found")) + .unwrap_or(false); + + if !is_not_found { + return Err(DapiError::TenderdashClientError(status)); + } + } + Err(DapiError::NotFound(_)) => {} + Err(e) => return Err(e), + } + + // Check if the ST is already committed to the blockchain + match self.tenderdash_client.tx(txid_base64.clone()).await { + Ok(tx_response) => { + if !tx_response.tx_result.is_empty() || !tx_response.tx.is_empty() { + return Err(DapiError::AlreadyExists( + "state transition already in chain".to_string(), + )); + } + } + Err(DapiError::NotFound(e)) => { + tracing::trace!( + error = %e, + "State transition not found in chain, will re-validate with CheckTx" + ); + } + Err(e) => return Err(e), + } + + // If not in mempool and not in chain, re-validate with CheckTx + let st_base64 = BASE64_STANDARD.encode(st_bytes); + match self.tenderdash_client.check_tx(st_base64).await { + Ok(check_response) => { + if check_response.code != 0 { + let val = serde_json::to_value(check_response)?; + return Err(DapiError::from_tenderdash_error(val)); + } + + // CheckTx passes but ST was removed from block - this is a bug + debug!( + tx_bytes = hex::encode(st_bytes), + "State transition is passing CheckTx but removed from the block by proposer; potential bug, please report", + ); + + Err(DapiError::Internal("State Transition processing error. 
Please report faulty state transition and try to create a new state transition with different hash as a workaround.".to_string())) + } + Err(DapiError::Client(message)) => { + if message.contains("ECONNRESET") || message.contains("socket hang up") { + Err(DapiError::Unavailable( + "Tenderdash is not available".to_string(), + )) + } else { + Err(DapiError::Internal(format!( + "Failed checking state transition: {}", + message + ))) + } + } + Err(DapiError::TenderdashClientError(rpc_error)) => { + Err(DapiError::TenderdashClientError(rpc_error)) + } + Err(other) => Err(DapiError::Internal(format!( + "State transition check failed: {}", + other + ))), + } + } +} + +/// Convert Tenderdash broadcast error details into a structured `DapiError`. +fn map_broadcast_error(code: u32, error_message: &str, info: Option<&str>) -> DapiError { + // TODO: prefer code over message when possible + tracing::trace!( + "broadcast_state_transition: Classifying broadcast error {}: {}", + code, + error_message + ); + if error_message == "tx already exists in cache" { + return DapiError::AlreadyExists(error_message.to_string()); + } + + if error_message.starts_with("Tx too large.") { + let message = error_message.replace("Tx too large. ", ""); + return DapiError::InvalidArgument( + "state transition is too large. 
".to_string() + &message, + ); + } + + if error_message.starts_with("mempool is full") { + return DapiError::ResourceExhausted(error_message.to_string()); + } + + if error_message.contains("context deadline exceeded") { + return DapiError::Timeout("broadcasting state transition is timed out".to_string()); + } + + if error_message.contains("too_many_requests") { + return DapiError::ResourceExhausted( + "tenderdash is not responding: too many requests".to_string(), + ); + } + + if error_message.starts_with("broadcast confirmation not received:") { + return DapiError::Timeout(error_message.to_string()); + } + let consensus_error = info.and_then(|x| decode_consensus_error(x.to_string())); + let message = if error_message.is_empty() { + None + } else { + Some(error_message.to_string()) + }; + DapiError::TenderdashClientError(TenderdashStatus::new( + i64::from(code), + message, + consensus_error, + )) +} diff --git a/packages/rs-dapi/src/services/platform_service/error_mapping.rs b/packages/rs-dapi/src/services/platform_service/error_mapping.rs new file mode 100644 index 00000000000..c2cdf252d9a --- /dev/null +++ b/packages/rs-dapi/src/services/platform_service/error_mapping.rs @@ -0,0 +1,370 @@ +use base64::{engine, prelude::Engine as _}; +use dapi_grpc::platform::v0::{ + StateTransitionBroadcastError, WaitForStateTransitionResultResponse, +}; +use dpp::{consensus::ConsensusError, serialization::PlatformDeserializable}; +use std::{fmt::Debug, str::FromStr}; +use tonic::{Code, metadata::MetadataValue}; + +#[derive(Clone, serde::Serialize)] +pub struct TenderdashStatus { + pub code: i64, + // human-readable error message; will be put into `data` field + pub message: Option, + // CBOR-encoded dpp ConsensusError + pub consensus_error: Option>, +} + +impl TenderdashStatus { + /// Construct a Tenderdash status wrapper, validating consensus error payloads upfront. 
+ pub fn new(code: i64, message: Option, consensus_error: Option>) -> Self { + // sanity check: consensus_error must deserialize to ConsensusError if present + if let Some(ref bytes) = consensus_error + && ConsensusError::deserialize_from_bytes(bytes).is_err() + { + tracing::debug!( + data = hex::encode(bytes), + "TenderdashStatus consensus_error failed to deserialize to ConsensusError" + ); + } + Self { + code, + message, + consensus_error, + } + } + + /// Convert the Tenderdash status into a gRPC `tonic::Status` with enriched metadata. + pub fn to_status(&self) -> tonic::Status { + let status_code = self.grpc_code(); + let status_message = self.grpc_message(); + + let mut status: tonic::Status = tonic::Status::new(status_code, status_message); + + self.write_grpc_metadata(status.metadata_mut()); + + status + } + + /// Populate metadata fields expected by clients consuming Drive/Tenderdash errors. + fn write_grpc_metadata(&self, metadata: &mut tonic::metadata::MetadataMap) { + // drive-error-data-bin contains serialized DriveErrorDataBin structure + let mut serialized_drive_error_data = Vec::new(); + ciborium::ser::into_writer(&self, &mut serialized_drive_error_data) + .inspect_err(|e| { + tracing::debug!("Failed to serialize drive error data bin: {}", e); + }) + .ok(); + + metadata.insert_bin( + "drive-error-data-bin", + MetadataValue::from_bytes(&serialized_drive_error_data), + ); + + // expose the consensus error code directly for clients + metadata.insert( + "code", + MetadataValue::from_str(&self.code.to_string()) + .unwrap_or_else(|_| MetadataValue::from_static("0")), + ); + + if let Some(consensus_error) = &self.consensus_error { + // Add consensus error metadata + metadata.insert_bin( + "dash-serialized-consensus-error-bin", + MetadataValue::from_bytes(consensus_error), + ); + } + } + + /// Derive an end-user message, preferring explicit message over consensus error details. 
+ fn grpc_message(&self) -> String { + if let Some(message) = &self.message { + return message.clone(); + } + + if let Some(consensus_error_bytes) = &self.consensus_error + && let Ok(consensus_error) = + ConsensusError::deserialize_from_bytes(consensus_error_bytes).inspect_err(|e| { + tracing::debug!("Failed to deserialize consensus error: {}", e); + }) + { + return consensus_error.to_string(); + } + + format!("Unknown error with code {}", self.code) + } + + /// map gRPC code from Tenderdash to tonic::Code. + /// + /// See packages/rs-dpp/src/errors/consensus/codes.rs for possible codes. + fn grpc_code(&self) -> Code { + let code = Code::from_i32(self.code as i32); + if code != Code::Unknown { + return code; + } + + match self.code { + 17..10000 => Code::Unknown, + 10000..20000 => Code::InvalidArgument, + 20000..30000 => Code::Unauthenticated, + 30000..40000 => Code::FailedPrecondition, + 40000..50000 => Code::InvalidArgument, + _ => Code::Internal, + } + } +} + +impl From for tonic::Response { + fn from(err: TenderdashStatus) -> Self { + use dapi_grpc::platform::v0::wait_for_state_transition_result_response::*; + let st_error = StateTransitionBroadcastError::from(err.clone()); + + let message = WaitForStateTransitionResultResponse { + version: Some(Version::V0(WaitForStateTransitionResultResponseV0 { + metadata: None, + result: Some(wait_for_state_transition_result_response_v0::Result::Error( + st_error, + )), + })), + }; + + let mut response = Self::new(message); + + err.write_grpc_metadata(response.metadata_mut()); + + response + } +} + +impl From for StateTransitionBroadcastError { + fn from(err: TenderdashStatus) -> Self { + StateTransitionBroadcastError { + code: err.code.clamp(0, u32::MAX as i64) as u32, + message: err.message.unwrap_or_else(|| "Unknown error".to_string()), + data: err.consensus_error.clone().unwrap_or_default(), + } + } +} + +impl Debug for TenderdashStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + 
f.debug_struct("TenderdashStatus") + .field("code", &self.code) + .field("message", &self.message) + .field( + "consensus_error", + &self + .consensus_error + .as_ref() + .map(hex::encode) + .unwrap_or_else(|| "None".to_string()), + ) + .finish() + } +} + +/// Decode a potentially unpadded base64 string used by Tenderdash error payloads. +pub(crate) fn base64_decode(input: &str) -> Option> { + static BASE64: engine::GeneralPurpose = { + let b64_config = engine::GeneralPurposeConfig::new() + .with_decode_allow_trailing_bits(true) + .with_encode_padding(false) + .with_decode_padding_mode(engine::DecodePaddingMode::Indifferent); + + engine::GeneralPurpose::new(&base64::alphabet::STANDARD, b64_config) + }; + BASE64 + .decode(input) + .inspect_err(|e| { + tracing::debug!("Failed to decode base64: {}", e); + }) + .ok() +} + +/// Walk a nested CBOR map by following the provided key path. +fn walk_cbor_for_key<'a>(data: &'a ciborium::Value, keys: &[&str]) -> Option<&'a ciborium::Value> { + if keys.is_empty() { + tracing::trace!(?data, "found value, returning"); + return Some(data); + } + + let current_key = keys[0]; + let rest_keys = &keys[1..]; + + let map = data.as_map().or_else(|| { + tracing::trace!(?data, "Not a CBOR map, cannot walk for key: {:?}", keys); + None + })?; + + for (k, v) in map { + if let ciborium::Value::Text(key_str) = k + && key_str == current_key + { + let found = walk_cbor_for_key(v, rest_keys); + return found; + } + } + + tracing::trace!(?keys, "Key not found in CBOR map: {:?}", keys); + None +} + +/// Decode Tenderdash consensus error metadata from base64 CBOR into raw consensus bytes. 
+pub(super) fn decode_consensus_error(info_base64: String) -> Option> { + use ciborium::value::Value; + + tracing::trace!(?info_base64, "decode_consensus_error: received info"); + let decoded_bytes = base64_decode(&info_base64)?; + tracing::trace!(hex = %hex::encode(&decoded_bytes), len = decoded_bytes.len(), "decode_consensus_error: base64 decoded bytes"); + // CBOR-decode decoded_bytes + let raw_value: Value = ciborium::de::from_reader(decoded_bytes.as_slice()) + .inspect_err(|e| { + tracing::debug!("Failed to decode drive error info from CBOR: {}", e); + }) + .ok()?; + + tracing::trace!("Drive error info CBOR value: {:?}", raw_value); + + let serialized_error = walk_cbor_for_key(&raw_value, &["data", "serializedError"])? + .as_array()? + .iter() + .map(|v| { + v.as_integer().and_then(|n| { + u8::try_from(n) + .inspect_err(|e| { + tracing::debug!("Non-u8 value in serializedError array: {}", e); + }) + .ok() + }) + }) + .collect::>>() + .or_else(|| { + tracing::debug!("serializedError is not an array of integers"); + None + })?; + + // sanity check: serialized error must deserialize to ConsensusError + if ConsensusError::deserialize_from_bytes(&serialized_error).is_err() { + tracing::debug!( + data = hex::encode(&serialized_error), + "Drive error info 'serializedError' failed to deserialize to ConsensusError" + ); + return None; + } + + tracing::trace!( + serialized_error_hex = %hex::encode(&serialized_error), + len = serialized_error.len(), + "decode_consensus_error: extracted consensus error bytes", + ); + + Some(serialized_error) +} + +impl From for TenderdashStatus { + // Convert from a JSON error object returned by Tenderdash RPC, typically in the `error` field of a JSON-RPC response. 
+ fn from(value: serde_json::Value) -> Self { + if let Some(object) = value.as_object() { + let code = object + .get("code") + .and_then(|c| c.as_i64()) + .unwrap_or_else(|| { + tracing::debug!("Tenderdash error missing 'code' field, defaulting to 0"); + 0 + }); + let message = object + .get("message") + .and_then(|m| m.as_str()) + .map(|s| s.to_string()); + + // info contains additional error details, possibly including consensus error + let consensus_error = object + .get("info") + .and_then(|v| v.as_str().map(|s| s.to_string())) + .and_then(decode_consensus_error); + + Self { + code, + message, + consensus_error, + } + } else { + tracing::debug!("Tenderdash error is not an object: {:?}", value); + Self { + code: u32::MAX as i64, + message: Some("Invalid error object from Tenderdash".to_string()), + consensus_error: None, + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use dpp::{serialization::PlatformSerializableWithPlatformVersion, version::PlatformVersion}; + use serde::Deserialize; + + fn setup_tracing() { + let _ = tracing_subscriber::fmt() + .with_max_level(tracing::Level::TRACE) + .with_test_writer() + .try_init(); + } + + #[derive(Deserialize)] + struct DriveErrorDataBinMetadata { + code: i64, + } + + #[test] + fn to_status_sets_expected_metadata() { + setup_tracing(); + + let consensus_error = ConsensusError::DefaultError; + let original_consensus_error_bytes = consensus_error + .serialize_to_bytes_with_platform_version(PlatformVersion::latest()) + .expect("should serialize"); + + let status = TenderdashStatus::new( + 42, + Some("metadata test".to_string()), + Some(original_consensus_error_bytes.clone()), + ) + .to_status(); + + let metadata = status.metadata(); + + let drive_error_bytes = metadata + .get_bin("drive-error-data-bin") + .inspect(|v| { + tracing::debug!(?v, "drive-error-data-bin metadata"); + }) + .expect("missing drive-error-data-bin metadata") + .to_bytes() + .expect("drive-error-data-bin should be valid bytes"); + let 
drive_error: DriveErrorDataBinMetadata = + ciborium::de::from_reader(drive_error_bytes.as_ref()) + .expect("drive-error-data-bin should deserialize"); + assert_eq!(drive_error.code, 42); + + let consensus_error_bytes = metadata + .get_bin("dash-serialized-consensus-error-bin") + .expect("missing consensus error metadata") + .to_bytes() + .expect("consensus error metadata should be valid bytes"); + assert_eq!( + consensus_error_bytes.as_ref(), + original_consensus_error_bytes.as_slice() + ); + } + #[test_case::test_case( + "oWRkYXRhoW9zZXJpYWxpemVkRXJyb3KYIgMAGCwYHRgeGIoYwhh+GHwYvRhmGJ0UGNUYuhjlARjgGN0YmBhkERinGB0YPRh5GDIMGBkWGLcYfhMYzg=="; "info_fixture_1" + )] + fn test_info_fixture(info_base64: &str) { + setup_tracing(); + let decoded = decode_consensus_error(info_base64.to_string()).unwrap(); + ConsensusError::deserialize_from_bytes(&decoded).expect("should deserialize"); + } +} diff --git a/packages/rs-dapi/src/services/platform_service/get_status.rs b/packages/rs-dapi/src/services/platform_service/get_status.rs new file mode 100644 index 00000000000..e572fc10fed --- /dev/null +++ b/packages/rs-dapi/src/services/platform_service/get_status.rs @@ -0,0 +1,543 @@ +use dapi_grpc::platform::v0::{ + GetStatusRequest, GetStatusResponse, + get_status_response::get_status_response_v0, + get_status_response::{self, GetStatusResponseV0}, +}; +use dapi_grpc::tonic::{Request, Response, Status}; +use tracing::{debug, trace}; + +use crate::clients::{ + drive_client::DriveStatusResponse, + tenderdash_client::{NetInfoResponse, TenderdashStatusResponse}, +}; +use crate::error::DapiError; + +// The struct is defined in the parent platform_service.rs module +use crate::services::platform_service::PlatformServiceImpl; + +/// Captures upstream health information when building the Platform status response. 
+#[derive(Debug, Default)] +pub struct PlatformStatusHealth { + pub drive_error: Option, + pub tenderdash_status_error: Option, + pub tenderdash_netinfo_error: Option, +} + +impl PlatformStatusHealth { + #[inline] + pub fn is_drive_healthy(&self) -> bool { + self.drive_error.is_none() + } + + #[inline] + pub fn is_tenderdash_healthy(&self) -> bool { + self.tenderdash_status_error.is_none() + } + + #[inline] + pub fn is_netinfo_healthy(&self) -> bool { + self.tenderdash_netinfo_error.is_none() + } + + #[inline] + pub fn is_healthy(&self) -> bool { + self.is_drive_healthy() && self.is_tenderdash_healthy() && self.is_netinfo_healthy() + } +} + +impl PlatformServiceImpl { + /// Handle the Platform `getStatus` request with caching and cache warming logic. + pub async fn get_status_impl( + &self, + request: Request, + ) -> Result, Status> { + use crate::cache::make_cache_key; + use std::time::Duration; + + // Cache status response, just to avoid hammering Drive and Tenderdash + let key = make_cache_key("get_status", request.get_ref()); + trace!(?key, "get_status cache lookup"); + if let Some(mut cached) = self + .platform_cache + .get_with_ttl::(&key, Duration::from_secs(10)) + { + trace!(?key, "get_status cache hit"); + // Refresh local time to current instant like JS implementation + if let Some(get_status_response::Version::V0(ref mut v0)) = cached.version + && let Some(ref mut time) = v0.time + { + time.local = chrono::Utc::now().timestamp().max(0) as u64; + } + return Ok(Response::new(cached)); + } + + trace!(?key, "get_status cache miss; building response"); + // Build fresh response and cache it + match self.build_status_response_with_health().await { + Ok((response, health)) => { + trace!( + drive_error = ?health.drive_error, + tenderdash_status_error = ?health.tenderdash_status_error, + tenderdash_netinfo_error = ?health.tenderdash_netinfo_error, + "get_status upstream fetch completed" + ); + self.platform_cache.put(key, &response); + Ok(Response::new(response)) 
+ } + Err(status) => Err(status), + } + } + + /// Gather Drive and Tenderdash status information and compose the unified response. + pub(crate) async fn build_status_response_with_health( + &self, + ) -> Result<(GetStatusResponse, PlatformStatusHealth), Status> { + let mut health = PlatformStatusHealth::default(); + + // Prepare request for Drive + let drive_request = GetStatusRequest { + version: Some(dapi_grpc::platform::v0::get_status_request::Version::V0( + dapi_grpc::platform::v0::get_status_request::GetStatusRequestV0 {}, + )), + }; + + // Fetch data from Drive and Tenderdash concurrently + trace!("fetching Drive status, Tenderdash status, and netinfo"); + let (drive_result, tenderdash_status_result, tenderdash_netinfo_result) = tokio::join!( + self.drive_client.get_drive_status(&drive_request), + self.tenderdash_client.status(), + self.tenderdash_client.net_info() + ); + + // Handle potential errors with proper logging + let drive_status = match drive_result { + Ok(status) => status, + Err(e) => { + debug!(error = ?e, "Failed to fetch Drive status - technical failure, using defaults"); + health.drive_error = Some(e.into()); + DriveStatusResponse::default() + } + }; + + let tenderdash_status = match tenderdash_status_result { + Ok(status) => status, + Err(e) => { + debug!(error = ?e, "Failed to fetch Tenderdash status - technical failure, using defaults"); + health.tenderdash_status_error = Some(e); + TenderdashStatusResponse::default() + } + }; + + let tenderdash_netinfo = match tenderdash_netinfo_result { + Ok(netinfo) => netinfo, + Err(e) => { + debug!(error = ?e, "Failed to fetch Tenderdash netinfo - technical failure, using defaults"); + health.tenderdash_netinfo_error = Some(e); + NetInfoResponse::default() + } + }; + + // Use standalone functions to create the response + let response = build_status_response(drive_status, tenderdash_status, tenderdash_netinfo)?; + Ok((response, health)) + } +} + +// Status building functions + +/// Assemble the full 
gRPC response from Drive and Tenderdash status snapshots. +fn build_status_response( + drive_status: DriveStatusResponse, + tenderdash_status: TenderdashStatusResponse, + tenderdash_netinfo: NetInfoResponse, +) -> Result { + let v0 = GetStatusResponseV0 { + version: Some(build_version_info(&drive_status, &tenderdash_status)), + node: build_node_info(&tenderdash_status), + chain: build_chain_info(&drive_status, &tenderdash_status), + state_sync: build_state_sync_info(&tenderdash_status), + network: build_network_info(&tenderdash_status, &tenderdash_netinfo), + time: Some(build_time_info(&drive_status)), + }; + + let response: GetStatusResponse = GetStatusResponse { + version: Some(get_status_response::Version::V0(v0)), + }; + + Ok(response) +} + +/// Populate version metadata including protocol and software versions. +fn build_version_info( + drive_status: &DriveStatusResponse, + tenderdash_status: &TenderdashStatusResponse, +) -> get_status_response_v0::Version { + let mut version = get_status_response_v0::Version::default(); + + // Protocol version + let mut protocol = get_status_response_v0::version::Protocol::default(); + + // Tenderdash protocol version + let node_info = &tenderdash_status.node_info; + let protocol_version = &node_info.protocol_version; + + if !protocol_version.block.is_empty() || !protocol_version.p2p.is_empty() { + let mut tenderdash_protocol = + get_status_response_v0::version::protocol::Tenderdash::default(); + + if !protocol_version.block.is_empty() { + tenderdash_protocol.block = protocol_version.block.parse().unwrap_or(0); + } + + if !protocol_version.p2p.is_empty() { + tenderdash_protocol.p2p = protocol_version.p2p.parse().unwrap_or(0); + } + + protocol.tenderdash = Some(tenderdash_protocol); + } + + // Drive protocol version + if let Some(version_info) = &drive_status.version + && let Some(protocol_info) = &version_info.protocol + && let Some(drive_protocol) = &protocol_info.drive + { + let drive_protocol_version = 
get_status_response_v0::version::protocol::Drive { + current: drive_protocol.current.unwrap_or(0).min(u32::MAX as u64) as u32, + latest: drive_protocol.latest.unwrap_or(0).min(u32::MAX as u64) as u32, + }; + + protocol.drive = Some(drive_protocol_version); + } + + version.protocol = Some(protocol); + + // Software version + let drive_version = drive_status + .version + .as_ref() + .and_then(|v| v.software.as_ref()) + .and_then(|s| s.drive.as_ref()) + .cloned(); + + let tenderdash_version = if tenderdash_status.node_info.version.is_empty() { + None + } else { + Some(tenderdash_status.node_info.version.clone()) + }; + + let software = get_status_response_v0::version::Software { + dapi: env!("CARGO_PKG_VERSION").to_string(), + drive: drive_version, + tenderdash: tenderdash_version, + }; + + version.software = Some(software); + version +} + +/// Build node identification data from Tenderdash status, decoding hex identifiers. +fn build_node_info( + tenderdash_status: &TenderdashStatusResponse, +) -> Option { + let node_info = &tenderdash_status.node_info; + + if node_info.id.is_empty() && node_info.pro_tx_hash.is_empty() { + None + } else { + let mut node = get_status_response_v0::Node::default(); + + if let Ok(id_bytes) = hex::decode(&node_info.id) { + node.id = id_bytes; + } + + if !node_info.pro_tx_hash.is_empty() + && let Ok(pro_tx_hash_bytes) = hex::decode(&node_info.pro_tx_hash) + { + node.pro_tx_hash = Some(pro_tx_hash_bytes); + } + + Some(node) + } +} + +/// Construct chain synchronization information combining Drive and Tenderdash fields. 
+fn build_chain_info( + drive_status: &DriveStatusResponse, + tenderdash_status: &TenderdashStatusResponse, +) -> Option { + let sync_info = &tenderdash_status.sync_info; + + let has_sync_data = sync_info.latest_block_height != 0 + || !sync_info.latest_block_hash.is_empty() + || !sync_info.latest_app_hash.is_empty(); + + if !has_sync_data { + None + } else { + let catching_up = sync_info.catching_up; + + let latest_block_hash = if sync_info.latest_block_hash.is_empty() { + Vec::new() + } else { + hex::decode(&sync_info.latest_block_hash).unwrap_or_default() + }; + + let latest_app_hash = if sync_info.latest_app_hash.is_empty() { + Vec::new() + } else { + hex::decode(&sync_info.latest_app_hash).unwrap_or_default() + }; + + let latest_block_height = sync_info.latest_block_height.max(0) as u64; + + let earliest_block_hash = if sync_info.earliest_block_hash.is_empty() { + Vec::new() + } else { + hex::decode(&sync_info.earliest_block_hash).unwrap_or_default() + }; + + let earliest_app_hash = if sync_info.earliest_app_hash.is_empty() { + Vec::new() + } else { + hex::decode(&sync_info.earliest_app_hash).unwrap_or_default() + }; + + let earliest_block_height = sync_info.earliest_block_height.max(0) as u64; + let max_peer_block_height = sync_info.max_peer_block_height.max(0) as u64; + + let core_chain_locked_height = drive_status + .chain + .as_ref() + .and_then(|c| c.core_chain_locked_height) + .map(|h| { + h.try_into() + .inspect_err(|error| { + tracing::warn!( + core_chain_locked_height = h, + ?error, + "Failed to convert core_chain_locked_height" + ) + }) + .unwrap_or(u32::MIN) + }); + + let chain = get_status_response_v0::Chain { + catching_up, + latest_block_hash, + latest_app_hash, + latest_block_height, + earliest_block_hash, + earliest_app_hash, + earliest_block_height, + max_peer_block_height, + core_chain_locked_height, + }; + + Some(chain) + } +} + +/// Produce state sync metrics derived from Tenderdash status response. 
+fn build_state_sync_info( + tenderdash_status: &TenderdashStatusResponse, +) -> Option { + let sync_info = &tenderdash_status.sync_info; + + let has_state_sync_data = !sync_info.total_synced_time.is_empty() + || !sync_info.remaining_time.is_empty() + || !sync_info.total_snapshots.is_empty() + || !sync_info.snapshot_height.is_empty(); + + if !has_state_sync_data { + None + } else { + let parse_or_default = |value: &str| -> u64 { + if value.is_empty() { + 0 + } else { + value.parse::().map(|v| v.max(0) as u64).unwrap_or(0) + } + }; + + let state_sync = get_status_response_v0::StateSync { + total_synced_time: parse_or_default(&sync_info.total_synced_time), + remaining_time: parse_or_default(&sync_info.remaining_time), + total_snapshots: parse_or_default(&sync_info.total_snapshots).min(u32::MAX as u64) + as u32, + chunk_process_avg_time: parse_or_default(&sync_info.chunk_process_avg_time), + snapshot_height: parse_or_default(&sync_info.snapshot_height), + snapshot_chunks_count: parse_or_default(&sync_info.snapshot_chunks_count), + backfilled_blocks: parse_or_default(&sync_info.backfilled_blocks), + backfill_blocks_total: parse_or_default(&sync_info.backfill_blocks_total), + }; + + Some(state_sync) + } +} + +/// Build network-related stats such as peers and listening state. +fn build_network_info( + tenderdash_status: &TenderdashStatusResponse, + tenderdash_netinfo: &NetInfoResponse, +) -> Option { + let has_network_data = tenderdash_netinfo.listening + || tenderdash_netinfo.n_peers > 0 + || !tenderdash_status.node_info.network.is_empty(); + + if !has_network_data { + None + } else { + let network = get_status_response_v0::Network { + listening: tenderdash_netinfo.listening, + peers_count: tenderdash_netinfo.n_peers, + chain_id: tenderdash_status.node_info.network.clone(), + }; + + Some(network) + } +} + +/// Compose the time section using Drive status timestamps. 
+fn build_time_info(drive_status: &DriveStatusResponse) -> get_status_response_v0::Time { + let mut time = get_status_response_v0::Time::default(); + + if let Some(drive_time) = &drive_status.time { + time.block = drive_time.block; + time.genesis = drive_time.genesis; + time.epoch = drive_time.epoch.map(|e| e.min(u32::MAX as u64) as u32); + } + + time.local = chrono::Utc::now().timestamp().max(0) as u64; + + time +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::clients::drive_client::DriveStatusResponse; + use crate::clients::tenderdash_client::{NetInfoResponse, TenderdashStatusResponse}; + + #[test] + fn build_status_response_populates_fields_from_tenderdash_status() { + let tenderdash_status: TenderdashStatusResponse = + serde_json::from_str(TENDERMASH_STATUS_JSON).expect("parse tenderdash status"); + let drive_status = DriveStatusResponse::default(); + let net_info = NetInfoResponse::default(); + + let response = + build_status_response(drive_status, tenderdash_status, net_info).expect("build ok"); + + let get_status_response::Version::V0(inner) = response.version.expect("version present"); + + let version = inner.version.expect("version struct"); + let software = version.software.expect("software present"); + assert_eq!(software.tenderdash.as_deref(), Some("1.5.0-dev.3")); + + let protocol = version.protocol.expect("protocol present"); + let tenderdash_protocol = protocol.tenderdash.expect("tenderdash protocol"); + assert_eq!(tenderdash_protocol.block, 14); + assert_eq!(tenderdash_protocol.p2p, 10); + + let node = inner.node.expect("node present"); + assert_eq!( + node.id, + hex::decode("972a33056d57359de8acfa4fb8b29dc1c14f76b8").expect("decode node id") + ); + + let chain = inner.chain.expect("chain present"); + assert_eq!(chain.latest_block_height, 198748); + assert_eq!( + chain.latest_block_hash, + hex::decode("B15CB7BD25D5334587B591D46FADEDA3AFCE2C57B7BC99E512F79422AB710343") + .expect("decode latest block hash") + ); + assert_eq!( + 
chain.earliest_block_hash, + hex::decode("08FA02C27EC0390BA301E4FC7E3D7EADB350C8193E3E62A093689706E3A20BFA") + .expect("decode earliest block hash") + ); + + let network = inner.network.expect("network present"); + assert_eq!(network.chain_id, "dash-testnet-51"); + + let state_sync = inner.state_sync.expect("state sync present"); + assert_eq!(state_sync.total_synced_time, 0); + + let time = inner.time.expect("time present"); + assert!(time.local > 0); + } + + const TENDERMASH_STATUS_JSON: &str = r#" + { + "node_info": { + "protocol_version": { + "p2p": "10", + "block": "14", + "app": "9" + }, + "id": "972a33056d57359de8acfa4fb8b29dc1c14f76b8", + "listen_addr": "44.239.39.153:36656", + "ProTxHash": "5C6542766615387183715D958A925552472F93335FA1612880423E4BBDAEF436", + "network": "dash-testnet-51", + "version": "1.5.0-dev.3", + "channels": [ + 64, + 32, + 33, + 34, + 35, + 48, + 56, + 96, + 97, + 98, + 99, + 0 + ], + "moniker": "hp-masternode-16", + "other": { + "tx_index": "on", + "rpc_address": "tcp://0.0.0.0:36657" + } + }, + "application_info": { + "version": "10" + }, + "sync_info": { + "latest_block_hash": "B15CB7BD25D5334587B591D46FADEDA3AFCE2C57B7BC99E512F79422AB710343", + "latest_app_hash": "FB90D667EB6CAE5DD5293EED7ECCE8B8B492EC0FF310BB0CB0C49C7DC1FFF9CD", + "latest_block_height": "198748", + "latest_block_time": "2025-10-14T13:10:48.765Z", + "earliest_block_hash": "08FA02C27EC0390BA301E4FC7E3D7EADB350C8193E3E62A093689706E3A20BFA", + "earliest_app_hash": "BF0CCB9CA071BA01AE6E67A0C090F97803D26D56D675DCD5131781CBCAC8EC8F", + "earliest_block_height": "1", + "earliest_block_time": "2024-07-19T01:40:09Z", + "max_peer_block_height": "198748", + "catching_up": false, + "total_synced_time": "0", + "remaining_time": "0", + "total_snapshots": "0", + "chunk_process_avg_time": "0", + "snapshot_height": "0", + "snapshot_chunks_count": "0", + "backfilled_blocks": "0", + "backfill_blocks_total": "0" + }, + "validator_info": { + "pro_tx_hash": 
"5C6542766615387183715D958A925552472F93335FA1612880423E4BBDAEF436", + "voting_power": 100 + }, + "light_client_info": { + "primaryID": "", + "witnessesID": null, + "number_of_peers": "0", + "last_trusted_height": "0", + "last_trusted_hash": "", + "latest_block_time": "0001-01-01T00:00:00Z", + "trusting_period": "", + "trusted_block_expired": false + } + } + "#; +} diff --git a/packages/rs-dapi/src/services/platform_service/mod.rs b/packages/rs-dapi/src/services/platform_service/mod.rs new file mode 100644 index 00000000000..07d9111a8e2 --- /dev/null +++ b/packages/rs-dapi/src/services/platform_service/mod.rs @@ -0,0 +1,522 @@ +// Platform service modular implementation +// This file contains the core PlatformServiceImpl struct and delegates to individual modules + +mod broadcast_state_transition; +mod error_mapping; +mod get_status; +mod wait_for_state_transition_result; + +use dapi_grpc::platform::v0::platform_server::Platform; +use dapi_grpc::platform::v0::{ + BroadcastStateTransitionRequest, BroadcastStateTransitionResponse, GetStatusRequest, + GetStatusResponse, WaitForStateTransitionResultRequest, WaitForStateTransitionResultResponse, +}; +use dapi_grpc::tonic::{Request, Response, Status}; +use futures::FutureExt; +use std::any::type_name_of_val; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::time::Duration; +use tokio::time::sleep; +use tracing::{info, trace, warn}; + +pub use error_mapping::TenderdashStatus; + +/// Macro to generate Platform trait method implementations that delegate to DriveClient +/// +/// Usage: `drive_method!(method_name, RequestType, ResponseType);` +/// +/// This generates a non-async method that returns impl Future, which: +/// 1. Gets the gRPC client from drive_client +/// 2. Calls the corresponding method on the client +/// 3. Returns the response directly (since gRPC client already returns Response) +macro_rules! 
drive_method { + ($method_name:ident, $request_type:ty, $response_type:ty) => { + fn $method_name<'life0, 'async_trait>( + &'life0 self, + request: Request<$request_type>, + ) -> Pin< + Box< + dyn Future, Status>> + Send + 'async_trait, + >, + > + where + 'life0: 'async_trait, + Self: 'async_trait, + { + use crate::cache::make_cache_key; + let mut client = self.drive_client.get_client(); + let cache = self.platform_cache.clone(); + let method = type_name_of_val(request.get_ref()); + async move { + let result_with_meta: Result<(Response<$response_type>, bool), Status> = async { + // Build cache key from method + request bytes + let key = make_cache_key(method, request.get_ref()); + + // Try cache + if let Some(decoded) = cache.get(&key) { + return Ok((Response::new(decoded), true)); + } + + // Fetch from Drive + trace!(method, "Calling Drive method"); + let drive_call = client.$method_name(request); + let resp = drive_call.await?; + // Store in cache using inner message + trace!(method, "Caching response"); + cache.put(key, resp.get_ref()); + trace!(method, "Response cached"); + + Ok((resp, false)) + } + .await; + + match &result_with_meta { + Ok((_, cache_hit)) => info!(method, cache_hit = *cache_hit, "request succeeded"), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result_with_meta.map(|(resp, _)| resp) + } + .boxed() + } + }; +} +use crate::DapiError; +use crate::clients::tenderdash_client::TenderdashClient; +use crate::clients::tenderdash_websocket::TenderdashWebSocketClient; +use crate::config::Config; +use crate::services::streaming_service::FilterType; +use crate::sync::Workers; + +/// Platform service implementation with modular method delegation +#[derive(Clone)] +pub struct PlatformServiceImpl { + pub drive_client: crate::clients::drive_client::DriveClient, + pub tenderdash_client: Arc, + pub websocket_client: Arc, + pub config: Arc, + pub platform_cache: crate::cache::LruResponseCache, + pub subscriber_manager: Arc, + 
#[allow(dead_code)] + // workers - dropping will cancel all spawned tasks + workers: Workers, +} + +impl PlatformServiceImpl { + /// Assemble the Platform service, wiring clients, caches, subscriptions, and workers. + /// Spawns background tasks for WebSocket streaming and platform event ingestion. + pub async fn new( + drive_client: crate::clients::drive_client::DriveClient, + tenderdash_client: Arc, + config: Arc, + subscriber_manager: Arc, + ) -> Self { + let workers = Workers::new(); + // Reuse Tenderdash client's WebSocket stream so that subscribers and forwarders share the same source. + let websocket_client = tenderdash_client.websocket_client(); + + let ws: Arc = websocket_client.clone(); + // Start listening for WebSocket events with automatic retries. + workers.spawn(async move { + loop { + match ws.connect_and_listen().await { + Ok(_) => { + info!("Tenderdash WebSocket listener exited; reconnecting in 10 seconds"); + } + Err(e) => { + tracing::error!( + error = %e, + retry_in_secs = 10, + "Tenderdash WebSocket listener error" + ); + } + } + + sleep(Duration::from_secs(10)).await; + } + #[allow(unreachable_code)] + Ok::<(), DapiError>(()) + }); + + // Cache dropped on each new block + let invalidation_subscription = subscriber_manager + .add_subscription(FilterType::PlatformAllBlocks) + .await; + + let platform_cache_bytes = config.dapi.platform_cache_bytes; + + Self { + drive_client, + tenderdash_client, + websocket_client, + config, + platform_cache: crate::cache::LruResponseCache::new( + "platform_service", + platform_cache_bytes, + invalidation_subscription, + ), + subscriber_manager, + workers, + } + } +} + +#[async_trait::async_trait] +impl Platform for PlatformServiceImpl { + // Manually implemented methods + + /// Get the status of the whole system + /// + /// This method retrieves the current status of Drive, Tenderdash, and other components. + /// + /// See [`PlatformServiceImpl::get_status_impl`] for the implementation details. 
+ async fn get_status( + &self, + request: Request, + ) -> Result, Status> { + let method = type_name_of_val(request.get_ref()); + trace!(method, "Received get_status request"); + let result = self.get_status_impl(request).await; + + match &result { + Ok(_) => info!(method, "request succeeded"), + Err(status) => warn!(method, error = %status, "request failed"), + } + + result + } + + // State transition methods + /// Broadcast a state transition to the Dash Platform + /// + /// This method handles the complete broadcast flow including: + /// - State transition validation + /// - Broadcasting to Tenderdash + /// - Complex error handling and duplicate detection + /// + /// See [`PlatformServiceImpl::broadcast_state_transition_impl`] for implementation details. + async fn broadcast_state_transition( + &self, + request: Request, + ) -> Result, Status> { + let method = type_name_of_val(request.get_ref()); + trace!(method, "Received broadcast_state_transition request"); + let result = self.broadcast_state_transition_impl(request).await; + + match result { + Ok(response) => { + info!(method, "request succeeded"); + Ok(response.into()) + } + + Err(e) => { + let status = e.to_status(); + let metadata = status.metadata(); + warn!(method, error = %status, source = %e, ?metadata, "broadcast state transition request failed"); + Err(status) + } + } + } + + /// Implementation of waitForStateTransitionResult + /// + /// This method waits for a state transition to be processed and returns the result. + /// See [`PlatformServiceImpl::wait_for_state_transition_result_impl`] for implementation details. 
+ async fn wait_for_state_transition_result( + &self, + request: Request, + ) -> Result, Status> { + let method = type_name_of_val(request.get_ref()); + trace!(method, "Received wait_for_state_transition_result request"); + match self.wait_for_state_transition_result_impl(request).await { + Ok(response) => { + info!(method, "request succeeded"); + Ok(response) + } + Err(error) => { + warn!(method, error = %error, "wait for state transition result request failed"); + let response = + wait_for_state_transition_result::build_wait_for_state_transition_error_response( + &error, + ); + + Ok(response) + } + } + } + + // Identity-related methods + drive_method!( + get_identity, + dapi_grpc::platform::v0::GetIdentityRequest, + dapi_grpc::platform::v0::GetIdentityResponse + ); + drive_method!( + get_identity_keys, + dapi_grpc::platform::v0::GetIdentityKeysRequest, + dapi_grpc::platform::v0::GetIdentityKeysResponse + ); + drive_method!( + get_identities_contract_keys, + dapi_grpc::platform::v0::GetIdentitiesContractKeysRequest, + dapi_grpc::platform::v0::GetIdentitiesContractKeysResponse + ); + drive_method!( + get_identity_nonce, + dapi_grpc::platform::v0::GetIdentityNonceRequest, + dapi_grpc::platform::v0::GetIdentityNonceResponse + ); + + drive_method!( + get_identity_contract_nonce, + dapi_grpc::platform::v0::GetIdentityContractNonceRequest, + dapi_grpc::platform::v0::GetIdentityContractNonceResponse + ); + + drive_method!( + get_identity_balance, + dapi_grpc::platform::v0::GetIdentityBalanceRequest, + dapi_grpc::platform::v0::GetIdentityBalanceResponse + ); + + drive_method!( + get_identities_balances, + dapi_grpc::platform::v0::GetIdentitiesBalancesRequest, + dapi_grpc::platform::v0::GetIdentitiesBalancesResponse + ); + + drive_method!( + get_identity_balance_and_revision, + dapi_grpc::platform::v0::GetIdentityBalanceAndRevisionRequest, + dapi_grpc::platform::v0::GetIdentityBalanceAndRevisionResponse + ); + + drive_method!( + get_identity_by_public_key_hash, + 
dapi_grpc::platform::v0::GetIdentityByPublicKeyHashRequest, + dapi_grpc::platform::v0::GetIdentityByPublicKeyHashResponse + ); + + drive_method!( + get_identity_by_non_unique_public_key_hash, + dapi_grpc::platform::v0::GetIdentityByNonUniquePublicKeyHashRequest, + dapi_grpc::platform::v0::GetIdentityByNonUniquePublicKeyHashResponse + ); + + // Evonodes methods + drive_method!( + get_evonodes_proposed_epoch_blocks_by_ids, + dapi_grpc::platform::v0::GetEvonodesProposedEpochBlocksByIdsRequest, + dapi_grpc::platform::v0::GetEvonodesProposedEpochBlocksResponse + ); + + drive_method!( + get_evonodes_proposed_epoch_blocks_by_range, + dapi_grpc::platform::v0::GetEvonodesProposedEpochBlocksByRangeRequest, + dapi_grpc::platform::v0::GetEvonodesProposedEpochBlocksResponse + ); + + // Data contract methods + drive_method!( + get_data_contract, + dapi_grpc::platform::v0::GetDataContractRequest, + dapi_grpc::platform::v0::GetDataContractResponse + ); + + drive_method!( + get_data_contract_history, + dapi_grpc::platform::v0::GetDataContractHistoryRequest, + dapi_grpc::platform::v0::GetDataContractHistoryResponse + ); + + drive_method!( + get_data_contracts, + dapi_grpc::platform::v0::GetDataContractsRequest, + dapi_grpc::platform::v0::GetDataContractsResponse + ); + + // Document methods + drive_method!( + get_documents, + dapi_grpc::platform::v0::GetDocumentsRequest, + dapi_grpc::platform::v0::GetDocumentsResponse + ); + + // System methods + drive_method!( + get_consensus_params, + dapi_grpc::platform::v0::GetConsensusParamsRequest, + dapi_grpc::platform::v0::GetConsensusParamsResponse + ); + + drive_method!( + get_protocol_version_upgrade_state, + dapi_grpc::platform::v0::GetProtocolVersionUpgradeStateRequest, + dapi_grpc::platform::v0::GetProtocolVersionUpgradeStateResponse + ); + + drive_method!( + get_protocol_version_upgrade_vote_status, + dapi_grpc::platform::v0::GetProtocolVersionUpgradeVoteStatusRequest, + 
dapi_grpc::platform::v0::GetProtocolVersionUpgradeVoteStatusResponse + ); + + drive_method!( + get_epochs_info, + dapi_grpc::platform::v0::GetEpochsInfoRequest, + dapi_grpc::platform::v0::GetEpochsInfoResponse + ); + + drive_method!( + get_finalized_epoch_infos, + dapi_grpc::platform::v0::GetFinalizedEpochInfosRequest, + dapi_grpc::platform::v0::GetFinalizedEpochInfosResponse + ); + + drive_method!( + get_path_elements, + dapi_grpc::platform::v0::GetPathElementsRequest, + dapi_grpc::platform::v0::GetPathElementsResponse + ); + + drive_method!( + get_total_credits_in_platform, + dapi_grpc::platform::v0::GetTotalCreditsInPlatformRequest, + dapi_grpc::platform::v0::GetTotalCreditsInPlatformResponse + ); + + // Quorum methods + drive_method!( + get_current_quorums_info, + dapi_grpc::platform::v0::GetCurrentQuorumsInfoRequest, + dapi_grpc::platform::v0::GetCurrentQuorumsInfoResponse + ); + + // Contested resource methods + drive_method!( + get_contested_resources, + dapi_grpc::platform::v0::GetContestedResourcesRequest, + dapi_grpc::platform::v0::GetContestedResourcesResponse + ); + + drive_method!( + get_prefunded_specialized_balance, + dapi_grpc::platform::v0::GetPrefundedSpecializedBalanceRequest, + dapi_grpc::platform::v0::GetPrefundedSpecializedBalanceResponse + ); + + drive_method!( + get_contested_resource_vote_state, + dapi_grpc::platform::v0::GetContestedResourceVoteStateRequest, + dapi_grpc::platform::v0::GetContestedResourceVoteStateResponse + ); + + drive_method!( + get_contested_resource_voters_for_identity, + dapi_grpc::platform::v0::GetContestedResourceVotersForIdentityRequest, + dapi_grpc::platform::v0::GetContestedResourceVotersForIdentityResponse + ); + + drive_method!( + get_contested_resource_identity_votes, + dapi_grpc::platform::v0::GetContestedResourceIdentityVotesRequest, + dapi_grpc::platform::v0::GetContestedResourceIdentityVotesResponse + ); + + drive_method!( + get_vote_polls_by_end_date, + 
dapi_grpc::platform::v0::GetVotePollsByEndDateRequest, + dapi_grpc::platform::v0::GetVotePollsByEndDateResponse + ); + + // Token balance methods + drive_method!( + get_identity_token_balances, + dapi_grpc::platform::v0::GetIdentityTokenBalancesRequest, + dapi_grpc::platform::v0::GetIdentityTokenBalancesResponse + ); + + drive_method!( + get_identities_token_balances, + dapi_grpc::platform::v0::GetIdentitiesTokenBalancesRequest, + dapi_grpc::platform::v0::GetIdentitiesTokenBalancesResponse + ); + + // Token info methods + drive_method!( + get_identity_token_infos, + dapi_grpc::platform::v0::GetIdentityTokenInfosRequest, + dapi_grpc::platform::v0::GetIdentityTokenInfosResponse + ); + + drive_method!( + get_identities_token_infos, + dapi_grpc::platform::v0::GetIdentitiesTokenInfosRequest, + dapi_grpc::platform::v0::GetIdentitiesTokenInfosResponse + ); + + // Token status and pricing methods + drive_method!( + get_token_statuses, + dapi_grpc::platform::v0::GetTokenStatusesRequest, + dapi_grpc::platform::v0::GetTokenStatusesResponse + ); + + drive_method!( + get_token_direct_purchase_prices, + dapi_grpc::platform::v0::GetTokenDirectPurchasePricesRequest, + dapi_grpc::platform::v0::GetTokenDirectPurchasePricesResponse + ); + + drive_method!( + get_token_contract_info, + dapi_grpc::platform::v0::GetTokenContractInfoRequest, + dapi_grpc::platform::v0::GetTokenContractInfoResponse + ); + + // Token distribution methods + drive_method!( + get_token_pre_programmed_distributions, + dapi_grpc::platform::v0::GetTokenPreProgrammedDistributionsRequest, + dapi_grpc::platform::v0::GetTokenPreProgrammedDistributionsResponse + ); + + drive_method!( + get_token_perpetual_distribution_last_claim, + dapi_grpc::platform::v0::GetTokenPerpetualDistributionLastClaimRequest, + dapi_grpc::platform::v0::GetTokenPerpetualDistributionLastClaimResponse + ); + + drive_method!( + get_token_total_supply, + dapi_grpc::platform::v0::GetTokenTotalSupplyRequest, + 
dapi_grpc::platform::v0::GetTokenTotalSupplyResponse + ); + + // Group methods + drive_method!( + get_group_info, + dapi_grpc::platform::v0::GetGroupInfoRequest, + dapi_grpc::platform::v0::GetGroupInfoResponse + ); + + drive_method!( + get_group_infos, + dapi_grpc::platform::v0::GetGroupInfosRequest, + dapi_grpc::platform::v0::GetGroupInfosResponse + ); + + drive_method!( + get_group_actions, + dapi_grpc::platform::v0::GetGroupActionsRequest, + dapi_grpc::platform::v0::GetGroupActionsResponse + ); + + drive_method!( + get_group_action_signers, + dapi_grpc::platform::v0::GetGroupActionSignersRequest, + dapi_grpc::platform::v0::GetGroupActionSignersResponse + ); +} diff --git a/packages/rs-dapi/src/services/platform_service/wait_for_state_transition_result.rs b/packages/rs-dapi/src/services/platform_service/wait_for_state_transition_result.rs new file mode 100644 index 00000000000..3b1ff1437d3 --- /dev/null +++ b/packages/rs-dapi/src/services/platform_service/wait_for_state_transition_result.rs @@ -0,0 +1,312 @@ +use crate::error::DapiError; +use crate::services::platform_service::error_mapping::decode_consensus_error; +use crate::services::platform_service::{PlatformServiceImpl, TenderdashStatus}; +use crate::services::streaming_service::FilterType; +use base64::Engine; +use dapi_grpc::platform::v0::wait_for_state_transition_result_response::wait_for_state_transition_result_response_v0; +use dapi_grpc::platform::v0::{ + Proof, ResponseMetadata, WaitForStateTransitionResultRequest, + WaitForStateTransitionResultResponse, wait_for_state_transition_result_request, + wait_for_state_transition_result_response, +}; +use dapi_grpc::tonic::{Request, Response}; +use std::time::Duration; +use tokio::time::timeout; +use tracing::{Instrument, debug, trace}; + +impl PlatformServiceImpl { + /// Wait for a state transition result by subscribing to platform events and returning proofs when requested. 
+ pub async fn wait_for_state_transition_result_impl( + &self, + request: Request, + ) -> Result, DapiError> { + let inner = request.into_inner(); + let v0 = match inner.version { + Some(wait_for_state_transition_result_request::Version::V0(v0)) => v0, + None => { + return Err(DapiError::InvalidArgument( + "wait_for_state_transition_result request must have v0".to_string(), + )); + } + }; + + // Validate state transition hash + let state_transition_hash = v0.state_transition_hash; + if state_transition_hash.is_empty() { + return Err(DapiError::InvalidArgument( + "state transition hash is not specified".to_string(), + )); + } + + // Convert hash to commonly used representations + let hash_hex = hex::encode(&state_transition_hash).to_uppercase(); + let hash_base64 = base64::prelude::BASE64_STANDARD.encode(&state_transition_hash); + + let span = tracing::trace_span!("wait_for_state_transition_result", tx = %hash_hex); + + async move { + trace!("waitForStateTransitionResult called for hash: {}", hash_hex); + + // Check if WebSocket is connected + if !self.websocket_client.is_connected() { + return Err(DapiError::Unavailable( + "Tenderdash is not available".to_string(), + )); + } + + // RACE-FREE IMPLEMENTATION: Subscribe via subscription manager BEFORE checking existing state + trace!( + "Subscribing (manager) to platform tx for hash: {}", + hash_hex + ); + let sub_handle = self + .subscriber_manager + .add_subscription(FilterType::PlatformTxId(hash_hex.clone())) + .await; + + // Check if transaction already exists (after subscription is active) + trace!("Checking existing transaction for hash: {}", hash_hex); + match self.tenderdash_client.tx(hash_base64).await { + Ok(tx) => { + debug!(tx = hash_hex, "Transaction already exists, returning it"); + return self.build_response_from_existing_tx(tx, v0.prove).await; + } + Err(error) => { + debug!(?error, "Transaction not found, will wait for future events"); + } + }; + + // Wait for transaction event with timeout + let 
timeout_duration = + Duration::from_millis(self.config.dapi.state_transition_wait_timeout); + + trace!( + "Waiting for transaction event with timeout: {:?}", + timeout_duration + ); + + // Filter events to find our specific transaction + timeout(timeout_duration, async { + loop { + let result = sub_handle.recv().await; + match result { + Some(crate::services::streaming_service::StreamingEvent::PlatformTx { event }) => { + debug!(tx = hash_hex, "Received matching transaction event"); + return self.build_response_from_event(event, v0.prove).await; + } + Some(message) => { + // Ignore other message types + trace!( + ?message, + "Received non-matching message, ignoring; this should not happen due to filtering" + ); + continue; + } + None => { + debug!("Platform tx subscription channel closed unexpectedly"); + return Err(DapiError::Unavailable( + "Platform tx subscription channel closed unexpectedly".to_string(), + )); + } + } + } + }) + .await + .map_err(|msg| DapiError::Timeout(msg.to_string())) + .inspect_err(|e| { + tracing::debug!( + error = %e, + tx = %hash_hex, + "wait_for_state_transition_result: timed out" + ); + })? + } + .instrument(span) + .await + } + + /// Build a response for a transaction already known to Tenderdash, optionally generating proofs. 
+ async fn build_response_from_existing_tx( + &self, + tx_response: crate::clients::tenderdash_client::TxResponse, + prove: bool, + ) -> Result, DapiError> { + let mut response_v0 = + wait_for_state_transition_result_response::WaitForStateTransitionResultResponseV0 { + result: None, + metadata: None, + }; + + // Check if transaction had an error + let tx_result = &tx_response.tx_result; + + if tx_result.code != 0 { + // Transaction had an error + let consensus_error_serialized = if tx_result.info.is_empty() { + None + } else { + decode_consensus_error(tx_result.info.clone()) + }; + + let error = TenderdashStatus::new( + i64::from(tx_result.code), + if tx_result.data.is_empty() { + None + } else { + Some(tx_result.data.clone()) + }, + consensus_error_serialized, + ); + return Ok(error.into()); + } + + // No error; generate proof if requested + if prove && !tx_response.tx.is_empty() { + if let Ok(tx_data) = + base64::prelude::Engine::decode(&base64::prelude::BASE64_STANDARD, &tx_response.tx) + { + match self.fetch_proof_for_state_transition(tx_data).await { + Ok((proof, metadata)) => { + response_v0.result = Some( + wait_for_state_transition_result_response_v0::Result::Proof(proof), + ); + response_v0.metadata = Some(metadata); + } + Err(e) => { + debug!("Failed to fetch proof: {}", e); + // Continue without proof + } + } + } + } + + let body = WaitForStateTransitionResultResponse { + version: Some(wait_for_state_transition_result_response::Version::V0( + response_v0, + )), + }; + + Ok(body.into()) + } + + /// Build a response from a streamed transaction event, handling success and error cases. 
+ async fn build_response_from_event( + &self, + transaction_event: crate::clients::TransactionEvent, + prove: bool, + ) -> Result, DapiError> { + // Check transaction result + match transaction_event.result { + crate::clients::TransactionResult::Success => { + let mut response_v0 = + wait_for_state_transition_result_response::WaitForStateTransitionResultResponseV0 { + result: None, + metadata: None, + }; + // Success case - generate proof if requested + if prove && let Some(tx_bytes) = transaction_event.tx { + match self.fetch_proof_for_state_transition(tx_bytes).await { + Ok((proof, metadata)) => { + response_v0.result = Some( + wait_for_state_transition_result_response::wait_for_state_transition_result_response_v0::Result::Proof(proof), + ); + response_v0.metadata = Some(metadata); + } + Err(e) => { + debug!("Failed to fetch proof: {}", e); + // Continue without proof + } + } + } + + let body = WaitForStateTransitionResultResponse { + version: Some(wait_for_state_transition_result_response::Version::V0( + response_v0, + )), + }; + + Ok(body.into()) + } + crate::clients::TransactionResult::Error { code, info, data } => { + // Error case - create error response + tracing::debug!( + code, + info = ?info, + data = ?data, + "Transaction event indicates error" + ); + let consensus_error = if info.is_empty() { + None + } else { + decode_consensus_error(info.clone()) + }; + let error = TenderdashStatus::new(code as i64, data, consensus_error); + let result: Response = error.into(); + + Ok(result) + } + } + } + + /// Fetch Drive proofs for the provided state transition bytes, returning proof and metadata. 
+ async fn fetch_proof_for_state_transition( + &self, + tx_bytes: Vec, + ) -> crate::DAPIResult<(Proof, ResponseMetadata)> { + // Create a GetProofsRequest with the state transition + let request = dapi_grpc::drive::v0::GetProofsRequest { + state_transition: tx_bytes.clone(), + }; + + // Get the internal client and make the request + let mut internal_client = self.drive_client.get_internal_client(); + + match internal_client.get_proofs(request).await { + Ok(response) => { + let inner = response.into_inner(); + + let proof = inner + .proof + .ok_or(crate::DapiError::no_valid_tx_proof(&tx_bytes))?; + let metadata = inner + .metadata + .ok_or(crate::DapiError::no_valid_tx_proof(&tx_bytes))?; + + Ok((proof, metadata)) + } + Err(e) => { + debug!("Failed to fetch proof from Drive: {}", e); + Err(crate::DapiError::Client(format!( + "Failed to fetch proof: {}", + e + ))) + } + } + } +} + +/// Convert a `DapiError` into the gRPC error response expected by waitForStateTransitionResult callers. +pub(super) fn build_wait_for_state_transition_error_response( + error: &DapiError, +) -> Response { + // TenderdashStatus has everything we need + let tenderdash_status = if let DapiError::TenderdashClientError(e) = error { + e.clone() + } else { + let status = error.to_status(); + let message = if status.message().is_empty() { + None + } else { + Some(status.message().to_string()) + }; + TenderdashStatus::new(status.code() as i64, message, None) + }; + + tracing::debug!( + error = %error, + ?tenderdash_status, + code = tenderdash_status.code, + "Mapping DapiError to WaitForStateTransitionResultResponse" + ); + tenderdash_status.into() +} diff --git a/packages/rs-dapi/src/services/streaming_service/block_header_stream.rs b/packages/rs-dapi/src/services/streaming_service/block_header_stream.rs new file mode 100644 index 00000000000..ce8f7601ff9 --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/block_header_stream.rs @@ -0,0 +1,596 @@ +use std::collections::HashSet; 
+use std::sync::Arc; + +use dapi_grpc::core::v0::block_headers_with_chain_locks_request::FromBlock; +use dapi_grpc::core::v0::{ + BlockHeaders, BlockHeadersWithChainLocksRequest, BlockHeadersWithChainLocksResponse, +}; +use dapi_grpc::tonic::{Request, Response, Status}; +use dashcore_rpc::dashcore::consensus::encode::{ + deserialize as deserialize_consensus, serialize as serialize_consensus, +}; +use dashcore_rpc::dashcore::hashes::Hash; +use tokio::sync::{Mutex as AsyncMutex, mpsc, watch}; +use tokio_stream::wrappers::ReceiverStream; +use tracing::{debug, trace, warn}; + +use crate::DapiError; +use crate::services::streaming_service::{ + FilterType, StreamingEvent, StreamingServiceImpl, SubscriptionHandle, +}; + +const BLOCK_HEADER_STREAM_BUFFER: usize = 512; + +type BlockHeaderResponseResult = Result; +type BlockHeaderResponseSender = mpsc::Sender; +type BlockHeaderResponseStream = ReceiverStream; +type BlockHeaderResponse = Response; +type DeliveredHashSet = Arc>>>; +type DeliveryGateSender = watch::Sender; +type DeliveryGateReceiver = watch::Receiver; + +const MAX_HEADERS_PER_BATCH: usize = 500; +impl StreamingServiceImpl { + pub async fn subscribe_to_block_headers_with_chain_locks_impl( + &self, + request: Request, + ) -> Result { + trace!("subscribe_to_block_headers_with_chain_locks_impl=begin"); + let req = request.into_inner(); + + // Validate parameters + let count = req.count; + + let from_block = match req.from_block { + Some(FromBlock::FromBlockHeight(height)) => { + if height == 0 { + debug!(height, "block_headers=invalid_starting_height"); + return Err(Status::invalid_argument( + "Minimum value for `fromBlockHeight` is 1", + )); + } + FromBlock::FromBlockHeight(height) + } + Some(FromBlock::FromBlockHash(ref hash)) if hash.is_empty() => { + debug!("block_headers=empty_from_block_hash"); + return Err(Status::invalid_argument("fromBlockHash cannot be empty")); + } + Some(from_block) => from_block, + None => { + 
debug!("block_headers=missing_from_block"); + return Err(Status::invalid_argument("from_block is required")); + } + }; + + trace!(count, "block_headers=request_parsed"); + + let response = if count > 0 { + self.handle_historical_mode(from_block, count).await? + } else { + self.handle_combined_mode(from_block).await? + }; + + Ok(response) + } + + async fn handle_historical_mode( + &self, + from_block: FromBlock, + count: u32, + ) -> Result { + let (tx, rx) = mpsc::channel(BLOCK_HEADER_STREAM_BUFFER); + + self.send_initial_chainlock(tx.clone()).await?; + + self.spawn_fetch_historical_headers(from_block, Some(count as usize), None, tx, None, None) + .await?; + + let stream: BlockHeaderResponseStream = ReceiverStream::new(rx); + debug!("block_headers=historical_stream_ready"); + Ok(Response::new(stream)) + } + + async fn handle_combined_mode( + &self, + from_block: FromBlock, + ) -> Result { + let (tx, rx) = mpsc::channel(BLOCK_HEADER_STREAM_BUFFER); + let delivered_hashes: DeliveredHashSet = Arc::new(AsyncMutex::new(HashSet::new())); + let (delivery_gate_tx, delivery_gate_rx) = watch::channel(false); + + let subscriber_id = self + .start_live_stream( + tx.clone(), + delivered_hashes.clone(), + delivery_gate_rx.clone(), + ) + .await; + self.send_initial_chainlock(tx.clone()).await?; + self.spawn_fetch_historical_headers( + from_block, + None, + Some(delivered_hashes), + tx, + Some(delivery_gate_tx), + Some(subscriber_id.clone()), + ) + .await?; + let stream: BlockHeaderResponseStream = ReceiverStream::new(rx); + debug!( + subscriber_id = subscriber_id.as_str(), + "block_headers=stream_ready" + ); + Ok(Response::new(stream)) + } + + async fn spawn_fetch_historical_headers( + &self, + from_block: FromBlock, + limit: Option, + delivered_hashes: Option, + tx: BlockHeaderResponseSender, + gate: Option, + subscriber_id: Option, + ) -> Result<(), Status> { + let service = self.clone(); + + self.workers.spawn(async move { + let result = service + .fetch_historical_blocks( + 
from_block, + limit, + delivered_hashes, + tx.clone(), + ) + .await; + + if let Some(gate) = gate { + let _ = gate.send(true); + } + // watch receivers wake via the send above; no separate notification needed. + + match result { + Ok(()) => { + if let Some(ref id) = subscriber_id { + debug!(subscriber_id = id.as_str(), "block_headers=historical_fetch_completed"); + } else { + debug!("block_headers=historical_fetch_completed"); + } + Ok(()) + } + Err(status) => { + if let Some(ref id) = subscriber_id { + debug!(subscriber_id = id.as_str(), error = %status, "block_headers=historical_fetch_failed"); + } else { + debug!(error = %status, "block_headers=historical_fetch_failed"); + } + let _ = tx.send(Err(status.clone())).await; + Err(DapiError::from(status)) + } + } + }); + + Ok(()) + } + + async fn start_live_stream( + &self, + tx: BlockHeaderResponseSender, + delivered_hashes: DeliveredHashSet, + delivery_gate: DeliveryGateReceiver, + ) -> String { + let filter = FilterType::CoreAllBlocks; + let block_handle = self.subscriber_manager.add_subscription(filter).await; + let subscriber_id = block_handle.id().to_string(); + debug!( + subscriber_id = subscriber_id.as_str(), + "block_headers=subscription_created" + ); + + let chainlock_handle = self + .subscriber_manager + .add_subscription(FilterType::CoreChainLocks) + .await; + debug!( + subscriber_id = chainlock_handle.id(), + "block_headers=chainlock_subscription_created" + ); + + self.workers.spawn(async move { + Self::block_header_worker( + block_handle, + chainlock_handle, + tx, + delivered_hashes, + delivery_gate, + ) + .await; + Ok::<(), DapiError>(()) + }); + + subscriber_id + } + + async fn send_initial_chainlock(&self, tx: BlockHeaderResponseSender) -> Result<(), Status> { + if let Some(chain_lock) = self + .core_client + .get_best_chain_lock() + .await + .map_err(Status::from)? 
+ { + trace!( + height = chain_lock.block_height, + block_hash = %chain_lock.block_hash, + "block_headers=initial_chain_lock" + ); + let chain_lock_bytes = serialize_consensus(&chain_lock); + let response = BlockHeadersWithChainLocksResponse { + responses: Some( + dapi_grpc::core::v0::block_headers_with_chain_locks_response::Responses::ChainLock( + chain_lock_bytes, + ), + ), + }; + // Failure means client is already gone; treat as success. + let _ = tx.send(Ok(response)).await; + } + Ok(()) + } + + async fn block_header_worker( + block_handle: SubscriptionHandle, + chainlock_handle: SubscriptionHandle, + tx: BlockHeaderResponseSender, + delivered_hashes: DeliveredHashSet, + mut delivery_gate: DeliveryGateReceiver, + ) { + let subscriber_id = block_handle.id().to_string(); + let mut pending: Vec = Vec::new(); + let mut gated = !*delivery_gate.borrow(); + + loop { + tokio::select! { + gate_change = delivery_gate.changed(), if gated => { + if gate_change.is_err() { + break; + } + gated = !*delivery_gate.borrow(); + if !gated + && !Self::flush_pending(&subscriber_id, &tx, &delivered_hashes, &mut pending).await { + break; + } + } + message = block_handle.recv() => { + match message { + Some(event) => { + if gated { + pending.push(event); + continue; + } + if !Self::forward_event(event, &subscriber_id, &tx, &delivered_hashes).await { + break; + } + } + None => break, + } + } + message = chainlock_handle.recv() => { + match message { + Some(event) => { + if gated { + pending.push(event); + continue; + } + if !Self::forward_event(event, &subscriber_id, &tx, &delivered_hashes).await { + break; + } + } + None => break, + } + } + } + } + + debug!( + subscriber_id = subscriber_id.as_str(), + "block_headers=subscription_task_finished" + ); + } + + async fn flush_pending( + subscriber_id: &str, + tx: &BlockHeaderResponseSender, + delivered_hashes: &DeliveredHashSet, + pending: &mut Vec, + ) -> bool { + if pending.is_empty() { + return true; + } + + let queued: Vec = 
std::mem::take(pending); + for event in queued { + if !Self::forward_event(event, subscriber_id, tx, delivered_hashes).await { + return false; + } + } + true + } + + /// Forward event to the client, returns false if the client disconnected. + async fn forward_event( + event: StreamingEvent, + subscriber_id: &str, + tx: &BlockHeaderResponseSender, + delivered_hashes: &DeliveredHashSet, + ) -> bool { + let maybe_response = match event { + StreamingEvent::CoreRawBlock { data } => { + let Some((hash_bytes, block_hash_hex)) = super::block_hash_from_block_bytes(&data) + else { + // invalid block data received + warn!( + subscriber_id, + block = %hex::encode(&data), + "block_headers=forward_block_invalid_block - it should not happen, report this issue" + ); + return true; + }; + + let mut allow_forward = true; + + { + // scope for the lock + let mut hashes = delivered_hashes.lock().await; + if hashes.remove(&hash_bytes[..]) { + trace!( + subscriber_id, + block_hash = %block_hash_hex, + "block_headers=skip_duplicate_block" + ); + allow_forward = false; + } else { + hashes.insert(hash_bytes.into()); + } + } + + if !allow_forward { + return true; + } + + if data.len() < 80 { + debug!( + subscriber_id, + payload_size = data.len(), + "block_headers=forward_block_short_payload" + ); + return true; + } + + let header_bytes = data[..80].to_vec(); + trace!( + subscriber_id, + block_hash = %block_hash_hex, + payload_size = data.len(), + "block_headers=forward_block" + ); + let block_headers = BlockHeaders { + headers: vec![header_bytes], + }; + Some(Ok(BlockHeadersWithChainLocksResponse { + responses: Some( + dapi_grpc::core::v0::block_headers_with_chain_locks_response::Responses::BlockHeaders(block_headers), + ), + })) + } + StreamingEvent::CoreChainLock { data } => { + if tracing::enabled!(tracing::Level::TRACE) { + if let Ok(chain_lock) = + deserialize_consensus::(&data) + { + trace!( + subscriber_id, + height = chain_lock.block_height, + block_hash = %chain_lock.block_hash, + 
payload_size = data.len(), + "block_headers=forward_chain_lock" + ); + } else { + trace!( + subscriber_id, + payload_size = data.len(), + "block_headers=forward_chain_lock" + ); + } + } + Some(Ok(BlockHeadersWithChainLocksResponse { + responses: Some( + dapi_grpc::core::v0::block_headers_with_chain_locks_response::Responses::ChainLock(data), + ), + })) + } + other => { + let summary = super::summarize_streaming_event(&other); + trace!( + subscriber_id, + event = %summary, + "block_headers=ignore_event" + ); + None + } + }; + + if let Some(response) = maybe_response.clone() + && tx.send(response).await.is_err() + { + debug!(subscriber_id, "block_headers=client_disconnected"); + return false; + } + + trace!( + subscriber_id, + response=?maybe_response, "block_headers=event_forwarded" + ); + + true + } + + async fn fetch_historical_blocks( + &self, + from_block: FromBlock, + limit: Option, + delivered_hashes: Option, + tx: BlockHeaderResponseSender, + ) -> Result<(), Status> { + use std::str::FromStr; + + let best_height = self + .core_client + .get_block_count() + .await + .map_err(Status::from)? 
as usize; + + let (start_height, available, desired) = match from_block { + FromBlock::FromBlockHash(hash) => { + let hash_hex = hex::encode(&hash); + let block_hash = dashcore_rpc::dashcore::BlockHash::from_str(&hash_hex) + .map_err(|e| Status::invalid_argument(format!("Invalid block hash: {}", e)))?; + let header = self + .core_client + .get_block_header_info(&block_hash) + .await + .map_err(Status::from)?; + let start = header.height as usize; + let available = best_height + .checked_sub(start) + .and_then(|diff| diff.checked_add(1)) + .unwrap_or(0); + let desired = limit.unwrap_or(available); + debug!(start, desired, "block_headers=historical_from_hash_request"); + (start, available, desired) + } + FromBlock::FromBlockHeight(height) => { + let start = height as usize; + if start == 0 { + return Err(Status::invalid_argument( + "Minimum value for `fromBlockHeight` is 1", + )); + } + let available = best_height + .checked_sub(start) + .and_then(|diff| diff.checked_add(1)) + .unwrap_or(0); + let desired = limit.unwrap_or(available); + debug!( + start, + desired, "block_headers=historical_from_height_request" + ); + (start, available, desired) + } + }; + + if available == 0 { + // historical mode but no available headers + if limit.is_some() { + debug!(start_height, best_height, "block_headers=start_beyond_tip"); + return Err(Status::not_found(format!( + "Block {} not found", + start_height + ))); + } + // Combined mode: no historical data yet; proceed with live stream. 
+ return Ok(()); + } + + if desired == 0 { + return Ok(()); + } + + if desired > available { + debug!( + start_height, + requested = desired, + max_available = available, + "block_headers=count_exceeds_tip" + ); + return Err(Status::invalid_argument("count exceeds chain tip")); + } + + let mut remaining = desired; + let mut current_height = start_height; + + while remaining > 0 { + let batch_size = remaining.min(MAX_HEADERS_PER_BATCH); + + let mut response_headers = Vec::with_capacity(batch_size); + let mut hashes_to_store: Vec> = Vec::with_capacity(batch_size); + + for offset in 0..batch_size { + let height = (current_height + offset) as u32; + let hash = self + .core_client + .get_block_hash(height) + .await + .map_err(Status::from)?; + trace!( + height, + block_hash = %hash, + "block_headers=historical_header_fetched" + ); + + let header_bytes = self + .core_client + .get_block_header_bytes_by_hash(hash) + .await + .map_err(Status::from)?; + + if header_bytes.len() < 80 { + return Err(Status::internal( + "Received malformed block header (len < 80)", + )); + } + + response_headers.push(header_bytes[..80].to_vec()); + + if delivered_hashes.is_some() { + hashes_to_store.push(hash.to_byte_array().to_vec()); + } + } + + if let Some(ref shared) = delivered_hashes { + let mut hashes = shared.lock().await; + for hash in hashes_to_store { + trace!( + block_hash = %hex::encode(&hash), + "block_headers=delivered_hash_recorded" + ); + hashes.insert(hash); + } + } + + let response = BlockHeadersWithChainLocksResponse { + responses: Some( + dapi_grpc::core::v0::block_headers_with_chain_locks_response::Responses::BlockHeaders( + BlockHeaders { + headers: response_headers, + }, + ), + ), + }; + + if tx.send(Ok(response)).await.is_err() { + debug!("block_headers=historical_client_disconnected"); + return Ok(()); + } + + trace!( + current_height, + batch_size, remaining, "block_headers=historical_batch_sent" + ); + + remaining = remaining.saturating_sub(batch_size); + 
current_height += batch_size; + } + + Ok(()) + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/bloom.rs b/packages/rs-dapi/src/services/streaming_service/bloom.rs new file mode 100644 index 00000000000..f71ca8a1971 --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/bloom.rs @@ -0,0 +1,425 @@ +use std::sync::Arc; + +use dash_spv::bloom::utils::{extract_pubkey_hash, outpoint_to_bytes}; +use dashcore_rpc::dashcore::bloom::{BloomFilter as CoreBloomFilter, BloomFlags}; +use dashcore_rpc::dashcore::script::Instruction; +use dashcore_rpc::dashcore::{OutPoint, ScriptBuf, Transaction as CoreTx, Txid}; + +fn script_matches(filter: &CoreBloomFilter, script: &ScriptBuf) -> bool { + let script_bytes = script.as_bytes(); + if filter.contains(script_bytes) { + return true; + } + + if let Some(pubkey_hash) = extract_pubkey_hash(script.as_script()) { + if filter.contains(&pubkey_hash) { + return true; + } + } + + extract_pushdatas(script_bytes) + .into_iter() + .any(|data| filter.contains(&data)) +} + +#[inline] +fn txid_to_be_bytes(txid: &Txid) -> Vec { + use dashcore_rpc::dashcore::hashes::Hash; + let mut arr = txid.to_byte_array(); + arr.reverse(); + arr.to_vec() +} + +fn is_pubkey_script(script: &ScriptBuf) -> bool { + let bytes = script.as_bytes(); + if bytes.len() >= 35 && (bytes[0] == 33 || bytes[0] == 65) { + return true; + } + bytes.contains(&33u8) + || bytes.contains(&65u8) + || extract_pubkey_hash(script.as_script()).is_some() +} + +pub fn extract_pushdatas(script: &[u8]) -> Vec> { + let script_buf = ScriptBuf::from_bytes(script.to_vec()); + script_buf + .as_script() + .instructions() + .filter_map(|res| match res { + Ok(Instruction::PushBytes(pb)) => Some(pb.as_bytes().to_vec()), + _ => None, + }) + .collect() +} + +pub fn matches_transaction( + filter_lock: Arc>, + tx: &CoreTx, + flags: BloomFlags, +) -> bool { + let filter = match filter_lock.read().inspect_err(|e| { + tracing::debug!("Failed to acquire read lock for bloom filter: 
{}", e); + }) { + Ok(guard) => guard, + Err(_) => return false, + }; + + let txid = tx.txid(); + let txid_be = txid_to_be_bytes(&txid); + if filter.contains(&txid_be) { + return true; + } + + for (index, out) in tx.output.iter().enumerate() { + if script_matches(&filter, &out.script_pubkey) { + if flags == BloomFlags::All + || (flags == BloomFlags::PubkeyOnly && is_pubkey_script(&out.script_pubkey)) + { + let outpoint_bytes = outpoint_to_bytes(&OutPoint { + txid, + vout: index as u32, + }); + drop(filter); + if let Ok(mut f) = filter_lock.write().inspect_err(|e| { + tracing::debug!("Failed to acquire write lock for bloom filter: {}", e); + }) { + f.insert(&outpoint_bytes); + } + } + return true; + } + } + + for input in tx.input.iter() { + let outpoint_bytes = outpoint_to_bytes(&input.previous_output); + if filter.contains(&outpoint_bytes) || script_matches(&filter, &input.script_sig) { + return true; + } + } + + false +} + +pub(crate) fn bloom_flags_from_int>(flags: I) -> BloomFlags { + let flag = flags.try_into().unwrap_or(u8::MAX); + match flag { + 0 => BloomFlags::None, + 1 => BloomFlags::All, + 2 => BloomFlags::PubkeyOnly, + _ => { + tracing::debug!("invalid bloom flags value {flag}"); + BloomFlags::None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use dash_spv::bloom::utils::outpoint_to_bytes; + use dashcore_rpc::dashcore::bloom::BloomFilter as CoreBloomFilter; + use dashcore_rpc::dashcore::hashes::Hash; + use dashcore_rpc::dashcore::{OutPoint, PubkeyHash}; + use std::str::FromStr; + use std::sync::RwLock; + + #[test] + fn test_insert_and_contains_roundtrip() { + let mut filter = CoreBloomFilter::from_bytes(vec![0; 128], 3, 0, BloomFlags::None).unwrap(); + let key = b"hello"; + assert!(!filter.contains(key)); + filter.insert(key); + assert!(filter.contains(key)); + } + + #[test] + fn test_extract_pushdatas_simple() { + let mut script = vec![0x76, 0xa9, 0x14]; + script.extend(vec![0u8; 20]); + script.extend([0x88, 0xac]); + let parts = 
extract_pushdatas(&script); + assert_eq!(parts.len(), 1); + assert_eq!(parts[0].len(), 20); + } + + #[test] + fn test_txid_endianness_conversion() { + let hex_be = "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"; + let txid = Txid::from_str(hex_be).expect("valid txid hex"); + let be_bytes = super::txid_to_be_bytes(&txid); + assert_eq!(be_bytes, hex::decode(hex_be).unwrap()); + } + + #[test] + fn test_matches_txid() { + let tx = CoreTx { + version: 2, + lock_time: 0, + input: vec![], + output: vec![], + special_transaction_payload: None, + }; + let txid_be = super::txid_to_be_bytes(&tx.txid()); + let mut filter = CoreBloomFilter::from_bytes(vec![0; 128], 3, 0, BloomFlags::None).unwrap(); + filter.insert(&txid_be); + assert!(matches_transaction( + Arc::new(RwLock::new(filter)), + &tx, + BloomFlags::None + )); + } + + #[test] + fn test_output_match_and_update_outpoint() { + use dashcore_rpc::dashcore::{ScriptBuf, Transaction as CoreTx, TxOut}; + let h160 = PubkeyHash::from_byte_array([0x11; 20]); + let script = ScriptBuf::new_p2pkh(&h160); + let tx = CoreTx { + version: 2, + lock_time: 0, + input: vec![], + output: vec![TxOut { + value: 1000, + script_pubkey: script, + }], + special_transaction_payload: None, + }; + let mut filter = + CoreBloomFilter::from_bytes(vec![0; 256], 5, 12345, BloomFlags::All).unwrap(); + filter.insert(&h160.to_byte_array()); + let filter_lock = Arc::new(RwLock::new(filter)); + assert!(matches_transaction( + filter_lock.clone(), + &tx, + BloomFlags::All + )); + let outpoint = outpoint_to_bytes(&OutPoint { + txid: tx.txid(), + vout: 0, + }); + let guard = filter_lock.read().unwrap(); + assert!(guard.contains(&outpoint)); + } + + #[test] + fn test_all_flag_updates_enable_second_tx_match() { + use dashcore_rpc::dashcore::{ScriptBuf, Transaction as CoreTx, TxIn, TxOut}; + let h160 = PubkeyHash::from_byte_array([0x55; 20]); + let script = ScriptBuf::new_p2pkh(&h160); + let tx_a = CoreTx { + version: 2, + lock_time: 0, + input: 
vec![], + output: vec![TxOut { + value: 1000, + script_pubkey: script, + }], + special_transaction_payload: None, + }; + let tx_b = CoreTx { + version: 2, + lock_time: 0, + input: vec![TxIn { + previous_output: OutPoint { + txid: tx_a.txid(), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: 0xFFFFFFFF, + witness: Default::default(), + }], + output: vec![], + special_transaction_payload: None, + }; + let mut filter = + CoreBloomFilter::from_bytes(vec![0; 1024], 5, 123, BloomFlags::All).unwrap(); + filter.insert(&h160.to_byte_array()); + let filter_lock = Arc::new(RwLock::new(filter)); + assert!(matches_transaction( + filter_lock.clone(), + &tx_a, + BloomFlags::All + )); + assert!(matches_transaction( + filter_lock.clone(), + &tx_b, + BloomFlags::All + )); + } + + #[test] + fn test_none_flag_does_not_update_for_second_tx() { + use dashcore_rpc::dashcore::{ScriptBuf, Transaction as CoreTx, TxIn, TxOut}; + let h160 = PubkeyHash::from_byte_array([0x66; 20]); + let script = ScriptBuf::new_p2pkh(&h160); + let tx_a = CoreTx { + version: 2, + lock_time: 0, + input: vec![], + output: vec![TxOut { + value: 1000, + script_pubkey: script, + }], + special_transaction_payload: None, + }; + let tx_b = CoreTx { + version: 2, + lock_time: 0, + input: vec![TxIn { + previous_output: OutPoint { + txid: tx_a.txid(), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: 0xFFFFFFFF, + witness: Default::default(), + }], + output: vec![], + special_transaction_payload: None, + }; + let mut filter = + CoreBloomFilter::from_bytes(vec![0; 2048], 5, 456, BloomFlags::None).unwrap(); + filter.insert(&h160.to_byte_array()); + let filter_lock = Arc::new(RwLock::new(filter)); + assert!(matches_transaction( + filter_lock.clone(), + &tx_a, + BloomFlags::None + )); + assert!(!matches_transaction( + filter_lock.clone(), + &tx_b, + BloomFlags::None + )); + } + + #[test] + fn test_p2sh_and_opreturn_do_not_update_under_pubkeyonly() { + use dashcore_rpc::dashcore::{ScriptBuf, ScriptHash, 
Transaction as CoreTx, TxOut}; + let sh = ScriptHash::from_byte_array([0x77; 20]); + let p2sh = ScriptBuf::new_p2sh(&sh); + let mut opret_bytes = Vec::new(); + opret_bytes.push(0x6a); + opret_bytes.push(8u8); + opret_bytes.extend([0xAB; 8]); + let mut filter = + CoreBloomFilter::from_bytes(vec![0; 1024], 5, 789, BloomFlags::PubkeyOnly).unwrap(); + filter.insert(&sh.to_byte_array()); + filter.insert(&[0xAB; 8]); + let tx_sh = CoreTx { + version: 2, + lock_time: 0, + input: vec![], + output: vec![TxOut { + value: 1, + script_pubkey: p2sh, + }], + special_transaction_payload: None, + }; + let filter_lock = Arc::new(RwLock::new(filter)); + assert!(matches_transaction( + filter_lock.clone(), + &tx_sh, + BloomFlags::PubkeyOnly + )); + let outpoint = outpoint_to_bytes(&OutPoint { + txid: tx_sh.txid(), + vout: 0, + }); + assert!(!filter_lock.read().unwrap().contains(&outpoint)); + let mut opret_bytes2 = Vec::new(); + opret_bytes2.push(0x6a); + opret_bytes2.push(8u8); + opret_bytes2.extend([0xAB; 8]); + let tx_or = CoreTx { + version: 2, + lock_time: 0, + input: vec![], + output: vec![TxOut { + value: 0, + script_pubkey: ScriptBuf::from_bytes(opret_bytes2), + }], + special_transaction_payload: None, + }; + assert!(matches_transaction( + filter_lock.clone(), + &tx_or, + BloomFlags::PubkeyOnly + )); + let outpoint2 = outpoint_to_bytes(&OutPoint { + txid: tx_or.txid(), + vout: 0, + }); + assert!(!filter_lock.read().unwrap().contains(&outpoint2)); + } + + #[test] + fn test_nonminimal_push_still_matches() { + use dashcore_rpc::dashcore::{ScriptBuf, Transaction as CoreTx, TxOut}; + let script = ScriptBuf::from_bytes(vec![0x4c, 0x03, 0xDE, 0xAD, 0xBE]); + let tx = CoreTx { + version: 2, + lock_time: 0, + input: vec![], + output: vec![TxOut { + value: 1, + script_pubkey: script, + }], + special_transaction_payload: None, + }; + let mut filter = + CoreBloomFilter::from_bytes(vec![0; 1024], 5, 321, BloomFlags::None).unwrap(); + filter.insert(&[0xDE, 0xAD, 0xBE]); + let filter_lock = 
Arc::new(RwLock::new(filter)); + assert!(matches_transaction( + filter_lock.clone(), + &tx, + BloomFlags::None + )); + } + + #[test] + fn test_witness_only_pushdata_does_not_match() { + use dashcore_rpc::dashcore::{OutPoint, ScriptBuf, Transaction as CoreTx, TxIn, TxOut}; + let pubkey = [0x02; 33]; + let input = TxIn { + previous_output: OutPoint { + txid: Txid::from_str( + "0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: 0xFFFFFFFF, + witness: vec![pubkey.to_vec()].into(), + }; + let tx = CoreTx { + version: 2, + lock_time: 0, + input: vec![input], + output: vec![TxOut { + value: 0, + script_pubkey: ScriptBuf::new(), + }], + special_transaction_payload: None, + }; + let mut filter = + CoreBloomFilter::from_bytes(vec![0; 4096], 5, 654, BloomFlags::None).unwrap(); + filter.insert(&pubkey); + let filter_lock = Arc::new(RwLock::new(filter)); + assert!(!matches_transaction( + filter_lock.clone(), + &tx, + BloomFlags::None + )); + } + + #[test] + fn test_bloom_flags_from_int_mapping() { + assert!(matches!(bloom_flags_from_int(0u32), BloomFlags::None)); + assert!(matches!(bloom_flags_from_int(1u32), BloomFlags::All)); + assert!(matches!(bloom_flags_from_int(2u32), BloomFlags::PubkeyOnly)); + assert!(matches!(bloom_flags_from_int(255u32), BloomFlags::None)); + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/masternode_list_stream.rs b/packages/rs-dapi/src/services/streaming_service/masternode_list_stream.rs new file mode 100644 index 00000000000..4ed5d39bf25 --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/masternode_list_stream.rs @@ -0,0 +1,101 @@ +use dapi_grpc::core::v0::{MasternodeListRequest, MasternodeListResponse}; +use dapi_grpc::tonic::{Request, Response, Status}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tracing::debug; + +use crate::DapiError; +use crate::services::streaming_service::{FilterType, 
StreamingEvent, StreamingServiceImpl}; + +const MASTERNODE_STREAM_BUFFER: usize = 512; + +impl StreamingServiceImpl { + pub async fn subscribe_to_masternode_list_impl( + &self, + _request: Request, + ) -> Result>>, Status> { + // Create filter (no filtering needed for masternode list - all updates) + let filter = FilterType::CoreAllMasternodes; + + // Create channel for streaming responses + let (tx, rx) = mpsc::channel(MASTERNODE_STREAM_BUFFER); + + // Add subscription to manager + let subscription_handle = self.subscriber_manager.add_subscription(filter).await; + + let subscriber_id = subscription_handle.id(); + debug!(subscriber_id, "masternode_list_stream=subscribed"); + + // Spawn task to convert internal messages to gRPC responses + let sub_handle = subscription_handle.clone(); + let tx_stream = tx.clone(); + let msg_convert_worker = self.workers.spawn(async move { + while let Some(message) = sub_handle.recv().await { + let response = match message { + StreamingEvent::CoreMasternodeListDiff { data } => { + debug!( + subscriber_id = sub_handle.id(), + payload_size = data.len(), + "masternode_list_stream=forward_diff" + ); + let response = MasternodeListResponse { + masternode_list_diff: data, + }; + + Ok(response) + } + other => { + tracing::trace!(event=?other, event_type=std::any::type_name_of_val(&other), "Ignoring non-matching event message type"); + // Ignore other message types for this subscription + continue; + } + }; + + if tx_stream.send(response).await.is_err() { + debug!( + "Client disconnected from masternode list subscription: {}", + sub_handle.id() + ); + break; + } + } + Result::<(),DapiError>::Ok(()) + }); + + if let Err(err) = self.masternode_list_sync.ensure_ready().await { + debug!( + subscriber_id, + error = %err, + "masternode_list_stream=ensure_ready_failed" + ); + msg_convert_worker.abort().await; + return Err(tonic::Status::from(err)); + } + + if let Some(diff) = self.masternode_list_sync.current_full_diff().await { + debug!( + 
subscriber_id, + payload_size = diff.len(), + "masternode_list_stream=send_initial_diff" + ); + if tx + .send(Ok(MasternodeListResponse { + masternode_list_diff: diff, + })) + .await + .is_err() + { + debug!( + "Client disconnected from masternode list subscription before initial response: {}", + subscription_handle.id() + ); + } + } else { + debug!(subscriber_id, "masternode_list_stream=no_initial_diff"); + } + + let stream = ReceiverStream::new(rx); + debug!(subscriber_id, "masternode_list_stream=stream_ready"); + Ok(Response::new(stream)) + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/masternode_list_sync.rs b/packages/rs-dapi/src/services/streaming_service/masternode_list_sync.rs new file mode 100644 index 00000000000..b19efb57bad --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/masternode_list_sync.rs @@ -0,0 +1,224 @@ +use std::sync::Arc; + +use ciborium::ser::into_writer; +use dashcore_rpc::dashcore::BlockHash; +use dashcore_rpc::dashcore::hashes::Hash as HashTrait; +use tokio::sync::{Mutex, Notify, RwLock}; +use tracing::{debug, trace}; + +use crate::clients::CoreClient; +use crate::error::{DAPIResult, DapiError}; +use crate::services::streaming_service::{FilterType, StreamingEvent, SubscriberManager}; +use crate::sync::Workers; + +#[derive(Default)] +struct MasternodeState { + block_hash: Option, + block_height: Option, + full_diff: Option>, +} + +/// Manages masternode list synchronization and diff emission. 
+pub struct MasternodeListSync { + core_client: CoreClient, + subscriber_manager: Arc, + state: RwLock, + update_lock: Mutex<()>, + ready_notify: Notify, + workers: Workers, +} + +impl MasternodeListSync { + pub fn new(core_client: CoreClient, subscriber_manager: Arc) -> Self { + Self { + core_client, + subscriber_manager, + state: RwLock::new(MasternodeState::default()), + update_lock: Mutex::new(()), + ready_notify: Notify::new(), + workers: Workers::default(), + } + } + + pub fn spawn_initial_sync(self: &Arc) { + let this = Arc::clone(self); + self.workers.spawn(async move { + trace!("masternode_sync=initial start"); + let result = this.sync_best_chain_lock().await; + match &result { + Ok(true) => { + trace!("masternode_sync=initial completed"); + } + Ok(false) => { + debug!("masternode_sync=initial no_chain_lock"); + } + Err(err) => { + debug!(error = %err, "masternode_sync=initial failed"); + } + }; + result + }); + } + + pub fn start_chain_lock_listener(self: &Arc, subscriber_manager: Arc) { + let this = Arc::clone(self); + self.workers.spawn(async move { + trace!("masternode_sync=listener started"); + let handle = subscriber_manager + .add_subscription(FilterType::CoreChainLocks) + .await; + + while let Some(event) = handle.recv().await { + if let StreamingEvent::CoreChainLock { .. } = event { + trace!("masternode_sync=listener chain_lock_event"); + this.handle_chain_lock_notification().await; + } + } + debug!("masternode_sync=listener stopped"); + Result::<(), DapiError>::Err(DapiError::ConnectionClosed) + }); + } + + pub async fn ensure_ready(&self) -> DAPIResult<()> { + // Define Notified so that we will not miss notifications between the check and the wait. + // As per docs, The Notified future is guaranteed to receive wakeups from notify_waiters() as soon as + // it has been created, even if it has not yet been polled. 
+ let notified = self.ready_notify.notified(); + + if self.state.read().await.full_diff.is_some() { + trace!("masternode_sync=ensure_ready cached"); + return Ok(()); + } + + if self.sync_best_chain_lock().await? { + trace!("masternode_sync=ensure_ready synced_now"); + return Ok(()); + } + + trace!("masternode_sync=ensure_ready wait_notify"); + // Wait until notified that initial sync is done + notified.await; + Ok(()) + } + + pub async fn current_full_diff(&self) -> Option> { + self.state.read().await.full_diff.clone() + } + + pub async fn handle_chain_lock_notification(&self) { + match self.sync_best_chain_lock().await { + Ok(true) => { + trace!("masternode_sync=chain_lock handled"); + } + Ok(false) => { + debug!("masternode_sync=chain_lock no_best_lock"); + } + Err(err) => { + debug!(error = %err, "masternode_sync=chain_lock failed"); + } + } + } + + async fn sync_best_chain_lock(&self) -> DAPIResult { + trace!("masternode_sync=sync_best_chain_lock fetch"); + match self.core_client.get_best_chain_lock().await? 
{ + Some(chain_lock) => { + trace!( + block_hash = %chain_lock.block_hash, + height = chain_lock.block_height, + "masternode_sync=sync_best_chain_lock obtained" + ); + self.sync_to_chain_lock(chain_lock.block_hash, chain_lock.block_height) + .await?; + Ok(true) + } + None => Ok(false), + } + } + + async fn sync_to_chain_lock(&self, block_hash: BlockHash, height: u32) -> DAPIResult<()> { + trace!(%block_hash, height, "masternode_sync=sync_to_chain_lock start"); + let _guard = self.update_lock.lock().await; + + if self + .state + .read() + .await + .block_hash + .as_ref() + .filter(|current| *current == &block_hash) + .is_some() + { + debug!(%block_hash, "masternode_sync=sync_to_chain_lock already_current"); + return Ok(()); + } + + let previous_state = self.state.read().await; + let previous_hash = previous_state.block_hash; + drop(previous_state); + + let full_diff = self.fetch_diff(None, &block_hash).await?; + + let diff_bytes = if let Some(prev) = previous_hash { + if prev == block_hash { + None + } else { + Some(self.fetch_diff(Some(&prev), &block_hash).await?) 
+ } + } else { + None + }; + + trace!( + previous = previous_hash.map(|h| h.to_string()), + has_incremental = diff_bytes.is_some(), + "masternode_sync=sync_to_chain_lock diffs_prepared" + ); + + { + let mut state = self.state.write().await; + state.block_hash = Some(block_hash); + state.block_height = Some(height); + state.full_diff = Some(full_diff.clone()); + } + + trace!("masternode_sync=sync_to_chain_lock state_updated"); + + let payload = diff_bytes.unwrap_or_else(|| full_diff.clone()); + self.subscriber_manager + .notify(StreamingEvent::CoreMasternodeListDiff { data: payload }) + .await; + + self.ready_notify.notify_waiters(); + + trace!( + %block_hash, + height, + "Masternode list synchronized" + ); + + Ok(()) + } + + async fn fetch_diff(&self, base: Option<&BlockHash>, block: &BlockHash) -> DAPIResult> { + trace!( + base = base.map(|h| h.to_string()), + block = %block, + "masternode_sync=fetch_diff start" + ); + let base_hash = base.cloned().unwrap_or_else(Self::null_block_hash); + let diff = self.core_client.mn_list_diff(&base_hash, block).await?; + + let mut buffer = Vec::new(); + into_writer(&diff, &mut buffer) + .map_err(|e| DapiError::internal(format!("failed to encode masternode diff: {}", e)))?; + + trace!(size = buffer.len(), "masternode_sync=fetch_diff encoded"); + + Ok(buffer) + } + + fn null_block_hash() -> BlockHash { + BlockHash::from_slice(&[0u8; 32]).expect("zero block hash") + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/mod.rs b/packages/rs-dapi/src/services/streaming_service/mod.rs new file mode 100644 index 00000000000..dac63b4e39e --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/mod.rs @@ -0,0 +1,489 @@ +// Streaming service modular implementation +// This module handles real-time streaming of blockchain data from ZMQ to gRPC clients + +mod block_header_stream; +mod bloom; +mod masternode_list_stream; +mod masternode_list_sync; +mod subscriber_manager; +mod transaction_stream; +mod zmq_listener; + 
+use crate::DapiError; +use crate::clients::{CoreClient, TenderdashClient}; +use crate::config::Config; +use crate::sync::Workers; +use dash_spv::Hash; +use std::sync::Arc; +use tokio::sync::broadcast; +use tokio::sync::broadcast::error::RecvError; +use tokio::time::{Duration, sleep}; +use tracing::{debug, trace}; + +pub(crate) use masternode_list_sync::MasternodeListSync; +pub(crate) use subscriber_manager::{ + FilterType, StreamingEvent, SubscriberManager, SubscriptionHandle, +}; +pub(crate) use zmq_listener::{ZmqEvent, ZmqListener}; + +/// Streaming service implementation with ZMQ integration. +/// +/// Cheap cloning is supported, and will create references to the same background workers. +/// Doesn't store any state itself; all state is in the background workers. +#[derive(Clone)] +pub struct StreamingServiceImpl { + pub drive_client: crate::clients::drive_client::DriveClient, + pub tenderdash_client: Arc, + pub core_client: CoreClient, + pub config: Arc, + pub zmq_listener: Arc, + pub subscriber_manager: Arc, + pub masternode_list_sync: Arc, + /// Background workers; aborted when the last reference is dropped + pub workers: Workers, +} + +impl StreamingServiceImpl { + /// Construct the streaming service with default ZMQ listener and background workers. 
+ pub fn new( + drive_client: crate::clients::drive_client::DriveClient, + tenderdash_client: Arc, + core_client: CoreClient, + config: Arc, + ) -> Result> { + trace!( + zmq_url = %config.dapi.core.zmq_url, + "Creating streaming service with default ZMQ listener" + ); + let zmq_listener = Arc::new(ZmqListener::new(&config.dapi.core.zmq_url)?); + + Self::create_with_common_setup( + drive_client, + tenderdash_client, + core_client, + config, + zmq_listener, + ) + } + + /// Create a new streaming service with a custom ZMQ listener (useful for testing) + fn create_with_common_setup( + drive_client: crate::clients::drive_client::DriveClient, + tenderdash_client: Arc, + core_client: CoreClient, + config: Arc, + zmq_listener: Arc, + ) -> Result> { + trace!( + zmq_url = %config.dapi.core.zmq_url, + "Creating streaming service with provided ZMQ listener" + ); + let subscriber_manager = Arc::new(SubscriberManager::new()); + let masternode_list_sync = Arc::new(MasternodeListSync::new( + core_client.clone(), + subscriber_manager.clone(), + )); + masternode_list_sync.spawn_initial_sync(); + masternode_list_sync.start_chain_lock_listener(subscriber_manager.clone()); + + // Prepare background workers set + let workers = Workers::new(); + + // Spawn Core ZMQ subscribe + process loop + let zmq_listener_clone = zmq_listener.clone(); + let subscriber_manager_clone = subscriber_manager.clone(); + workers.spawn(async move { + Self::core_zmq_subscription_worker(zmq_listener_clone, subscriber_manager_clone).await; + Ok::<(), DapiError>(()) + }); + + // Spawn Tenderdash transaction forwarder worker + let td_client = tenderdash_client.clone(); + let sub_mgr = subscriber_manager.clone(); + workers.spawn(async move { + Self::tenderdash_transactions_subscription_worker(td_client, sub_mgr).await; + Ok::<(), DapiError>(()) + }); + let td_client = tenderdash_client.clone(); + let sub_mgr = subscriber_manager.clone(); + workers.spawn(async move { + 
Self::tenderdash_block_subscription_worker(td_client, sub_mgr).await; + Ok::<(), DapiError>(()) + }); + + trace!( + zmq_url = %config.dapi.core.zmq_url, + drive = %config.dapi.drive.uri, + tenderdash_http = %config.dapi.tenderdash.uri, + tenderdash_ws = %config.dapi.tenderdash.websocket_uri, + "Started streaming service background tasks" + ); + + Ok(Self { + drive_client, + tenderdash_client, + core_client, + config, + zmq_listener, + subscriber_manager, + masternode_list_sync, + workers, + }) + } + + /// Background worker: subscribe to Tenderdash transactions and forward to subscribers + async fn tenderdash_transactions_subscription_worker( + tenderdash_client: Arc, + subscriber_manager: Arc, + ) { + trace!("Starting Tenderdash tx forwarder loop"); + let mut transaction_rx = tenderdash_client.subscribe_to_transactions(); + let mut forwarded_events: u64 = 0; + loop { + match transaction_rx.recv().await { + Ok(event) => { + debug!( + hash = %event.hash, + height = event.height, + forwarded = forwarded_events, + "Forwarding Tenderdash transaction event" + ); + subscriber_manager + .notify(StreamingEvent::PlatformTx { event }) + .await; + forwarded_events = forwarded_events.saturating_add(1); + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(skipped)) => { + debug!( + "Tenderdash event receiver lagged, skipped {} events", + skipped + ); + continue; + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => { + debug!( + forwarded = forwarded_events, + "Tenderdash transaction event receiver closed" + ); + break; + } + } + } + trace!( + forwarded = forwarded_events, + "Tenderdash tx forwarder loop exited" + ); + } + + /// Background worker: subscribe to Tenderdash transactions and forward to subscribers + async fn tenderdash_block_subscription_worker( + tenderdash_client: Arc, + subscriber_manager: Arc, + ) { + trace!("Starting Tenderdash block forwarder loop"); + let mut block_rx = tenderdash_client.subscribe_to_blocks(); + let mut forwarded_events: u64 = 
0; + loop { + match block_rx.recv().await { + Ok(event) => { + debug!( + forwarded = forwarded_events, + "Forwarding Tenderdash block event" + ); + subscriber_manager + .notify(StreamingEvent::PlatformBlock { event }) + .await; + forwarded_events = forwarded_events.saturating_add(1); + } + Err(tokio::sync::broadcast::error::RecvError::Lagged(skipped)) => { + debug!( + skipped, + "Tenderdash block event receiver lagged, skipped events", + ); + continue; + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => { + debug!( + forwarded = forwarded_events, + "Tenderdash block event receiver closed" + ); + break; + } + } + } + trace!( + forwarded = forwarded_events, + "Tenderdash block forwarder loop exited" + ); + } + + /// Background worker: subscribe to ZMQ and process events, with retry/backoff + async fn core_zmq_subscription_worker( + zmq_listener: Arc, + subscriber_manager: Arc, + ) { + trace!("Starting ZMQ subscribe/process loop"); + let mut backoff = Duration::from_secs(1); + let max_backoff = Duration::from_secs(60); + loop { + match zmq_listener.subscribe() { + Ok(zmq_events) => { + trace!("ZMQ listener started successfully, processing events"); + Self::process_zmq_events(zmq_events, subscriber_manager.clone()).await; + // processing ended; mark unhealthy and retry after short delay + backoff = Duration::from_secs(1); + debug!("ZMQ event processing ended; restarting after {:?}", backoff); + sleep(backoff).await; + } + Err(e) => { + debug!("ZMQ subscribe failed: {}", e); + debug!("Retrying ZMQ subscribe in {:?}", backoff); + sleep(backoff).await; + backoff = (backoff * 2).min(max_backoff); + } + } + } + } + + /// Process ZMQ events and forward to matching subscribers + async fn process_zmq_events( + mut zmq_events: broadcast::Receiver, + subscriber_manager: Arc, + ) { + trace!("Starting ZMQ event processing loop"); + let mut processed_events: u64 = 0; + loop { + let event = zmq_events.recv().await; + + if event.is_ok() { + processed_events = 
processed_events.saturating_add(1); + } + + match event { + Ok(ZmqEvent::RawTransaction { data }) => { + let txid = txid_hex_from_bytes(&data).unwrap_or_else(|| "n/a".to_string()); + trace!( + txid = %txid, + size = data.len(), + processed = processed_events, + "Processing raw transaction event" + ); + subscriber_manager + .notify(StreamingEvent::CoreRawTransaction { data }) + .await; + } + Ok(ZmqEvent::RawBlock { data }) => { + let block_hash = + block_hash_hex_from_block_bytes(&data).unwrap_or_else(|| "n/a".to_string()); + trace!( + block_hash = %block_hash, + size = data.len(), + processed = processed_events, + "Processing raw block event" + ); + subscriber_manager + .notify(StreamingEvent::CoreRawBlock { data }) + .await; + } + Ok(ZmqEvent::RawTransactionLock { + tx_bytes, + lock_bytes, + }) => { + trace!( + tx_bytes = tx_bytes.as_ref().map(|b| b.len()).unwrap_or(0), + lock_bytes = lock_bytes.len(), + processed = processed_events, + "Processing transaction lock event" + ); + subscriber_manager + .notify(StreamingEvent::CoreInstantLock { + tx_bytes, + lock_bytes, + }) + .await; + } + Ok(ZmqEvent::RawChainLock { data }) => { + trace!( + size = data.len(), + processed = processed_events, + "Processing chain lock event" + ); + subscriber_manager + .notify(StreamingEvent::CoreChainLock { data }) + .await; + } + Ok(ZmqEvent::HashBlock { hash }) => { + trace!( + size = hash.len(), + processed = processed_events, + "Processing new block hash event" + ); + subscriber_manager + .notify(StreamingEvent::CoreNewBlockHash { hash }) + .await; + } + Err(RecvError::Closed) => break, + Err(RecvError::Lagged(skipped)) => { + tracing::error!(skipped, "ZMQ event reader lagged, skipped events"); + } + } + } + + trace!( + processed = processed_events, + "ZMQ event processing loop ended" + ); + } + + /// Returns current health of the ZMQ streaming pipeline + pub fn is_healthy(&self) -> bool { + self.zmq_listener.is_running() + } +} + +// --- Small helpers for concise logging across 
submodules --- +/// Attempt to decode transaction bytes and return the txid as hex. +pub(crate) fn txid_hex_from_bytes(bytes: &[u8]) -> Option { + use dashcore_rpc::dashcore::Transaction as CoreTx; + use dashcore_rpc::dashcore::consensus::encode::deserialize; + deserialize::(bytes) + .ok() + .map(|tx| tx.txid().to_string()) +} + +/// Decode transaction bytes and return the txid in raw byte form. +pub(crate) fn txid_bytes_from_bytes(bytes: &[u8]) -> Option> { + use dashcore_rpc::dashcore::Transaction as CoreTx; + use dashcore_rpc::dashcore::consensus::encode::deserialize; + use dashcore_rpc::dashcore::hashes::Hash as DashHash; + + deserialize::(bytes) + .ok() + .map(|tx| tx.txid().to_byte_array().to_vec()) +} +/// Decode block bytes and return the block hash in hex and as printable string. +pub(crate) fn block_hash_from_block_bytes(bytes: &[u8]) -> Option<([u8; 32], String)> { + use dashcore_rpc::dashcore::Block as CoreBlock; + use dashcore_rpc::dashcore::consensus::encode::deserialize; + deserialize::(bytes) + .inspect_err( + |error| tracing::debug!(%error, block=hex::encode(bytes), "cannot parse block data"), + ) + .ok() + .map(|b| { + let hash = b.block_hash(); + (hash.as_raw_hash().to_byte_array(), hash.to_string()) + }) +} + +/// Decode block bytes and return the block hash in hex. +#[inline] +pub(crate) fn block_hash_hex_from_block_bytes(bytes: &[u8]) -> Option { + block_hash_from_block_bytes(bytes).map(|(_, hash_string)| hash_string) +} + +/// Return a short hexadecimal prefix of the provided bytes for logging. +pub(crate) fn short_hex(bytes: &[u8], take: usize) -> String { + let len = bytes.len().min(take); + let mut s = hex::encode(&bytes[..len]); + if bytes.len() > take { + s.push('…'); + } + s +} + +/// Format a human-readable description of a streaming event for logs. 
+pub(crate) fn summarize_streaming_event(event: &StreamingEvent) -> String { + match event { + StreamingEvent::CoreRawTransaction { data } => { + if let Some(txid) = txid_hex_from_bytes(data) { + format!("CoreRawTransaction txid={} size={}", txid, data.len()) + } else { + format!( + "CoreRawTransaction size={} bytes prefix={}", + data.len(), + short_hex(data, 12) + ) + } + } + StreamingEvent::CoreRawBlock { data } => { + if let Some(hash) = block_hash_hex_from_block_bytes(data) { + format!("CoreRawBlock hash={} size={}", hash, data.len()) + } else { + format!( + "CoreRawBlock size={} bytes prefix={}", + data.len(), + short_hex(data, 12) + ) + } + } + StreamingEvent::CoreInstantLock { + tx_bytes, + lock_bytes, + } => { + format!( + "CoreInstantLock tx_bytes={} lock_bytes={}", + tx_bytes.as_ref().map(|b| b.len()).unwrap_or(0), + lock_bytes.len() + ) + } + StreamingEvent::CoreChainLock { data } => { + format!("CoreChainLock size={} bytes", data.len()) + } + StreamingEvent::CoreNewBlockHash { hash } => { + format!("CoreNewBlockHash {}", short_hex(hash, 12)) + } + StreamingEvent::PlatformTx { event } => { + // `hash` is already a string on TD events + format!("PlatformTx hash={} height={}", event.hash, event.height) + } + StreamingEvent::PlatformBlock { .. } => "PlatformBlock".to_string(), + StreamingEvent::CoreMasternodeListDiff { data } => { + format!("CoreMasternodeListDiff size={} bytes", data.len()) + } + } +} + +/// Describe a ZMQ event in a concise logging-friendly string. 
+pub(crate) fn summarize_zmq_event(event: &ZmqEvent) -> String { + match event { + ZmqEvent::RawTransaction { data } => { + if let Some(txid) = txid_hex_from_bytes(data) { + format!("RawTransaction txid={} size={}", txid, data.len()) + } else { + format!( + "RawTransaction size={} bytes prefix={}", + data.len(), + short_hex(data, 12) + ) + } + } + ZmqEvent::RawBlock { data } => { + if let Some(hash) = block_hash_hex_from_block_bytes(data) { + format!("RawBlock hash={} size={}", hash, data.len()) + } else { + format!( + "RawBlock size={} bytes prefix={}", + data.len(), + short_hex(data, 12) + ) + } + } + ZmqEvent::RawTransactionLock { + tx_bytes, + lock_bytes, + } => { + format!( + "RawTransactionLock tx_bytes={} lock_bytes={}", + tx_bytes.as_ref().map(|b| b.len()).unwrap_or(0), + lock_bytes.len() + ) + } + ZmqEvent::RawChainLock { data } => { + format!("RawChainLock size={} bytes", data.len()) + } + ZmqEvent::HashBlock { hash } => { + format!("HashBlock {}", short_hex(hash, 12)) + } + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/subscriber_manager.rs b/packages/rs-dapi/src/services/streaming_service/subscriber_manager.rs new file mode 100644 index 00000000000..d6486608a12 --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/subscriber_manager.rs @@ -0,0 +1,342 @@ +use dpp::dashcore::prelude::DisplayHex; +use hex::encode; +use std::fmt::Debug; +use std::sync::Arc; +use tracing::{debug, trace}; + +use crate::clients::tenderdash_websocket::{BlockEvent, TransactionEvent}; +use dash_event_bus::event_bus::{ + EventBus, Filter as EventBusFilter, SubscriptionHandle as EventBusSubscriptionHandle, +}; +use dashcore_rpc::dashcore::bloom::{BloomFilter as CoreBloomFilter, BloomFlags}; +use dashcore_rpc::dashcore::{Transaction as CoreTx, consensus::encode::deserialize}; + +/// Types of filters supported by the streaming service +#[derive(Debug, Clone)] +pub enum FilterType { + /// Bloom filter for transaction matching with update flags; filter 
is persisted/mutable + CoreBloomFilter(Arc>, BloomFlags), + /// All Core transactions (no filtering) + CoreAllTxs, + /// All platform transactions (Tenderdash) + PlatformAllTxs, + /// All Tenderdash platform blocks + PlatformAllBlocks, + /// Single platform transaction by uppercase hex hash + PlatformTxId(String), + /// All blocks filter (no filtering) + CoreAllBlocks, + /// All masternodes filter (no filtering) + CoreAllMasternodes, + /// Chain lock events only + CoreChainLocks, + /// New Core block hash notifications (for cache invalidation) + CoreNewBlockHash, +} + +impl FilterType { + fn matches_core_transaction(&self, raw_tx: &[u8]) -> bool { + match self { + FilterType::CoreBloomFilter(bloom, flags) => match deserialize::(raw_tx) { + Ok(tx) => super::bloom::matches_transaction(Arc::clone(bloom), &tx, *flags), + + Err(e) => { + debug!( + error = %e, + "Failed to deserialize core transaction for bloom filter matching, falling back to contains()" + ); + match bloom.read() { + Ok(guard) => guard.contains(raw_tx), + Err(_) => { + debug!("Failed to acquire read lock for bloom filter"); + false + } + } + } + }, + _ => false, + } + } + + fn matches_event(&self, event: &StreamingEvent) -> bool { + use StreamingEvent::*; + + let matched = match (self, event) { + (FilterType::PlatformAllTxs, PlatformTx { .. }) => true, + (FilterType::PlatformAllTxs, _) => false, + (FilterType::PlatformTxId(id), PlatformTx { event }) => &event.hash == id, + (FilterType::PlatformTxId(_), _) => false, + (FilterType::PlatformAllBlocks, PlatformBlock { .. }) => true, + (FilterType::PlatformAllBlocks, _) => false, + (FilterType::CoreNewBlockHash, CoreNewBlockHash { .. }) => true, + (FilterType::CoreNewBlockHash, _) => false, + (FilterType::CoreAllBlocks, CoreRawBlock { .. }) => true, + (FilterType::CoreAllBlocks, _) => false, + (FilterType::CoreBloomFilter(_, _), CoreRawTransaction { data }) => { + self.matches_core_transaction(data) + } + (FilterType::CoreBloomFilter(_, _), CoreRawBlock { .. 
}) => true, + (FilterType::CoreBloomFilter(_, _), CoreInstantLock { tx_bytes, .. }) => tx_bytes + .as_ref() + .map(|data| self.matches_core_transaction(data)) + .unwrap_or(true), + (FilterType::CoreBloomFilter(_, _), CoreChainLock { .. }) => true, + (FilterType::CoreBloomFilter(_, _), _) => false, + (FilterType::CoreAllMasternodes, CoreMasternodeListDiff { .. }) => true, + (FilterType::CoreAllMasternodes, _) => false, + (FilterType::CoreChainLocks, CoreChainLock { .. }) => true, + (FilterType::CoreChainLocks, _) => false, + (FilterType::CoreAllTxs, CoreRawTransaction { .. }) => true, + (FilterType::CoreAllTxs, CoreInstantLock { .. }) => true, + (FilterType::CoreAllTxs, CoreChainLock { .. }) => true, + (FilterType::CoreAllTxs, _) => false, + }; + let event_summary = super::summarize_streaming_event(event); + trace!(matched, filter = ?self, event = %event_summary, "subscription_manager=filter_evaluated"); + matched + } +} + +impl EventBusFilter for FilterType { + fn matches(&self, event: &StreamingEvent) -> bool { + self.matches_event(event) + } +} + +/// Incoming events from various sources to dispatch to subscribers +#[derive(Clone)] +pub enum StreamingEvent { + /// Core raw transaction bytes + CoreRawTransaction { data: Vec }, + /// Core raw block bytes + CoreRawBlock { data: Vec }, + /// Core InstantSend lock (transaction bytes optional, lock bytes mandatory) + CoreInstantLock { + tx_bytes: Option>, + lock_bytes: Vec, + }, + /// Core ChainLock + CoreChainLock { data: Vec }, + /// New block hash event (for side-effects like cache invalidation) + CoreNewBlockHash { hash: Vec }, + /// Tenderdash platform transaction event + PlatformTx { event: TransactionEvent }, + /// Tenderdash platform block event + PlatformBlock { event: BlockEvent }, + /// Masternode list diff bytes + CoreMasternodeListDiff { data: Vec }, +} + +impl Debug for StreamingEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + 
StreamingEvent::CoreRawTransaction { data } => { + write!( + f, + "CoreRawTransaction {{ data: [{}] }}", + data.to_lower_hex_string() + ) + } + StreamingEvent::CoreRawBlock { data } => { + write!( + f, + "CoreRawBlock {{ data: [{}] }}", + data.to_lower_hex_string() + ) + } + StreamingEvent::CoreInstantLock { + tx_bytes, + lock_bytes, + } => match tx_bytes { + Some(tx) => write!( + f, + "CoreInstantLock {{ tx_bytes: [{}], lock_bytes: [{}] }}", + encode(tx), + encode(lock_bytes) + ), + None => write!( + f, + "CoreInstantLock {{ tx_bytes: none, lock_bytes: [{}] }}", + encode(lock_bytes) + ), + }, + StreamingEvent::CoreChainLock { data } => { + write!( + f, + "CoreChainLock {{ data: [{}] }}", + data.to_lower_hex_string() + ) + } + StreamingEvent::CoreNewBlockHash { hash } => { + write!( + f, + "CoreNewBlockHash {{ hash: [{}] }}", + hash.to_lower_hex_string() + ) + } + StreamingEvent::PlatformTx { event } => { + write!(f, "PlatformTx {{ hash: {} }}", event.hash) + } + StreamingEvent::PlatformBlock { .. 
} => { + write!(f, "PlatformBlock {{ }}") + } + StreamingEvent::CoreMasternodeListDiff { data } => { + write!( + f, + "CoreMasternodeListDiff {{ data: [{}] }}", + data.to_lower_hex_string() + ) + } + } + } +} + +/// Manages all active streaming subscriptions +pub type SubscriberManager = EventBus; + +pub type SubscriptionHandle = EventBusSubscriptionHandle; + +#[cfg(test)] +mod tests { + use super::*; + use dashcore_rpc::dashcore::bloom::BloomFlags; + use dashcore_rpc::dashcore::consensus::encode::serialize; + use dashcore_rpc::dashcore::hashes::Hash; + use dashcore_rpc::dashcore::{OutPoint, PubkeyHash, ScriptBuf, TxIn, TxOut}; + use tokio::time::{Duration, timeout}; + + #[tokio::test] + async fn test_subscription_management() { + let manager = SubscriberManager::new(); + + let handle = manager.add_subscription(FilterType::CoreAllBlocks).await; + + assert_eq!(manager.subscription_count().await, 1); + + manager.remove_subscription(handle.id()).await; + assert_eq!(manager.subscription_count().await, 0); + } + + #[tokio::test] + async fn test_non_tx_bytes_fallbacks_to_contains() { + let manager = SubscriberManager::new(); + + // Create a filter with all bits set so contains() returns true for any data + let filter = FilterType::CoreBloomFilter( + std::sync::Arc::new(std::sync::RwLock::new( + dashcore_rpc::dashcore::bloom::BloomFilter::from_bytes( + vec![0xFF; 8], + 5, + 0, + BloomFlags::None, + ) + .expect("failed to create bloom filter"), + )), + BloomFlags::None, + ); + + let handle = manager.add_subscription(filter).await; + + // Send non-transaction bytes + let payload = vec![1u8, 2, 3, 4, 5, 6, 7, 8]; + manager + .notify(StreamingEvent::CoreRawTransaction { + data: payload.clone(), + }) + .await; + + // We should receive one transaction message with the same bytes + let msg = timeout(Duration::from_millis(200), handle.recv()) + .await + .expect("timed out") + .expect("channel closed"); + + match msg { + StreamingEvent::CoreRawTransaction { data } => { + 
assert_eq!(data, payload); + } + other => panic!("unexpected message: {:?}", other), + } + } + + #[tokio::test] + async fn test_bloom_update_persistence_across_messages() { + // This test describes desired behavior and is expected to FAIL with the current + // implementation because filter updates are not persisted (filter is cloned per check). + let manager = SubscriberManager::new(); + + // Build TX A with a P2PKH output whose hash160 we seed into the filter + let h160 = PubkeyHash::from_byte_array([0x44; 20]); + let script_a = ScriptBuf::new_p2pkh(&h160); + let tx_a = dashcore_rpc::dashcore::Transaction { + version: 2, + lock_time: 0, + input: vec![], + output: vec![TxOut { + value: 1500, + script_pubkey: script_a, + }], + special_transaction_payload: None, + }; + + // Build TX B spending outpoint (tx_a.txid, vout=0) + let tx_a_txid = tx_a.txid(); + let tx_b = dashcore_rpc::dashcore::Transaction { + version: 2, + lock_time: 0, + input: vec![TxIn { + previous_output: OutPoint { + txid: tx_a_txid, + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: 0xFFFFFFFF, + witness: Default::default(), + }], + output: vec![], + special_transaction_payload: None, + }; + + // Subscription with BLOOM_UPDATE_ALL so outpoint should be added after TX A matches + let mut base_filter = dashcore_rpc::dashcore::bloom::BloomFilter::from_bytes( + vec![0; 512], + 5, + 12345, + BloomFlags::All, + ) + .unwrap(); + base_filter.insert(&h160.to_byte_array()); + let filter = FilterType::CoreBloomFilter( + std::sync::Arc::new(std::sync::RwLock::new(base_filter)), + BloomFlags::All, + ); + + let handle = manager.add_subscription(filter).await; + + // Notify with TX A (should match by output pushdata) + let tx_a_bytes = serialize(&tx_a); + manager + .notify(StreamingEvent::CoreRawTransaction { + data: tx_a_bytes.clone(), + }) + .await; + let _first = timeout(Duration::from_millis(200), handle.recv()) + .await + .expect("timed out waiting for first match") + .expect("channel closed"); + + 
// Notify with TX B: desired behavior is to match due to persisted outpoint update + let tx_b_bytes = serialize(&tx_b); + manager + .notify(StreamingEvent::CoreRawTransaction { + data: tx_b_bytes.clone(), + }) + .await; + + // Expect a second message (this will FAIL until persistence is implemented) + let _second = timeout(Duration::from_millis(400), handle.recv()) + .await + .expect("timed out waiting for second match (persistence missing?)") + .expect("channel closed"); + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/transaction_stream.rs b/packages/rs-dapi/src/services/streaming_service/transaction_stream.rs new file mode 100644 index 00000000000..06ce6e24a83 --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/transaction_stream.rs @@ -0,0 +1,1263 @@ +use std::collections::HashSet; +use std::io::Cursor; +use std::sync::Arc; +use std::time::Duration; + +use dapi_grpc::core::v0::transactions_with_proofs_response::Responses; +use dapi_grpc::core::v0::{ + InstantSendLockMessages, RawTransactions, TransactionsWithProofsRequest, + TransactionsWithProofsResponse, +}; +use dapi_grpc::tonic::{Request, Response, Status}; +use dashcore_rpc::dashcore::consensus::Decodable as _; +use dashcore_rpc::dashcore::{Block, InstantLock, Transaction, hashes::Hash}; +use futures::TryFutureExt; +use tokio::sync::{Mutex as AsyncMutex, mpsc, watch}; +use tokio::task::JoinSet; +use tokio::time::timeout; +use tokio_stream::wrappers::ReceiverStream; +use tracing::{debug, trace}; + +use crate::DapiError; +use crate::clients::{CoreClient, core_client}; +use crate::services::streaming_service::{ + FilterType, StreamingEvent, StreamingServiceImpl, SubscriptionHandle, + bloom::bloom_flags_from_int, +}; + +const TRANSACTION_STREAM_BUFFER: usize = 512; +/// Maximum duration to keep the delivery gate closed while replaying historical data. 
+const GATE_MAX_TIMEOUT: Duration = Duration::from_secs(180); + +type TxResponseResult = Result; +type TxResponseSender = mpsc::Sender; +type TxResponseStream = ReceiverStream; +type TxResponse = Response; +type DeliveredTxSet = Arc>>>; +type DeliveredBlockSet = Arc>>>; +type DeliveredInstantLockSet = Arc>>>; +type GateSender = watch::Sender; +type GateReceiver = watch::Receiver; + +#[derive(Clone)] +struct TransactionStreamState { + delivered_txs: DeliveredTxSet, + delivered_blocks: DeliveredBlockSet, + delivered_instant_locks: DeliveredInstantLockSet, + gate_sender: GateSender, + gate_receiver: GateReceiver, +} + +impl TransactionStreamState { + fn new() -> Self { + let (gate_sender, gate_receiver) = watch::channel(false); + Self { + delivered_txs: Arc::new(AsyncMutex::new(HashSet::new())), + delivered_blocks: Arc::new(AsyncMutex::new(HashSet::new())), + delivered_instant_locks: Arc::new(AsyncMutex::new(HashSet::new())), + gate_sender, + gate_receiver, + } + } + + fn is_gate_open(&self) -> bool { + *self.gate_receiver.borrow() + } + + /// Open the gate to allow live events to be processed. + /// + /// Provide TransactionStreamState::gate_sender. + /// + /// This is decoupled for easier handling between tasks. + fn open_gate(sender: &GateSender) { + let _ = sender.send(true); + } + + async fn wait_for_gate_open(&self) { + // when true, the gate is already open + if self.is_gate_open() { + return; + } + + let mut receiver = self.gate_receiver.clone(); + + let wait_future = async { + while !*receiver.borrow() { + if receiver.changed().await.is_err() { + break; + } + } + }; + + if let Err(e) = timeout(GATE_MAX_TIMEOUT, wait_future).await { + debug!( + timeout = GATE_MAX_TIMEOUT.as_secs(), + "transactions_with_proofs=gate_open_timeout error: {}, forcibly opening gate", e + ); + + Self::open_gate(&self.gate_sender); + } + } + + /// Marks a transaction as delivered. Returns false if it was already delivered. 
+ async fn mark_transaction_delivered(&self, txid: &[u8]) -> bool { + tracing::trace!( + txid = txid_to_hex(txid), + "transaction_stream=mark_transaction_delivered" + ); + let mut guard = self.delivered_txs.lock().await; + guard.insert(txid.to_vec()) + } + + async fn mark_transactions_delivered(&self, txids: I) + where + I: IntoIterator>, + { + let mut guard = self.delivered_txs.lock().await; + for txid in txids { + tracing::trace!( + txid = txid_to_hex(&txid), + "transaction_stream=mark_transaction_delivered" + ); + guard.insert(txid); + } + } + + /// Returns true if transaction has already been delivered on this stream + async fn has_transaction_been_delivered(&self, txid: &[u8]) -> bool { + self.delivered_txs.lock().await.contains(txid) + || self.delivered_instant_locks.lock().await.contains(txid) + } + + async fn mark_block_delivered(&self, block_hash: &[u8]) -> bool { + let mut guard = self.delivered_blocks.lock().await; + let inserted = guard.insert(block_hash.to_vec()); + trace!( + block_hash = %hex::encode(block_hash), + inserted, + "transactions_with_proofs=block_delivery_state_updated" + ); + inserted + } + + async fn mark_instant_lock_delivered(&self, txid: &[u8]) -> bool { + let mut guard = self.delivered_instant_locks.lock().await; + guard.insert(txid.to_vec()) + } +} + +impl StreamingServiceImpl { + pub async fn subscribe_to_transactions_with_proofs_impl( + &self, + request: Request, + ) -> Result { + trace!("transactions_with_proofs=subscribe_begin"); + let req = request.into_inner(); + let count = req.count; + let filter = match req.bloom_filter { + Some(bloom_filter) => { + let (core_filter, flags) = parse_bloom_filter(&bloom_filter)?; + FilterType::CoreBloomFilter( + std::sync::Arc::new(std::sync::RwLock::new(core_filter)), + flags, + ) + } + None => FilterType::CoreAllTxs, + }; + + let from_block = req + .from_block + .ok_or_else(|| Status::invalid_argument("Must specify from_block"))?; + + let (tx, rx) = mpsc::channel(TRANSACTION_STREAM_BUFFER); 
+ if count > 0 { + // Historical mode + self.spawn_fetch_transactions_history( + Some(from_block), + Some(count as usize), + filter, + None, + tx, + None, + ) + .await?; + + debug!("transactions_with_proofs=historical_stream_ready"); + } else { + self.handle_transactions_combined_mode(from_block, filter, tx) + .await?; + } + + Ok(Response::new(ReceiverStream::new(rx))) + } + + async fn transaction_worker( + tx_handle: SubscriptionHandle, + block_handle: SubscriptionHandle, + tx: TxResponseSender, + filter: FilterType, + state: TransactionStreamState, + ) -> Result<(), DapiError> { + let subscriber_id = tx_handle.id().to_string(); + let tx_handle_id = tx_handle.id().to_string(); + let block_handle_id = block_handle.id().to_string(); + + let mut pending: Vec<(StreamingEvent, String)> = Vec::new(); + // Gate stays closed until historical replay finishes; queue live events until it opens. + let mut gated = !state.is_gate_open(); + + loop { + tokio::select! { + _ = state.wait_for_gate_open(), if gated => { + gated = !state.is_gate_open(); + // gated changed from true to false, flush pending events + if !gated + && !Self::flush_transaction_pending( + &filter, + &subscriber_id, + &tx, + &state, + &mut pending, + ).await { + break; + } + } + message = block_handle.recv() => { + match message { + Some(event) => { + if gated { + pending.push((event, block_handle_id.clone())); + continue; + } + if !Self::forward_transaction_event( + event, + &block_handle_id, + &filter, + &subscriber_id, + &tx, + &state, + ).await { + tracing::debug!(subscriber_id, block_handle_id, "transactions_with_proofs=forward_block_event_failed"); + break; + } + } + None => { + tracing::debug!(subscriber_id, block_handle_id, "transactions_with_proofs=block_subscription_closed"); + break + }, + } + } + message = tx_handle.recv() => { + match message { + Some(event) => { + if gated { + pending.push((event, tx_handle_id.clone())); + continue; + } + if !Self::forward_transaction_event( + event, + 
&tx_handle_id, + &filter, + &subscriber_id, + &tx, + &state, + ).await { + tracing::debug!(subscriber_id, tx_handle_id, "transactions_with_proofs=forward_tx_event_failed"); + break; + } + } + None => { + tracing::debug!(subscriber_id, tx_handle_id, "transactions_with_proofs=tx_subscription_closed"); + break + }, + } + } + } + } + + debug!(subscriber_id, "transactions_with_proofs=worker_finished"); + Err(DapiError::ConnectionClosed) + } + + async fn flush_transaction_pending( + filter: &FilterType, + subscriber_id: &str, + tx_sender: &TxResponseSender, + state: &TransactionStreamState, + pending: &mut Vec<(StreamingEvent, String)>, + ) -> bool { + if pending.is_empty() { + return true; + } + + let queued: Vec<(StreamingEvent, String)> = std::mem::take(pending); + for (event, handle_id) in queued { + if !Self::forward_transaction_event( + event, + &handle_id, + filter, + subscriber_id, + tx_sender, + state, + ) + .await + { + return false; + } + } + true + } + /// Forwards a single transaction-related event to the client if it matches the filter and + /// has not been previously delivered. + /// + /// Returns false if the client has disconnected. 
+ async fn forward_transaction_event( + event: StreamingEvent, + handle_id: &str, + filter: &FilterType, + subscriber_id: &str, + tx_sender: &TxResponseSender, + state: &TransactionStreamState, + ) -> bool { + let maybe_response = match event { + StreamingEvent::CoreRawTransaction { data } => { + let (Some(txid_bytes), Some(txid_hex)) = ( + super::txid_bytes_from_bytes(&data), + super::txid_hex_from_bytes(&data), + ) else { + tracing::debug!("transactions_with_proofs=transaction_no_txid"); + return true; + }; + + let already_delivered = !state.mark_transaction_delivered(&txid_bytes).await; + if already_delivered { + trace!( + subscriber_id, + handle_id, + txid = txid_hex, + "transactions_with_proofs=skip_duplicate_transaction" + ); + return true; + }; + + debug!( + subscriber_id, + handle_id, + txid = txid_hex, + payload_size = data.len(), + "transactions_with_proofs=forward_raw_transaction" + ); + let raw_transactions = RawTransactions { + transactions: vec![data], + }; + Some(Ok(TransactionsWithProofsResponse { + responses: Some(Responses::RawTransactions(raw_transactions)), + })) + } + StreamingEvent::CoreRawBlock { data } => { + if let Some((hash_bytes, hash_string)) = super::block_hash_from_block_bytes(&data) { + if !state.mark_block_delivered(&hash_bytes).await { + trace!( + subscriber_id, + handle_id, + block_hash = hash_string, + "transactions_with_proofs=skip_duplicate_merkle_block" + ); + return true; + } + + trace!( + subscriber_id, + handle_id, + block_hash = hash_string, + payload_size = data.len(), + "transactions_with_proofs=forward_merkle_block" + ); + } + + match Self::build_transaction_merkle_response(filter, &data, handle_id, Some(state)) + .await + { + Ok(resp) => Some(Ok(resp)), + Err(e) => Some(Err(e)), + } + } + StreamingEvent::CoreInstantLock { + tx_bytes, + lock_bytes, + } => { + let tx = tx_bytes.as_ref().and_then(|bytes| { + let mut cursor = Cursor::new(bytes.as_slice()); + Transaction::consensus_decode(&mut cursor).ok() + }); + + if 
lock_bytes.is_empty() { + trace!( + subscriber_id, + handle_id, + txid = tx + .as_ref() + .map(|tx| tx.txid().to_string()) + .unwrap_or_else(|| "unknown".to_string()), + "transactions_with_proofs=instant_lock_missing" + ); + return true; + } + + let mut cursor = Cursor::new(lock_bytes.as_slice()); + let instant_lock = match InstantLock::consensus_decode(&mut cursor) { + Ok(instant_lock) => instant_lock, + Err(e) => { + debug!( + subscriber_id, + handle_id, + txid = tx + .as_ref() + .map(|tx| tx.txid().to_string()) + .unwrap_or_else(|| "unknown".to_string()), + error = %e, + hex = %hex::encode(lock_bytes.as_slice()), + "transactions_with_proofs=drop_invalid_instant_lock" + ); + return true; + } + }; + + let txid_bytes = *instant_lock.txid.as_byte_array(); + let txid_hex = tx + .as_ref() + .map(|tx| tx.txid().to_string()) + .unwrap_or_else(|| txid_to_hex(&txid_bytes)); + + let already_delivered = state.has_transaction_been_delivered(&txid_bytes).await; + let bloom_matched = match &filter { + FilterType::CoreAllTxs => true, + FilterType::CoreBloomFilter(bloom, flags) => tx + .as_ref() + .map(|tx| super::bloom::matches_transaction(Arc::clone(bloom), tx, *flags)) + .unwrap_or(true), // failsafe: we assume match to be on a safe side + _ => false, + }; + + // skip no match or duplicate + if !bloom_matched && !already_delivered && !matches!(filter, FilterType::CoreAllTxs) + { + trace!( + subscriber_id, + handle_id, + txid = %txid_hex, + "transactions_with_proofs=skip_instant_lock_not_in_bloom" + ); + return true; + } + + if !state.mark_instant_lock_delivered(&txid_bytes).await { + trace!( + subscriber_id, + handle_id, + txid = %txid_hex, + "transactions_with_proofs=skip_duplicate_instant_lock" + ); + return true; + } + + debug!( + subscriber_id, + handle_id, + txid = %txid_hex, + payload_size = lock_bytes.len(), + "transactions_with_proofs=forward_instant_lock" + ); + let instant_lock_messages = InstantSendLockMessages { + messages: vec![lock_bytes.clone()], + }; + 
Some(Ok(TransactionsWithProofsResponse { + responses: Some(Responses::InstantSendLockMessages(instant_lock_messages)), + })) + } + other => { + let summary = super::summarize_streaming_event(&other); + trace!(subscriber_id, handle_id, event = %summary, "transactions_with_proofs=ignore_event"); + None + } + }; + + if let Some(response) = maybe_response { + match response { + Ok(resp) => { + if tx_sender.send(Ok(resp.clone())).await.is_err() { + debug!( + subscriber_id, + "transactions_with_proofs=client_disconnected" + ); + return false; + } else { + trace!( + event = ?resp, + subscriber_id, + handle_id, + "transactions_with_proofs=forward_transaction_event_success" + ); + } + } + Err(status) => { + let _ = tx_sender.send(Err(status.clone())).await; + debug!( + subscriber_id, + error = %status, + "transactions_with_proofs=send_error_to_client" + ); + return false; + } + } + } else { + trace!( + subscriber_id, + handle_id, "transactions_with_proofs=no_response_event" + ); + } + + true + } + + async fn build_transaction_merkle_response( + filter: &FilterType, + raw_block: &[u8], + handle_id: &str, + state: Option<&TransactionStreamState>, + ) -> Result { + use dashcore_rpc::dashcore::consensus::encode::{deserialize, serialize}; + + let response = match filter { + FilterType::CoreAllTxs => { + if let Ok(block) = deserialize::(raw_block) { + let match_flags = vec![true; block.txdata.len()]; + let bytes = build_merkle_block_bytes(&block, &match_flags).unwrap_or_else(|e| { + debug!( + handle_id, + block_hash = %block.block_hash(), + error = %e, + "transactions_with_proofs=live_merkle_build_failed_fallback_raw_block" + ); + serialize(&block) + }); + TransactionsWithProofsResponse { + responses: Some(Responses::RawMerkleBlock(bytes)), + } + } else { + TransactionsWithProofsResponse { + responses: Some(Responses::RawMerkleBlock(raw_block.to_vec())), + } + } + } + FilterType::CoreBloomFilter(bloom, flags) => { + if let Ok(block) = deserialize::(raw_block) { + let mut 
match_flags = Vec::with_capacity(block.txdata.len()); + for tx in block.txdata.iter() { + let mut matches = + super::bloom::matches_transaction(Arc::clone(bloom), tx, *flags); + // Also include any txids we have already delivered on this stream + if let Some(s) = state.as_ref() { + let txid_bytes = tx.txid().to_byte_array(); + if s.has_transaction_been_delivered(&txid_bytes).await { + matches = true; + } + } + match_flags.push(matches); + } + let bytes = build_merkle_block_bytes(&block, &match_flags).unwrap_or_else(|e| { + debug!( + handle_id, + block_hash = %block.block_hash(), + error = %e, + "transactions_with_proofs=live_merkle_build_failed_fallback_raw_block" + ); + serialize(&block) + }); + TransactionsWithProofsResponse { + responses: Some(Responses::RawMerkleBlock(bytes)), + } + } else { + TransactionsWithProofsResponse { + responses: Some(Responses::RawMerkleBlock(raw_block.to_vec())), + } + } + } + _ => TransactionsWithProofsResponse { + responses: Some(Responses::RawMerkleBlock(raw_block.to_vec())), + }, + }; + + Ok(response) + } + + // Starts a live transaction stream by creating subscriptions for transactions and blocks. + // + // Returns the subscriber ID to be used for debugging/logging purposes. + // + // Spawns a background task to handle the stream. 
+ async fn start_live_transaction_stream( + &self, + filter: FilterType, + tx: TxResponseSender, + state: TransactionStreamState, + ) -> String { + let tx_subscription_handle = self + .subscriber_manager + .add_subscription(filter.clone()) + .await; + let subscriber_id = tx_subscription_handle.id().to_string(); + debug!( + subscriber_id, + "transactions_with_proofs=subscription_created" + ); + + let merkle_block_subscription_handle = self + .subscriber_manager + .add_subscription(FilterType::CoreAllBlocks) + .await; + + debug!( + subscriber_id = merkle_block_subscription_handle.id(), + "transactions_with_proofs=merkle_subscription_created" + ); + + self.workers.spawn(async move { + Self::transaction_worker( + tx_subscription_handle, + merkle_block_subscription_handle, + tx, + filter, + state, + ) + .await + }); + + subscriber_id + } + + async fn handle_transactions_combined_mode( + &self, + from_block: dapi_grpc::core::v0::transactions_with_proofs_request::FromBlock, + filter: FilterType, + tx: TxResponseSender, + ) -> Result<(), Status> { + let state = TransactionStreamState::new(); + + // Will spawn worker thread, gated until historical replay is done + let subscriber_id = self + .start_live_transaction_stream(filter.clone(), tx.clone(), state.clone()) + .await; + + // We need our own worker pool so that we can open the gate once historical sync is done + let mut local_workers = JoinSet::new(); + + // Fetch historical transactions in a separate task + let core_client = self.core_client.clone(); + + // this will add new worked to the local_workers pool + self.spawn_fetch_transactions_history( + Some(from_block), + None, + filter.clone(), + Some(state.clone()), + tx.clone(), + Some(&mut local_workers), + ) + .await?; + + let gate_sender = state.gate_sender.clone(); + + local_workers.spawn( + Self::fetch_mempool_transactions_worker(filter.clone(), tx.clone(), state, core_client) + .map_err(DapiError::from), + ); + + // Now, thread that will wait for all local 
workers to complete and disable the gate + let sub_id = subscriber_id.clone(); + self.workers.spawn(async move { + while let Some(result) = local_workers.join_next().await { + match result { + Ok(Ok(())) => { /* task completed successfully */ } + Ok(Err(e)) => { + debug!(error = %e, subscriber_id=&sub_id, "transactions_with_proofs=worker_task_failed"); + // return error back to caller + let status = e.to_status(); + let _ = tx.send(Err(status)).await; // ignore returned value + return Err(e); + } + Err(e) => { + debug!(error = %e, subscriber_id=&sub_id, "transactions_with_proofs=worker_task_join_failed"); + return Err(DapiError::TaskJoin(e)); + } + } + } + TransactionStreamState::open_gate(&gate_sender); + debug!(subscriber_id=&sub_id, "transactions_with_proofs=historical_sync_completed_gate_opened"); + + Ok(()) + }); + + debug!(subscriber_id, "transactions_with_proofs=stream_ready"); + Ok(()) + } + + /// Spawns new thread that fetches historical transactions starting from the specified block. + async fn spawn_fetch_transactions_history( + &self, + from_block: Option, + limit: Option, + filter: FilterType, + state: Option, + tx: TxResponseSender, + workers: Option<&mut JoinSet>>, // defaults to self.workers if None + ) -> Result<(), Status> { + use std::str::FromStr; + + let from_block = match from_block { + Some(block) => block, + None => return Ok(()), + }; + + let best_height = self + .core_client + .get_block_count() + .await + .map_err(Status::from)? 
as usize; + + let (start_height, count_target) = match from_block { + dapi_grpc::core::v0::transactions_with_proofs_request::FromBlock::FromBlockHash( + hash, + ) => { + let hash_hex = hex::encode(&hash); + let block_hash = dashcore_rpc::dashcore::BlockHash::from_str(&hash_hex) + .map_err(|e| Status::invalid_argument(format!("Invalid block hash: {}", e)))?; + let header = self + .core_client + .get_block_header_info(&block_hash) + .await + .map_err(Status::from)?; + let start = header.height as usize; + let available = best_height.saturating_sub(start).saturating_add(1); + let desired = limit.map_or(available, |limit| limit.min(available)); + debug!( + start, + desired, "transactions_with_proofs=historical_from_hash_request" + ); + (start, desired) + } + dapi_grpc::core::v0::transactions_with_proofs_request::FromBlock::FromBlockHeight( + height, + ) => { + let start = height as usize; + if start == 0 { + return Err(Status::invalid_argument( + "Minimum value for `fromBlockHeight` is 1", + )); + } + if start > best_height { + return Err(Status::not_found(format!("Block {} not found", start))); + } + let available = best_height.saturating_sub(start).saturating_add(1); + let desired = limit.map_or(available, |limit| limit.min(available)); + debug!( + start, + desired, "transactions_with_proofs=historical_from_height_request" + ); + (start, desired) + } + }; + + if count_target == 0 { + return Ok(()); + } + let core_client = self.core_client.clone(); + + let worker = Self::process_transactions_from_height( + start_height, + count_target, + filter, + state, + tx, + core_client, + ) + .map_err(DapiError::from); + + if let Some(workers) = workers { + workers.spawn(worker); + } else { + self.workers.spawn(worker); + } + Ok(()) + } + + /// Starts fetching mempool transactions that match the filter and sends them to the client. + /// + /// Blocking; caller should spawn in a separate task. 
+ async fn fetch_mempool_transactions_worker( + filter: FilterType, + tx: TxResponseSender, + state: TransactionStreamState, + core_client: CoreClient, + ) -> Result<(), Status> { + use dashcore_rpc::dashcore::consensus::encode::serialize; + + let txids = core_client + .get_mempool_txids() + .await + .map_err(Status::from)?; + + if txids.is_empty() { + trace!("transactions_with_proofs=mempool_empty"); + return Ok(()); + } + + let mut matching: Vec> = Vec::new(); + + for txid in txids { + let tx = match core_client.get_raw_transaction(txid).await { + Ok(tx) => tx, + Err(err) => { + debug!(error = %err, "transactions_with_proofs=mempool_tx_fetch_failed"); + continue; + } + }; + + let matches = match &filter { + FilterType::CoreAllTxs => true, + FilterType::CoreBloomFilter(bloom, flags) => { + super::bloom::matches_transaction(Arc::clone(bloom), &tx, *flags) + } + _ => false, + }; + + if !matches { + continue; + } + + let tx_bytes = serialize(&tx); + let txid_bytes = tx.txid().to_byte_array(); + + if !state.mark_transaction_delivered(&txid_bytes).await { + trace!( + txid = %tx.txid(), + "transactions_with_proofs=skip_duplicate_mempool_transaction" + ); + continue; + } + + matching.push(tx_bytes); + } + + if matching.is_empty() { + trace!("transactions_with_proofs=mempool_no_matches"); + return Ok(()); + } + + trace!( + matches = matching.len(), + "transactions_with_proofs=forward_mempool_transactions" + ); + + let raw_transactions = RawTransactions { + transactions: matching, + }; + if tx + .send(Ok(TransactionsWithProofsResponse { + responses: Some(Responses::RawTransactions(raw_transactions)), + })) + .await + .is_err() + { + debug!("transactions_with_proofs=mempool_client_disconnected"); + } + + Ok(()) + } + + async fn process_transactions_from_height( + start_height: usize, + count: usize, + filter: FilterType, + state: Option, + tx: TxResponseSender, + core_client: core_client::CoreClient, + ) -> Result<(), Status> { + use dashcore_rpc::dashcore::Transaction as 
CoreTx; + use dashcore_rpc::dashcore::consensus::encode::deserialize; + + trace!( + start_height, + count, "transactions_with_proofs=historical_begin" + ); + + for i in 0..count { + let height = (start_height + i) as u32; + let hash = match core_client.get_block_hash(height).await { + Ok(h) => h, + Err(e) => { + trace!(height, error = ?e, "transactions_with_proofs=get_block_hash_failed"); + break; + } + }; + trace!( + height, + block_hash = %hash, + "transactions_with_proofs=historical_block_fetched" + ); + + let block = match core_client.get_block_by_hash(hash).await { + Ok(b) => b, + Err(e) => { + trace!(height, error = ?e, "transactions_with_proofs=get_block_raw_with_txs_failed"); + break; + } + }; + + let txs_bytes = match core_client.get_block_transactions_bytes_by_hash(hash).await { + Ok(t) => t, + Err(e) => { + debug!( + height, + block_hash = %hash, + error = ?e, + "transactions_with_proofs=get_block_txs_failed, skipping block" + ); + continue; + } + }; + + let block_hash_bytes = + >::as_ref(&hash).to_vec(); + + let mut matching: Vec> = Vec::new(); + let mut matching_hashes: Vec> = Vec::new(); + let mut match_flags: Vec = Vec::with_capacity(txs_bytes.len()); + + for tx_bytes in txs_bytes.iter() { + let filter_matched = match &filter { + FilterType::CoreAllTxs => true, + FilterType::CoreBloomFilter(bloom, flags) => { + match deserialize::(tx_bytes.as_slice()) { + Ok(tx) => { + let matches = super::bloom::matches_transaction( + Arc::clone(bloom), + &tx, + *flags, + ); + trace!(height,matches, txid = %tx.txid(), "transactions_with_proofs=bloom_match"); + matches + } + Err(e) => { + debug!(height, error = %e, "transactions_with_proofs=tx_deserialize_failed, checking raw-bytes contains()"); + let guard = bloom.read().unwrap(); + guard.contains(tx_bytes) + } + } + } + _ => false, + }; + // Include previously delivered transactions in PMT regardless of bloom match + let mut matches_for_merkle = filter_matched; + if let Some(state) = state.as_ref() + && let 
Some(hash_bytes) = super::txid_bytes_from_bytes(tx_bytes) + && state.has_transaction_been_delivered(&hash_bytes).await + { + matches_for_merkle = true; + } + + match_flags.push(matches_for_merkle); + // Only send raw transactions when they matched the bloom filter + if filter_matched { + if let Some(hash_bytes) = super::txid_bytes_from_bytes(tx_bytes) { + matching_hashes.push(hash_bytes); + } + matching.push(tx_bytes.clone()); + } + } + + if !matching.is_empty() { + if let Some(state) = state.as_ref() { + state.mark_transactions_delivered(matching_hashes).await; + } + + let raw_transactions = RawTransactions { + transactions: matching, + }; + let response = TransactionsWithProofsResponse { + responses: Some(Responses::RawTransactions(raw_transactions)), + }; + if tx.send(Ok(response)).await.is_err() { + debug!("transactions_with_proofs=historical_client_disconnected"); + return Ok(()); + } + } + + // deliver the merkle block (even if its' empty) + if let Some(state) = state.as_ref() { + state.mark_block_delivered(&block_hash_bytes).await; + } + + let merkle_block_bytes = build_merkle_block_bytes(&block, &match_flags).unwrap_or_else(|e| { + let bh = block.block_hash(); + debug!(height, block_hash = %bh, error = %e, "transactions_with_proofs=merkle_build_failed_fallback_raw_block"); + dashcore_rpc::dashcore::consensus::encode::serialize(&block) + }); + + let response = TransactionsWithProofsResponse { + responses: Some(Responses::RawMerkleBlock(merkle_block_bytes)), + }; + if tx.send(Ok(response)).await.is_err() { + debug!("transactions_with_proofs=historical_client_disconnected"); + return Ok(()); + } + + trace!( + height, + block_hash = %hash, + "transactions_with_proofs=historical_block_delivered" + ); + } + + trace!( + start_height, + count, "transactions_with_proofs=historical_end" + ); + Ok(()) + } +} + +/// Build a serialized MerkleBlock (header + PartialMerkleTree) from full block bytes and +/// a boolean match flag per transaction indicating which txids 
should be included. +fn build_merkle_block_bytes(block: &Block, match_flags: &[bool]) -> Result, String> { + use dashcore_rpc::dashcore::consensus::encode::serialize; + + let header = block.header; + let txids: Vec = block.txdata.iter().map(|t| t.txid()).collect(); + if txids.len() != match_flags.len() { + return Err(format!( + "flags len {} != tx count {}", + match_flags.len(), + txids.len() + )); + } + + let pmt = + dashcore_rpc::dashcore::merkle_tree::PartialMerkleTree::from_txids(&txids, match_flags); + let mb = dashcore_rpc::dashcore::merkle_tree::MerkleBlock { header, txn: pmt }; + Ok(serialize(&mb)) +} +fn parse_bloom_filter( + bloom_filter: &dapi_grpc::core::v0::BloomFilter, +) -> Result< + ( + dashcore_rpc::dashcore::bloom::BloomFilter, + dashcore_rpc::dashcore::bloom::BloomFlags, + ), + Status, +> { + trace!( + n_hash_funcs = bloom_filter.n_hash_funcs, + n_tweak = bloom_filter.n_tweak, + v_data_len = bloom_filter.v_data.len(), + v_data_prefix = %super::short_hex(&bloom_filter.v_data, 16), + "transactions_with_proofs=request_bloom_filter_parsed" + ); + + // Validate bloom filter parameters + if bloom_filter.v_data.is_empty() { + debug!("transactions_with_proofs=bloom_filter_empty"); + return Err(Status::invalid_argument( + "bloom filter data cannot be empty", + )); + } + + if bloom_filter.n_hash_funcs == 0 { + debug!("transactions_with_proofs=bloom_filter_no_hash_funcs"); + return Err(Status::invalid_argument( + "number of hash functions must be greater than 0", + )); + } + + // Create filter from bloom filter parameters + let bloom_filter_clone = bloom_filter.clone(); + let flags = bloom_flags_from_int(bloom_filter_clone.n_flags); + let core_filter = dashcore_rpc::dashcore::bloom::BloomFilter::from_bytes( + bloom_filter_clone.v_data.clone(), + bloom_filter_clone.n_hash_funcs, + bloom_filter_clone.n_tweak, + flags, + ) + .map_err(|e| Status::invalid_argument(format!("invalid bloom filter data: {}", e)))?; + + Ok((core_filter, flags)) +} + +fn 
txid_to_hex(txid: &[u8]) -> String { + let mut buf = txid.to_vec(); + // txid is displayed in reverse byte order (little-endian) + buf.reverse(); + hex::encode(buf) +} + +#[cfg(test)] +mod tests { + use super::*; + use dashcore_rpc::dashcore::{ + Block, BlockHash, CompactTarget, OutPoint, ScriptBuf, Transaction as CoreTx, TxIn, + TxMerkleNode, TxOut, Txid, Witness, + block::{Header as BlockHeader, Version}, + consensus::encode::{deserialize, serialize}, + merkle_tree::MerkleBlock, + }; + use std::time::Duration; + use tokio::time::{sleep, timeout}; + + fn sample_tx(tag: u8) -> CoreTx { + let mut txid_bytes = [0u8; 32]; + txid_bytes[0] = tag; + let out_point = OutPoint::new(Txid::from_byte_array(txid_bytes), 0); + CoreTx { + version: 1, + lock_time: 0, + input: vec![TxIn { + previous_output: out_point, + script_sig: ScriptBuf::new(), + sequence: 0xFFFFFFFF, + witness: Witness::default(), + }], + output: vec![TxOut { + value: tag as u64, + script_pubkey: ScriptBuf::new(), + }], + special_transaction_payload: None, + } + } + + fn sample_block(mut txs: Vec) -> Block { + let header = BlockHeader { + version: Version::ONE, + prev_blockhash: BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0, + bits: CompactTarget::from_consensus(0x1f00ffff), + nonce: 0, + }; + let mut block = Block { + header, + txdata: Vec::new(), + }; + block.txdata.append(&mut txs); + let merkle_root = block + .compute_merkle_root() + .expect("expected at least one transaction"); + block.header.merkle_root = merkle_root; + block + } + + #[tokio::test] + async fn should_dedupe_transactions_blocks_and_instant_locks() { + let state = TransactionStreamState::new(); + let txid = vec![1u8; 32]; + assert!(state.mark_transaction_delivered(&txid).await); + assert!(!state.mark_transaction_delivered(&txid).await); + assert!(state.has_transaction_been_delivered(&txid).await); + + let block_hash = vec![2u8; 32]; + assert!(state.mark_block_delivered(&block_hash).await); + 
assert!(!state.mark_block_delivered(&block_hash).await); + + let lock_txid = vec![3u8; 32]; + assert!(state.mark_instant_lock_delivered(&lock_txid).await); + assert!(!state.mark_instant_lock_delivered(&lock_txid).await); + assert!(state.has_transaction_been_delivered(&lock_txid).await); + } + + #[tokio::test] + async fn should_wait_for_gate_and_flush_pending_events() { + let state = TransactionStreamState::new(); + let waiter = { + let state_clone = state.clone(); + tokio::spawn(async move { + state_clone.wait_for_gate_open().await; + }) + }; + + sleep(Duration::from_millis(10)).await; + TransactionStreamState::open_gate(&state.gate_sender); + timeout(Duration::from_secs(1), waiter) + .await + .expect("wait_for_gate_open did not complete in time") + .expect("wait task panicked"); + + let (tx_sender, mut rx) = mpsc::channel(8); + let mut pending = vec![( + StreamingEvent::CoreRawTransaction { + data: serialize(&sample_tx(7)), + }, + "tx_handle".to_string(), + )]; + + let flushed = StreamingServiceImpl::flush_transaction_pending( + &FilterType::CoreAllTxs, + "subscriber", + &tx_sender, + &state, + &mut pending, + ) + .await; + assert!(flushed); + assert!(pending.is_empty()); + + let response = rx + .recv() + .await + .expect("expected response") + .expect("status ok"); + match response.responses { + Some(Responses::RawTransactions(raw)) => { + assert_eq!(raw.transactions.len(), 1); + } + other => panic!("unexpected response: {:?}", other), + } + } + + #[test] + fn should_build_merkle_block_with_partial_matches() { + let tx_a = sample_tx(10); + let tx_b = sample_tx(11); + let tx_c = sample_tx(12); + let block = sample_block(vec![tx_a.clone(), tx_b.clone(), tx_c.clone()]); + + let merkle_bytes = build_merkle_block_bytes(&block, &[true, false, true]) + .expect("merkle construction should succeed"); + let merkle_block: MerkleBlock = deserialize(&merkle_bytes).expect("valid merkle block"); + + let mut matches = Vec::new(); + let mut indexes = Vec::new(); + merkle_block + 
.extract_matches(&mut matches, &mut indexes) + .expect("extract matches"); + assert_eq!(matches.len(), 2); + assert!(matches.contains(&tx_a.txid())); + assert!(matches.contains(&tx_c.txid())); + } + + #[tokio::test] + async fn should_return_raw_block_on_deserialize_error() { + let raw_block = vec![0xde, 0xad, 0xbe, 0xef]; + let response = StreamingServiceImpl::build_transaction_merkle_response( + &FilterType::CoreAllTxs, + &raw_block, + "handle", + None, + ) + .await + .expect("response should build"); + + match response.responses { + Some(Responses::RawMerkleBlock(bytes)) => assert_eq!(bytes, raw_block), + other => panic!("unexpected response: {:?}", other), + } + } +} diff --git a/packages/rs-dapi/src/services/streaming_service/zmq_listener.rs b/packages/rs-dapi/src/services/streaming_service/zmq_listener.rs new file mode 100644 index 00000000000..9bcf4e0c43d --- /dev/null +++ b/packages/rs-dapi/src/services/streaming_service/zmq_listener.rs @@ -0,0 +1,729 @@ +//! ZMQ listener for Dash Core events +//! +//! This module provides functionality to connect to Dash Core's ZMQ interface. +//! +//! See [`ZmqListener`] for the main entry point. +//! +//! ## Control flow +//! +//! - `ZmqListener::new` creates a new listener and starts the connection task with [`ZmqConnection::new`] +//! - `ZmqConnection::new` establishes a new ZMQ connection and spawns [dispatcher](ZmqDispatcher) +//! and [monitor](ZmqConnection::start_monitor) tasks +//! - Whenever new message arrives, [`ZmqDispatcher`] forwards it through a channel to [`ZmqConnection::recv`] +//! - [`ZmqListener::process_messages`] reads messages from the connection with [`ZmqConnection::recv`] +//! - [`ZmqListener::parse_zmq_message`] parses raw ZMQ messages into structured [`ZmqEvent`] +//! - subscribers subscribe to events via [`ZmqListener::subscribe`] to receive [`ZmqEvent`]s +//! 
+use std::future::Future; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::{Arc, LazyLock}; + +use crate::error::{DAPIResult, DapiError}; +use crate::sync::Workers; +use async_trait::async_trait; +use dashcore_rpc::dashcore::Transaction as CoreTransaction; +use dashcore_rpc::dashcore::consensus::Decodable; +use futures::StreamExt; +use std::io::Cursor; +use tokio::select; +use tokio::sync::Mutex; +use tokio::sync::broadcast; +use tokio::sync::mpsc; +use tokio::time::{Duration, Instant, sleep}; +use tokio_util::sync::CancellationToken; +use tracing::span; +use tracing::{debug, trace}; +use zeromq::SocketEvent; +use zeromq::SubSocket; +use zeromq::ZmqError; +use zeromq::ZmqMessage; +use zeromq::ZmqResult; +use zeromq::prelude::*; + +/// Start time for calculating durations +static START_TIME: LazyLock = LazyLock::new(Instant::now); + +/// ZMQ topics that we subscribe to from Dash Core + +#[derive(Debug, Clone)] +pub struct ZmqTopics { + // pub hashtx: String, -- not used + // pub hashtxlock: String, -- not used + pub hashblock: String, + pub rawblock: String, + pub rawtx: String, + // pub rawtxlock: String, -- not used, it doesn't contain required data, we use rawtxlocksig instead + pub rawtxlocksig: String, + pub rawchainlock: String, + pub rawchainlocksig: String, +} + +impl Default for ZmqTopics { + fn default() -> Self { + Self { + // hashtx: "hashtx".to_string(), + // hashtxlock: "hashtxlock".to_string(), + hashblock: "hashblock".to_string(), + rawblock: "rawblock".to_string(), + rawtx: "rawtx".to_string(), + // rawtxlock: "rawtxlock".to_string(), + rawtxlocksig: "rawtxlocksig".to_string(), + rawchainlock: "rawchainlock".to_string(), + rawchainlocksig: "rawchainlocksig".to_string(), + } + } +} + +impl ZmqTopics { + /// Convert to a vector of topic strings + pub fn to_vec(&self) -> Vec { + vec![ + self.rawtx.clone(), + self.rawblock.clone(), + self.rawtxlocksig.clone(), + self.rawchainlock.clone(), + self.rawchainlocksig.clone(), + 
self.hashblock.clone(), + ] + } +} + +/// Events emitted by the ZMQ listener +#[derive(Debug, Clone)] +pub enum ZmqEvent { + /// Raw transaction data from Dash Core + RawTransaction { data: Vec }, + /// Raw block data from Dash Core + RawBlock { data: Vec }, + /// Raw transaction lock (InstantSend) data + RawTransactionLock { + tx_bytes: Option>, + lock_bytes: Vec, + }, + /// Raw chain lock data + RawChainLock { data: Vec }, + /// New block hash notification + HashBlock { hash: Vec }, +} + +#[derive(Clone)] +struct ZmqConnection { + cancel: CancellationToken, + /// Messages from zmq server, forwarded by [ZmqDispatcher]; consumed in [`ZmqConnection::recv`] + rx: Arc>>, + connected: Arc, + workers: Workers, + subscribed_topics: Vec, +} + +impl Drop for ZmqConnection { + fn drop(&mut self) { + // Cancel the connection when dropped + self.cancel.cancel(); + } +} + +impl ZmqConnection { + /// Create new ZmqConnection with running dispatcher and monitor. + /// + /// Messages will be received using [`ZmqConnection::recv`]. 
+ async fn new( + zmq_uri: &str, + topics: &[String], + connection_timeout: Duration, + parent_cancel: CancellationToken, + ) -> DAPIResult { + // we want to be able to only clean up ZmqConnection threads, without affecting the caller + let cancel = parent_cancel.child_token(); + // ensure the socket is not in use + let mut socket = SubSocket::new(); + + // updated in monitor + let connected = Arc::new(AtomicBool::new(false)); + + let (tx, rx) = mpsc::channel(1000); + + let mut connection = Self { + cancel: cancel.clone(), + rx: Arc::new(Mutex::new(rx)), + connected: connected.clone(), + workers: Workers::default(), + subscribed_topics: Vec::new(), + }; + // Start monitor + connection.start_monitor(socket.monitor()); + + // Set connection timeout + tokio::time::timeout(connection_timeout, async { socket.connect(zmq_uri).await }) + .await + .map_err(|e| { + DapiError::Timeout(format!( + "Upstream ZMQ connect timeout {:.2}s exceeded: {}", + connection_timeout.as_secs_f32(), + e + )) + })? 
+ .map_err(DapiError::ZmqConnection)?; + + connection.zmq_subscribe(&mut socket, topics).await?; + + connection.start_dispatcher(socket, tx); + + Ok(connection) + } + + async fn zmq_subscribe(&mut self, socket: &mut SubSocket, topics: &[String]) -> DAPIResult<()> { + // Subscribe to topics + let mut first_error = None; + + for topic in topics { + let result = socket + .subscribe(topic) + .await + .map_err(DapiError::ZmqConnection); + + match result { + Ok(_) => self.subscribed_topics.push(topic.clone()), + Err(e) => { + first_error.get_or_insert(e); + } + } + } + + if let Some(error) = first_error { + debug!( + ?error, + "ZMQ subscription errors occured, trying to unsubscribe from successful topics", + ); + + if let Err(unsub_err) = self.zmq_unsubscribe_all(socket).await { + debug!(error = %unsub_err, "Unsubscribe during rollback failed; preserving original subscribe error"); + } + // return the first error + return Err(error); + }; + + Ok(()) + } + + /// Unsubscribe from all topics. Returns first error encountered, if any. 
+ async fn zmq_unsubscribe_all(&mut self, socket: &mut SubSocket) -> DAPIResult<()> { + let mut first_error = None; + for topic in &self.subscribed_topics { + if let Err(e) = socket.unsubscribe(topic).await { + trace!( + topic = %topic, + error = %e, + "Error unsubscribing from ZMQ topic", + ); + first_error.get_or_insert(DapiError::ZmqConnection(e)); + } + } + + // Clear the list of subscribed topics; even if errors occurred, we consider ourselves unsubscribed + self.subscribed_topics.clear(); + + first_error.map(Err).unwrap_or(Ok(())) + } + + fn disconnected(&self) { + self.connected.store(false, Ordering::SeqCst); + self.cancel.cancel(); + } + + fn start_dispatcher(&self, socket: SubSocket, tx: mpsc::Sender) { + let cancel = self.cancel.clone(); + + ZmqDispatcher { + socket, + zmq_tx: tx, + cancel: cancel.clone(), + connected: self.connected.clone(), + last_recv: Arc::new(AtomicU64::new(0)), + } + .spawn(&self.workers); + } + + /// Start monitor that will get connection updates. + fn start_monitor(&self, mut monitor: futures::channel::mpsc::Receiver) { + let connected = self.connected.clone(); + let cancel = self.cancel.clone(); + // Start the monitor to listen for connection events + self.workers.spawn(with_cancel(cancel.clone(), async move { + while let Some(event) = monitor.next().await { + if let Err(e) = Self::monitor_event(event, connected.clone(), cancel.clone()).await + { + debug!(error = %e, "ZMQ monitor event error"); + } + } + debug!("ZMQ monitor channel closed, stopping monitor"); + Err::<(), _>(DapiError::ConnectionClosed) + })); + } + + /// Act on monitor event + async fn monitor_event( + event: SocketEvent, + connected: Arc, + cancel: CancellationToken, + ) -> DAPIResult<()> { + // Get a monitor from the socket + let span = span!(tracing::Level::TRACE, "zmq_monitor"); + let _span = span.enter(); + + match event { + zeromq::SocketEvent::Connected(endpoint, peer) => { + trace!(endpoint = %endpoint, peer = hex::encode(peer), "ZMQ socket connected"); 
+ connected.store(true, Ordering::SeqCst); + } + zeromq::SocketEvent::Disconnected(peer) => { + debug!( + peer = hex::encode(peer), + "ZMQ socket disconnected, requesting restart" + ); + // this does NOT work, we never receive a Disconnected event + // See [`ZmqDispatcher::tick_event_10s`] for workaround we use + connected.store(false, Ordering::SeqCst); + cancel.cancel(); + } + zeromq::SocketEvent::Closed => { + debug!("ZMQ socket closed, requesting restart"); + connected.store(false, Ordering::SeqCst); + cancel.cancel(); + } + zeromq::SocketEvent::ConnectRetried => { + debug!("ZMQ connection retry attempt"); + } + _ => { + // Log other events for debugging + tracing::trace!("ZMQ socket event: {:?}", event); + } + } + + Ok(()) + } +} + +#[async_trait] +impl SocketRecv for ZmqConnection { + async fn recv(&mut self) -> ZmqResult { + let mut rx = self.rx.lock().await; + let received = rx.recv().await; + drop(rx); // unlock + + match received { + Some(msg) => return Ok(msg), + None => { + // If the channel is closed, we should handle it gracefully + self.disconnected(); + return Err(ZmqError::NoMessage); + } + } + } +} + +/// ZMQ listener that connects to Dash Core and streams events. +/// +/// This is the main entry point for ZMQ streaming. 
+pub struct ZmqListener { + zmq_uri: String, + topics: ZmqTopics, + event_sender: broadcast::Sender, + cancel: CancellationToken, + workers: Workers, +} + +impl ZmqListener { + pub fn new(zmq_uri: &str) -> DAPIResult { + let (event_sender, _event_receiver) = broadcast::channel(1000); + + let mut instance = Self { + zmq_uri: zmq_uri.to_string(), + topics: ZmqTopics::default(), + event_sender, + cancel: CancellationToken::new(), + workers: Workers::default(), + }; + instance.connect()?; + Ok(instance) + } + + fn connect(&mut self) -> DAPIResult<()> { + // Start the ZMQ listener in a background task + let zmq_uri = self.zmq_uri.clone(); + let topics = self.topics.to_vec(); + let sender = self.event_sender.clone(); + + let cancel = self.cancel.clone(); + + self.workers.spawn(with_cancel(cancel.clone(), async move { + // we use child token so that cancelling threads started inside zmq_listener_task + // does not cancel the zmq_listener_task itself, as it needs to restart the + // connection if it fails + if let Err(e) = + Self::zmq_listener_task(zmq_uri, topics, sender, cancel.child_token()).await + { + debug!(error = %e, "ZMQ listener task error"); + // we cancel parent task to stop all spawned threads + cancel.cancel(); + } + Err::<(), _>(DapiError::ConnectionClosed) + })); + + Ok(()) + } + + /// Subscribe to ZMQ events and return a receiver for them + pub fn subscribe(&self) -> DAPIResult> { + Ok(self.event_sender.subscribe()) + } + + /// Check if the ZMQ listener is connected (placeholder) + pub fn is_running(&self) -> bool { + !self.cancel.is_cancelled() + } + /// ZMQ listener task that runs asynchronously + async fn zmq_listener_task( + zmq_uri: String, + topics: Vec, + sender: broadcast::Sender, + cancel_parent: CancellationToken, + ) -> DAPIResult<()> { + let mut retry_count = 0; + let mut delay = Duration::from_millis(1000); // Start with 1 second delay + + loop { + // We don't want to cancel parent task by mistake + let cancel = cancel_parent.child_token(); + 
+ if cancel.is_cancelled() { + debug!("ZMQ listener task cancelled, exiting"); + return Err(DapiError::ConnectionClosed); + } + + // Try to establish connection + match ZmqConnection::new(&zmq_uri, &topics, Duration::from_secs(5), cancel).await { + Ok(mut connection) => { + retry_count = 0; // Reset retry count on successful connection + delay = Duration::from_millis(1000); // Reset delay + trace!("ZMQ connected to {}", zmq_uri); + + // Listen for messages with connection recovery + + match Self::process_messages(&mut connection, sender.clone()).await { + Ok(_) => { + trace!("ZMQ message processing ended normally"); + } + Err(e) => { + debug!(error = %e, "ZMQ message processing failed"); + continue; // Restart connection + } + } + } + Err(e) => { + debug!(error = %e, "ZMQ connection failed"); + retry_count += 1; + + debug!( + "ZMQ connection attempt {} failed: {}. Retrying in {:?}", + retry_count, e, delay + ); + sleep(delay).await; + + // Exponential backoff with jitter, capped at 300 seconds + delay = std::cmp::min(delay * 2, Duration::from_secs(300)); + } + } + } + } + + /// After successful connection, start the message processing workers that will process messages + /// + /// Errors returned by this method are critical and should cause the listener to restart + async fn process_messages( + connection: &mut ZmqConnection, + sender: broadcast::Sender, + ) -> DAPIResult<()> { + tracing::trace!("ZMQ worker waiting for messages"); + + loop { + let message = connection.recv().await; + + match message { + Ok(msg) => { + let frames: Vec> = msg + .into_vec() + .into_iter() + .map(|bytes| bytes.to_vec()) + .collect(); + if let Some(event) = Self::parse_zmq_message(frames) { + let summary = super::summarize_zmq_event(&event); + tracing::trace!(event = %summary, "Received ZMQ event"); + if let Err(e) = sender.send(event) { + tracing::trace!("Cannot send ZMQ event, dropping: {}", e); + } + } + } + Err(ZmqError::NoMessage) => { + // No message received + tracing::debug!("No 
ZMQ message received, connection closed? Exiting worker"); + return Err(DapiError::ConnectionClosed); + } + Err(e) => { + debug!(error = %e, "Error receiving ZMQ message"); + return Err(DapiError::ZmqConnection(e)); + } + } + } + } + + /// Parse ZMQ message frames into events + fn parse_zmq_message(frames: Vec>) -> Option { + tracing::trace!(frames_count = frames.len(), "Parsing new ZMQ message"); + if frames.len() < 2 { + return None; + } + + let topic = String::from_utf8_lossy(&frames[0]); + let data = frames[1].clone(); + + match topic.as_ref() { + "rawtx" => Some(ZmqEvent::RawTransaction { data }), + "rawblock" => Some(ZmqEvent::RawBlock { data }), + "rawtxlocksig" => { + tracing::trace!( + data = hex::encode(&data), + "Parsing rawtxlocksig ZMQ message" + ); + let (tx_bytes, lock_bytes_opt) = split_tx_and_lock(data); + if let Some(lock_bytes) = lock_bytes_opt + && !lock_bytes.is_empty() + { + Some(ZmqEvent::RawTransactionLock { + tx_bytes, + lock_bytes, + }) + } else { + debug!("rawtxlocksig payload missing instant lock bytes"); + None + } + } + // We ignore rawtxlock, we need rawtxlocksig only + // "rawtxlock" => Some(ZmqEvent::RawTransactionLock { data }), + "rawchainlocksig" => Some(ZmqEvent::RawChainLock { data }), + // Some Core builds emit rawchainlock without signature suffix + "rawchainlock" => Some(ZmqEvent::RawChainLock { data }), + "hashblock" => Some(ZmqEvent::HashBlock { hash: data }), + _ => { + debug!("Unknown ZMQ topic: {}", topic); + None + } + } + } +} + +fn split_tx_and_lock(data: Vec) -> (Option>, Option>) { + let mut cursor = Cursor::new(data.as_slice()); + match CoreTransaction::consensus_decode(&mut cursor) { + Ok(_) => { + let consumed = cursor.position() as usize; + if consumed >= data.len() { + // Transaction consumed all bytes, no lock data present + (Some(data), None) + } else { + let lock_bytes = data[consumed..].to_vec(); + let tx_bytes = data[..consumed].to_vec(); + (Some(tx_bytes), Some(lock_bytes)) + } + } + Err(_) => (None, 
Some(data)), + } +} + +impl Drop for ZmqListener { + fn drop(&mut self) { + // Cancel all running tasks when dropped + self.cancel.cancel(); + } +} + +/// ZMQ dispatcher that receives messages from the socket and forwards them +/// to the provided sender (usually ZmqListener). +struct ZmqDispatcher { + socket: SubSocket, + /// Sender to forward received ZMQ messages, consumed by [ZmqConnection::recv] + zmq_tx: mpsc::Sender, + /// Cancellation token to stop all spawned threads; cancelled when the connection is lost + cancel: CancellationToken, + connected: Arc, + /// Time of last received message, in seconds since [START_TIME] + last_recv: Arc, +} + +impl ZmqDispatcher { + /// Create a new ZmqDispatcher + fn spawn(self, workers: &Workers) { + let cancel = self.cancel.clone(); + workers.spawn(with_cancel(cancel, self.dispatcher_worker())); + } + + /// Receive messages from the ZMQ socket and dispatch them to the provided sender. + /// It also supports connection health monitoring. + async fn dispatcher_worker(mut self) -> DAPIResult<()> { + let mut interval_10s = tokio::time::interval(Duration::from_secs(10)); + interval_10s.reset(); + + loop { + select! { + msg = self.socket.recv() => { + match msg { + Ok(msg) => + { + if let Err(e) = self.zmq_tx.send(msg).await { + debug!(error = %e, "Error sending ZMQ event to receiver, receiver may have exited"); + // receiver exited? 
I think it is fatal, we exit as it makes no sense to continue + self.connected.store(false, Ordering::SeqCst); + self.cancel.cancel(); + return Err(DapiError::ClientGone("ZMQ receiver exited".to_string())); + } else { + // update last received timestamp + self.last_recv_update(); + } + }, + Err(e) => { + debug!(error = %e, "Error receiving ZMQ message, restarting connection"); + // most likely the connection is lost, we exit as this will abort the task anyway + self.connected.store(false, Ordering::SeqCst); + self.cancel.cancel(); + + return Err(DapiError::ConnectionClosed); + } + } + } + _ = interval_10s.tick() => { + self.tick_event_10s().await; + } + }; + } + } + + /// Event that happens every ten seconds to check connection status + async fn tick_event_10s(&mut self) { + // if we have received a message in less than 10s, we are connected + if self.last_recv_elapsed() < Duration::from_secs(10) { + self.connected.store(true, Ordering::SeqCst); + return; + } + + // fallback to subscribing to some dummy `ping` topic. + // This is a hack to ensure the connection is alive, as the monitor fails to notify us about disconnects. + let current_status = self.socket.subscribe("ping").await.is_ok(); + // Unsubscribe immediately to avoid resource waste + self.socket + .unsubscribe("ping") + .await + .inspect_err(|e| { + debug!(error = %e, "Error unsubscribing from ping topic during health check"); + }) + .ok(); + + // If the status changed, log it + let previous_status = self.connected.swap(current_status, Ordering::SeqCst); + if current_status != previous_status { + if current_status { + debug!("ZMQ connection recovered"); + } else { + debug!("ZMQ connection is lost, connection will be restarted"); + // disconnect the socket + self.cancel.cancel(); + } + } + + // if we are connected, we assume last_recv is now + if current_status { + self.last_recv_update(); + } + } + + /// Get duration since last received message. + /// Defaults to [START_TIME] on error. 
+ fn last_recv_elapsed(&self) -> Duration { + let now = Instant::now(); + let start_time = *START_TIME; + + let last_recv_secs = self.last_recv.load(Ordering::Relaxed); + let last_recv = START_TIME + .checked_add(Duration::from_secs(last_recv_secs)) + .unwrap_or_else(|| { + tracing::warn!(?start_time, ?now, "zmq last receive time out of bounds"); + *START_TIME + }); + + now.duration_since(last_recv) + } + + /// Update the last received timestamp + fn last_recv_update(&self) { + let duration = Instant::now().duration_since(*START_TIME); + + self.last_recv.store(duration.as_secs(), Ordering::Relaxed); + } +} + +/// Helper function to run a future with cancellation support. +async fn with_cancel( + cancel: CancellationToken, + future: impl Future>, +) -> DAPIResult { + select! { + _ = cancel.cancelled() => { + debug!("Cancelled before future completed"); + Err(DapiError::ConnectionClosed) + } + result = future => result, + } +} + +#[cfg(test)] +mod tests { + use super::split_tx_and_lock; + use super::*; + use dpp::dashcore::consensus::Decodable; + use dpp::dashcore::{InstantLock, Transaction}; + use hex::FromHex; + + #[test] + fn test_zmq_topics_default() { + let topics = ZmqTopics::default(); + assert_eq!(topics.rawtx, "rawtx"); + assert_eq!(topics.rawblock, "rawblock"); + } + + #[tokio::test] + async fn test_zmq_listener_creation() { + let listener = ZmqListener::new("tcp://127.0.0.1:28332").unwrap(); + assert_eq!(listener.zmq_uri, "tcp://127.0.0.1:28332"); + } + + #[test] + fn split_tx_and_lock_extracts_components() { + let hex_bytes = 
"03000800014d6d36c50d484aa79f7db080f971c3f6845407f652c7d5865756017fa06969c1010000006a47304402200136894a2ebb4967cf2766c10e238d69c53c24bf330758e4432eb4753def03de02202a2afb05475a064a419a6cc1c582e3504fcb36c2e22b610b5d320f7656573f7f0121028fdb0a3f730bb20f477536d98ca830efa56412dd05992c801219ba0ff35ad530ffffffff028801030000000000026a00288d9500000000001976a9148d40dfe30494080a1c1187c74066956043ff13fb88ac0000000024010188010300000000001976a914aa85a9fb4f84bc63046a574ac4f2ce3361f0db0d88ac01014d6d36c50d484aa79f7db080f971c3f6845407f652c7d5865756017fa06969c1010000008155cc5d9fe5da3b0508c28d02c88fb6d3d4cf44ef4ffcd77162afa338d1a181ad7300e92255a7a7cf031d6de6bac99df9f1b94735ea603b3f03060c3ebf1f37acc4c1d8ddea77f3d4d816e467571f51ae216715fb3e47d68831adeee6aa1640b26cdf085bb8dd0b4920d15eed83e8c50de8b4b0508db47f08451f7807194d68758a92b367ef6074b516336f689c75c5e22b87aa71d50157875f1018a305a957"; + let data = Vec::from_hex(hex_bytes).expect("hex should decode"); + + let (tx_bytes, lock_bytes) = split_tx_and_lock(data); + + assert!(tx_bytes.is_some(), "transaction bytes should be extracted"); + // Parse tx_bytes to ensure it's valid + let tx = Transaction::consensus_decode(&mut Cursor::new(tx_bytes.as_ref().unwrap())) + .expect("transaction bytes should decode"); + assert_eq!(tx.version, 3, "transaction version should be 3"); + + // Parse lock_bytes to ensure it's valid + assert!( + lock_bytes.as_ref().is_some_and(|b| !b.is_empty()), + "instant lock bytes should be present for rawtxlocksig payloads" + ); + InstantLock::consensus_decode(&mut Cursor::new(lock_bytes.as_ref().unwrap())) + .expect("instant asset lock should be correct"); + } +} diff --git a/packages/rs-dapi/src/sync.rs b/packages/rs-dapi/src/sync.rs new file mode 100644 index 00000000000..5b04fcea103 --- /dev/null +++ b/packages/rs-dapi/src/sync.rs @@ -0,0 +1,362 @@ +use std::fmt::Debug; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use 
tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}; +use tokio::sync::{Mutex, Notify, OnceCell, oneshot}; +use tokio::task::{AbortHandle, JoinError, JoinSet}; + +use crate::{DapiError, metrics}; + +/// Boxed worker future accepted by the worker manager task. +type WorkerTask = Pin> + Send>>; + +/// Guard that keeps worker metrics and counters balanced. +struct WorkerMetricsGuard { + task_count: Arc, +} + +impl WorkerMetricsGuard { + /// Increase the active worker metric and return a guard that will decrement on drop. + fn new(task_count: Arc) -> Self { + metrics::workers_active_inc(); + task_count.fetch_add(1, Ordering::SeqCst); + Self { task_count } + } +} + +impl Drop for WorkerMetricsGuard { + /// Decrease the active worker metric when the guard leaves scope. + fn drop(&mut self) { + metrics::workers_active_dec(); + self.task_count.fetch_sub(1, Ordering::SeqCst); + } +} + +/// Async worker pool for managing background tasks. +/// +/// The pool uses a command pattern: [`Workers`] handles send spawn requests +/// to a [`WorkerManager`] task that owns a [`JoinSet`]. The manager continuously +/// drains completed tasks and returns [`AbortHandle`]s to callers via oneshot channels. + +#[derive(Clone)] +pub struct Workers { + inner: Arc, +} + +/// Internal state shared with the worker manager task. +struct WorkersInner { + sender: UnboundedSender, + task_count: Arc, +} + +/// Request sent to the manager describing a worker spawn operation. +enum WorkerCommand { + Spawn { + task: WorkerTask, + response: oneshot::Sender, + }, +} + +/// Debug implementation that reports the number of active workers. +impl Debug for Workers { + /// Display the number of active worker tasks. + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let workers = self.inner.task_count.load(Ordering::SeqCst) as i64; + write!(f, "Workers {{ num_workers: {workers} }}") + } +} + +impl Workers { + /// Create a new worker pool backed by a shared `JoinSet`. 
+ pub fn new() -> Self { + let task_count = Arc::new(AtomicUsize::new(0)); + let (sender, receiver) = unbounded_channel(); + WorkerManager::spawn(receiver); + Self { + inner: Arc::new(WorkersInner { sender, task_count }), + } + } + + /// Spawn a new task into the join set while tracking metrics and error conversion. + pub fn spawn(&self, fut: F) -> WorkerTaskHandle + where + F: Future> + Send + 'static, + O: Send + 'static, + E: Debug + Into + Send + 'static, + { + let task_count = self.inner.task_count.clone(); + let metrics_guard = WorkerMetricsGuard::new(task_count); + let task = async move { + let _metrics_guard = metrics_guard; + match fut.await { + Ok(_) => Ok(()), + Err(e) => Err(e.into()), + } + }; + + let (response_tx, response_rx) = oneshot::channel(); + let handle = WorkerTaskHandle::new(response_rx); + + if let Err(err) = self.inner.sender.send(WorkerCommand::Spawn { + task: Box::pin(task), + response: response_tx, + }) { + tracing::error!(error=?err, "Failed to dispatch worker task to manager"); + handle.notify_failure(); + } + + handle + } +} + +impl Default for Workers { + /// Construct a new worker pool using the default configuration. + fn default() -> Self { + Self::new() + } +} + +/// Provides a lazy abort handle for a spawned worker task. +pub struct WorkerTaskHandle { + inner: Arc, +} + +/// Shared handshake state between worker handles and the manager. +struct WorkerTaskHandleInner { + handle: OnceCell>, + receiver: Mutex>>, + notify: Notify, +} + +impl WorkerTaskHandle { + /// Create a handle that waits for the manager to return an abort handle. + fn new(receiver: oneshot::Receiver) -> Self { + let inner = WorkerTaskHandleInner { + handle: OnceCell::new(), + receiver: Mutex::new(Some(receiver)), + notify: Notify::new(), + }; + Self { + inner: Arc::new(inner), + } + } + + /// Notify any waiters that the spawn request could not be fulfilled. 
+ fn notify_failure(&self) { + if self.inner.handle.set(Err(())).is_ok() { + self.inner.notify.notify_waiters(); + } + } + + /// Abort the background task once its handle becomes available. + pub async fn abort(&self) { + if let Some(handle) = self.get_handle().await { + handle.abort(); + } + } + + /// Fetch the abort handle from the manager, waiting if necessary. + async fn get_handle(&self) -> Option { + if let Some(result) = self.inner.handle.get() { + return result.clone().ok(); + } + + if let Some(receiver) = self.take_receiver().await { + let outcome = receiver.await.map_err(|_| ()); + match &outcome { + Ok(handle) => { + let _ = self.inner.handle.set(Ok(handle.clone())); + } + Err(_) => { + let _ = self.inner.handle.set(Err(())); + } + } + self.inner.notify.notify_waiters(); + return outcome.ok(); + } + + self.inner.notify.notified().await; + self.inner + .handle + .get() + .and_then(|result| result.clone().ok()) + } + + /// Remove the pending receiver so only one waiter consumes the response. + async fn take_receiver(&self) -> Option> { + let mut guard = self.inner.receiver.lock().await; + guard.take() + } +} + +/// Task that owns the JoinSet and coordinates worker execution. +struct WorkerManager { + receiver: UnboundedReceiver, +} + +impl WorkerManager { + /// Start a background manager that processes worker commands. + fn spawn(receiver: UnboundedReceiver) { + tokio::spawn(async move { + Self { receiver }.run().await; + }); + } + + /// Main event loop: accept work and join completed tasks. + async fn run(mut self) { + let mut join_set = JoinSet::new(); + + loop { + if join_set.is_empty() { + match self.receiver.recv().await { + Some(WorkerCommand::Spawn { task, response }) => { + let abort_handle = join_set.spawn(task); + let _ = response.send(abort_handle); + } + None => break, + } + } else { + tokio::select! 
{ + cmd = self.receiver.recv() => { + match cmd { + Some(WorkerCommand::Spawn { task, response }) => { + let abort_handle = join_set.spawn(task); + let _ = response.send(abort_handle); + } + None => break, + } + } + join_result = join_set.join_next() => { + if let Some(result) = join_result { + Self::handle_result(result); + } + } + } + } + } + + while let Some(result) = join_set.join_next().await { + Self::handle_result(result); + } + } + + /// Handle task completion results, emitting appropriate logs. + fn handle_result(result: Result, JoinError>) { + match result { + Ok(Ok(())) => {} + Ok(Err(error)) => { + tracing::error!(error=?error, "Worker task exited with error"); + } + Err(join_error) if join_error.is_cancelled() => { + tracing::debug!("Worker task cancelled during shutdown"); + } + Err(join_error) => { + tracing::error!(error=?join_error, "Worker task panicked or failed to join"); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::Arc; + use std::sync::atomic::Ordering; + use tokio::sync::{Notify, oneshot}; + use tokio::time::{Duration, sleep, timeout}; + + struct DropGuard(Option>); + + impl Drop for DropGuard { + fn drop(&mut self) { + if let Some(tx) = self.0.take() { + let _ = tx.send(()); + } + } + } + + async fn wait_for_active_count(workers: &Workers, expected: usize) { + for _ in 0..50 { + if workers.inner.task_count.load(Ordering::SeqCst) == expected { + return; + } + sleep(Duration::from_millis(10)).await; + } + panic!( + "active worker count did not reach {expected}, last value {}", + workers.inner.task_count.load(Ordering::SeqCst) + ); + } + + #[tokio::test(flavor = "multi_thread")] + async fn worker_executes_task_and_cleans_up() { + let workers = Workers::new(); + let (tx, rx) = oneshot::channel(); + + workers.spawn(async move { + let _ = tx.send(()); + Ok::<(), DapiError>(()) + }); + + timeout(Duration::from_secs(1), rx) + .await + .expect("worker did not run") + .expect("worker task dropped sender"); + + 
wait_for_active_count(&workers, 0).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn aborting_worker_cancels_future() { + let workers = Workers::new(); + let (drop_tx, drop_rx) = oneshot::channel(); + let notify = Arc::new(Notify::new()); + let ready = Arc::new(Notify::new()); + let ready_wait = ready.notified(); + + let worker_notify = notify.clone(); + let worker_ready = ready.clone(); + let handle = workers.spawn(async move { + let _guard = DropGuard(Some(drop_tx)); + worker_ready.notify_one(); + worker_notify.notified().await; + Ok::<(), DapiError>(()) + }); + + timeout(Duration::from_secs(1), ready_wait) + .await + .expect("worker did not signal readiness"); + + timeout(Duration::from_secs(1), handle.abort()) + .await + .expect("abort timed out"); + + timeout(Duration::from_secs(1), drop_rx) + .await + .expect("worker did not drop after abort") + .expect("drop receiver cancelled"); + + wait_for_active_count(&workers, 0).await; + } + + #[tokio::test(flavor = "multi_thread")] + async fn worker_error_still_clears_active_count() { + let workers = Workers::new(); + let (tx, rx) = oneshot::channel(); + + workers.spawn(async move { + let _ = tx.send(()); + Err::<(), DapiError>(DapiError::Internal("boom".into())) + }); + + timeout(Duration::from_secs(1), rx) + .await + .expect("worker did not run") + .expect("worker task dropped sender"); + + wait_for_active_count(&workers, 0).await; + } +} diff --git a/packages/rs-dapi/src/utils.rs b/packages/rs-dapi/src/utils.rs new file mode 100644 index 00000000000..35a7a90ed2c --- /dev/null +++ b/packages/rs-dapi/src/utils.rs @@ -0,0 +1,163 @@ +use serde::Deserialize; +use serde::de::{Error as DeError, Visitor}; +use serde_json::Value; +use std::fmt; +use std::marker::PhantomData; +use std::str::FromStr; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +static JSONRPC_ID_COUNTER: AtomicU64 = AtomicU64::new(0); + +pub fn generate_jsonrpc_id() -> String { + let 
elapsed = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)); + let timestamp_ns = elapsed.as_nanos(); + let pid = std::process::id(); + let counter = JSONRPC_ID_COUNTER.fetch_add(1, Ordering::Relaxed); + + format!("{timestamp_ns}-{pid}-{counter}") +} + +pub fn deserialize_string_or_number<'de, D, T>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, + T: FromStr, + ::Err: fmt::Display, +{ + struct StringOrNumberVisitor(PhantomData); + + impl<'de, T> Visitor<'de> for StringOrNumberVisitor + where + T: FromStr, + ::Err: fmt::Display, + { + type Value = T; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string, integer, float, or boolean") + } + + fn visit_str(self, v: &str) -> Result + where + E: DeError, + { + T::from_str(v).map_err(|e| DeError::custom(format!("invalid value: {}", e))) + } + + fn visit_string(self, v: String) -> Result + where + E: DeError, + { + self.visit_str(&v) + } + + fn visit_u64(self, v: u64) -> Result + where + E: DeError, + { + self.visit_string(v.to_string()) + } + + fn visit_i64(self, v: i64) -> Result + where + E: DeError, + { + self.visit_string(v.to_string()) + } + + fn visit_f64(self, v: f64) -> Result + where + E: DeError, + { + self.visit_string(v.to_string()) + } + + fn visit_bool(self, v: bool) -> Result + where + E: DeError, + { + self.visit_string(v.to_string()) + } + } + + deserializer.deserialize_any(StringOrNumberVisitor(PhantomData)) +} + +pub fn deserialize_to_string<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + struct ToStringVisitor; + + impl<'de> Visitor<'de> for ToStringVisitor { + type Value = String; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string, integer, float, or boolean") + } + + fn visit_str(self, v: &str) -> Result + where + E: DeError, + { + Ok(v.to_string()) + } + + fn visit_string(self, v: String) -> Result + where 
+ E: DeError, + { + Ok(v) + } + + fn visit_u64(self, v: u64) -> Result + where + E: DeError, + { + Ok(v.to_string()) + } + + fn visit_i64(self, v: i64) -> Result + where + E: DeError, + { + Ok(v.to_string()) + } + + fn visit_f64(self, v: f64) -> Result + where + E: DeError, + { + Ok(v.to_string()) + } + + fn visit_bool(self, v: bool) -> Result + where + E: DeError, + { + Ok(v.to_string()) + } + } + + deserializer.deserialize_any(ToStringVisitor) +} + +pub fn deserialize_string_number_or_null<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let value = Option::::deserialize(deserializer)?; + + match value { + None | Some(Value::Null) => Ok(String::new()), + Some(Value::String(s)) => Ok(s), + Some(Value::Number(n)) => Ok(n.to_string()), + Some(Value::Bool(b)) => Ok(b.to_string()), + Some(other) => Err(DeError::custom(format!( + "expected string, number, bool, or null but got {}", + other + ))), + } +} diff --git a/packages/rs-dash-event-bus/Cargo.toml b/packages/rs-dash-event-bus/Cargo.toml new file mode 100644 index 00000000000..266286a87a7 --- /dev/null +++ b/packages/rs-dash-event-bus/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "rs-dash-event-bus" +version = "2.1.0-pr.2716.1" +edition = "2024" +license = "MIT" +description = "Shared event bus and Platform events multiplexer for Dash Platform (rs-dapi, rs-drive-abci, rs-sdk)" + +[lib] +name = "dash_event_bus" +path = "src/lib.rs" + +[features] +default = [] +metrics = ["dep:metrics"] + +[dependencies] +tokio = { version = "1", features = ["rt", "macros", "sync", "time"] } +tracing = "0.1" + + +# Optional metrics +metrics = { version = "0.24.2", optional = true } + +[dev-dependencies] +tokio = { version = "1", features = [ + "rt-multi-thread", + "macros", + "sync", + "time", +] } diff --git a/packages/rs-dash-event-bus/src/event_bus.rs b/packages/rs-dash-event-bus/src/event_bus.rs new file mode 100644 index 00000000000..640ba838a8e --- /dev/null +++ 
b/packages/rs-dash-event-bus/src/event_bus.rs @@ -0,0 +1,528 @@ +//! Generic, clonable in-process event bus with pluggable filtering. + +use std::collections::BTreeMap; +use std::fmt::Debug; +use std::sync::Arc; +use std::sync::atomic::{AtomicU64, Ordering}; + +use tokio::sync::mpsc::error::TrySendError; +use tokio::sync::{Mutex, RwLock, mpsc}; + +const DEFAULT_SUBSCRIPTION_CAPACITY: usize = 256; + +/// Filter trait for event matching on a specific event type. +pub trait Filter: Send + Sync { + /// Return true if the event matches the filter. + fn matches(&self, event: &E) -> bool; +} + +/// Internal subscription structure. +/// +/// Note: no Clone impl, so that dropping the sender closes the channel. +struct Subscription { + filter: F, + sender: mpsc::Sender, +} + +/// Generic, clonable in-process event bus with pluggable filtering. +pub struct EventBus { + subs: Arc>>>, + counter: Arc, + tasks: Arc>>, // tasks spawned for this subscription, cancelled on drop + channel_capacity: usize, +} + +impl Clone for EventBus { + fn clone(&self) -> Self { + Self { + subs: Arc::clone(&self.subs), + counter: Arc::clone(&self.counter), + tasks: Arc::clone(&self.tasks), + channel_capacity: self.channel_capacity, + } + } +} + +impl Default for EventBus +where + E: Clone + Send + 'static, + F: Filter + Send + Sync + Debug + 'static, +{ + fn default() -> Self { + Self::new() + } +} + +impl EventBus { + /// Remove a subscription by id and update metrics. + pub async fn remove_subscription(&self, id: u64) { + let mut subs = self.subs.write().await; + if subs.remove(&id).is_some() { + metrics_unsubscribe_inc(); + metrics_active_gauge_set(subs.len()); + tracing::debug!("event_bus: removed subscription id={}", id); + } else { + tracing::debug!("event_bus: subscription id={} not found, not removed", id); + } + } +} + +impl EventBus +where + E: Clone + Send + 'static, + F: Filter + Debug + Send + Sync + 'static, +{ + /// Create a new, empty event bus. 
+ pub fn new() -> Self { + Self::with_capacity(DEFAULT_SUBSCRIPTION_CAPACITY) + } + + /// Create a new event bus with a custom per-subscription channel capacity. + pub fn with_capacity(capacity: usize) -> Self { + metrics_register_once(); + Self { + subs: Arc::new(RwLock::new(BTreeMap::new())), + counter: Arc::new(AtomicU64::new(0)), + tasks: Arc::new(Mutex::new(tokio::task::JoinSet::new())), + channel_capacity: capacity.max(1), + } + } + + /// Add a new subscription using the provided filter. + pub async fn add_subscription(&self, filter: F) -> SubscriptionHandle { + tracing::trace!(?filter, "event_bus: adding subscription"); + + let id = self.counter.fetch_add(1, Ordering::SeqCst); + let (tx, rx) = mpsc::channel::(self.channel_capacity); + + let sub = Subscription { filter, sender: tx }; + + { + let mut subs = self.subs.write().await; + subs.insert(id, sub); + metrics_active_gauge_set(subs.len()); + metrics_subscribe_inc(); + } + tracing::debug!(sub_id = id, "event_bus: added subscription"); + + SubscriptionHandle { + id, + rx: Arc::new(Mutex::new(rx)), + drop: true, + event_bus: self.clone(), + } + } + + /// Publish an event to all subscribers whose filters match, using + /// the current Tokio runtime if available, otherwise log a warning. + /// + /// This is a best-effort, fire-and-forget variant of `notify`. + pub fn notify_sync(&self, event: E) { + let bus = self.clone(); + if let Ok(handle) = tokio::runtime::Handle::try_current() { + handle.spawn(async move { + bus.notify(event).await; + }); + } else { + tracing::warn!("event_bus.notify_sync: no current tokio runtime"); + } + } + + /// Publish an event to all subscribers whose filters match. 
+ pub async fn notify(&self, event: E) { + metrics_events_published_inc(); + + let mut targets = Vec::new(); + { + let subs_guard = self.subs.read().await; + for (id, sub) in subs_guard.iter() { + if sub.filter.matches(&event) { + targets.push((*id, sub.sender.clone())); + } + } + } + + if targets.is_empty() { + return; + } + + let mut dead = Vec::new(); + + for (id, sender) in targets.into_iter() { + let payload = event.clone(); + + match sender.try_send(payload) { + Ok(()) => { + metrics_events_delivered_inc(); + tracing::trace!(subscription_id = id, "event_bus: event delivered"); + } + Err(TrySendError::Full(_value)) => { + metrics_events_dropped_inc(); + tracing::warn!( + subscription_id = id, + "event_bus: subscriber queue full, removing laggy subscriber to protect others" + ); + // Drop the event for this subscriber and remove subscription + dead.push(id); + } + Err(TrySendError::Closed(_value)) => { + metrics_events_dropped_inc(); + dead.push(id); + } + } + } + + for id in dead { + tracing::debug!( + subscription_id = id, + "event_bus: removing dead subscription" + ); + self.remove_subscription(id).await; + } + } + + /// Get the current number of active subscriptions. + pub async fn subscription_count(&self) -> usize { + self.subs.read().await.len() + } + + /// Copy all event messages from an unbounded mpsc receiver into the event bus. + pub async fn copy_from_unbounded_mpsc(&self, mut rx: mpsc::UnboundedReceiver) { + let bus = self.clone(); + let mut tasks = self.tasks.lock().await; + tasks.spawn(async move { + while let Some(event) = rx.recv().await { + bus.notify(event).await; + } + }); + } +} + +/// RAII subscription handle; dropping the last clone removes the subscription. 
+pub struct SubscriptionHandle +where + E: Send + 'static, + F: Send + Sync + 'static, +{ + id: u64, + rx: Arc>>, + event_bus: EventBus, + drop: bool, // true only for primary handles +} + +impl Clone for SubscriptionHandle +where + E: Send + 'static, + F: Send + Sync + 'static, +{ + fn clone(&self) -> Self { + Self { + id: self.id, + rx: Arc::clone(&self.rx), + event_bus: self.event_bus.clone(), + drop: self.drop, + } + } +} + +impl SubscriptionHandle +where + E: Send + 'static, + F: Send + Sync + 'static, +{ + /// Get the unique ID of this subscription. + pub fn id(&self) -> u64 { + self.id + } + + /// Receive the next event for this subscription. + pub async fn recv(&self) -> Option { + let mut rx = self.rx.lock().await; + rx.recv().await + } + + /// Disable automatic unsubscription when the last handle is dropped. + /// + /// By default, dropping the final [`SubscriptionHandle`] removes the + /// subscription from the [`EventBus`]. Calling this method keeps the + /// subscription registered so that the caller can explicitly remove it + /// via [`EventBus::remove_subscription`]. + pub fn no_unsubscribe_on_drop(mut self) -> Self { + self.drop = false; + self + } +} + +impl Drop for SubscriptionHandle +where + E: Send + 'static, + F: Send + Sync + 'static, +{ + fn drop(&mut self) { + if self.drop { + // Remove only when the last clone of this handle is dropped. + // As we are in a Drop impl, strong_count == 1 means that it cannot be cloned anymore, + // so no race condition is possible. 
+ if Arc::strong_count(&self.rx) == 1 { + let bus = self.event_bus.clone(); + let id = self.id; + + // Prefer removing via Tokio if a runtime is available + if let Ok(handle) = tokio::runtime::Handle::try_current() { + handle.spawn(async move { + bus.remove_subscription(id).await; + tracing::trace!("event_bus: removed subscription id={} on drop", id); + }); + } else { + // Fallback: best-effort synchronous removal using try_write() + tracing::debug!( + "event_bus: no current tokio runtime, not removing subscription id={}", + id + ); + } + } + } + } +} + +// ---- Metrics helpers (gated) ---- + +#[cfg(feature = "metrics")] +mod met { + use metrics::{counter, describe_counter, describe_gauge, gauge}; + use std::sync::Once; + + pub const ACTIVE_SUBSCRIPTIONS: &str = "event_bus_active_subscriptions"; + pub const SUBSCRIBE_TOTAL: &str = "event_bus_subscribe_total"; + pub const UNSUBSCRIBE_TOTAL: &str = "event_bus_unsubscribe_total"; + pub const EVENTS_PUBLISHED_TOTAL: &str = "event_bus_events_published_total"; + pub const EVENTS_DELIVERED_TOTAL: &str = "event_bus_events_delivered_total"; + pub const EVENTS_DROPPED_TOTAL: &str = "event_bus_events_dropped_total"; + + pub fn register_metrics_once() { + static ONCE: Once = Once::new(); + ONCE.call_once(|| { + describe_gauge!( + ACTIVE_SUBSCRIPTIONS, + "Current number of active event bus subscriptions" + ); + describe_counter!( + SUBSCRIBE_TOTAL, + "Total subscriptions created on the event bus" + ); + describe_counter!( + UNSUBSCRIBE_TOTAL, + "Total subscriptions removed from the event bus" + ); + describe_counter!( + EVENTS_PUBLISHED_TOTAL, + "Total events published to the event bus" + ); + describe_counter!( + EVENTS_DELIVERED_TOTAL, + "Total events delivered to subscribers" + ); + describe_counter!( + EVENTS_DROPPED_TOTAL, + "Total events dropped due to dead subscribers" + ); + }); + } + + pub fn active_gauge_set(n: usize) { + gauge!(ACTIVE_SUBSCRIPTIONS).set(n as f64); + } + pub fn subscribe_inc() { + 
counter!(SUBSCRIBE_TOTAL).increment(1); + } + pub fn unsubscribe_inc() { + counter!(UNSUBSCRIBE_TOTAL).increment(1); + } + pub fn events_published_inc() { + counter!(EVENTS_PUBLISHED_TOTAL).increment(1); + } + pub fn events_delivered_inc() { + counter!(EVENTS_DELIVERED_TOTAL).increment(1); + } + pub fn events_dropped_inc() { + counter!(EVENTS_DROPPED_TOTAL).increment(1); + } +} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_register_once() { + met::register_metrics_once() +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_register_once() {} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_active_gauge_set(n: usize) { + met::active_gauge_set(n) +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_active_gauge_set(_n: usize) {} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_subscribe_inc() { + met::subscribe_inc() +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_subscribe_inc() {} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_unsubscribe_inc() { + met::unsubscribe_inc() +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_unsubscribe_inc() {} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_events_published_inc() { + met::events_published_inc() +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_events_published_inc() {} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_events_delivered_inc() { + met::events_delivered_inc() +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_events_delivered_inc() {} + +#[cfg(feature = "metrics")] +#[inline] +fn metrics_events_dropped_inc() { + met::events_dropped_inc() +} +#[cfg(not(feature = "metrics"))] +#[inline] +fn metrics_events_dropped_inc() {} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::time::{Duration, timeout}; + + #[derive(Clone, Debug, PartialEq)] + enum Evt { + Num(u32), + } + + #[derive(Clone, Debug)] + struct EvenOnly; + + impl Filter for EvenOnly { + fn matches(&self, e: &Evt) -> bool { + matches!(e, Evt::Num(n) if n % 
2 == 0) + } + } + + #[tokio::test] + async fn basic_subscribe_and_notify() { + let bus: EventBus = EventBus::new(); + let sub = bus.add_subscription(EvenOnly).await; + + bus.notify(Evt::Num(1)).await; // filtered out + bus.notify(Evt::Num(2)).await; // delivered + + let got = timeout(Duration::from_millis(200), sub.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(got, Evt::Num(2)); + } + + #[tokio::test] + async fn drop_removes_subscription() { + let bus: EventBus = EventBus::new(); + let sub = bus.add_subscription(EvenOnly).await; + assert_eq!(bus.subscription_count().await, 1); + drop(sub); + + for _ in 0..10 { + if bus.subscription_count().await == 0 { + break; + } + tokio::time::sleep(Duration::from_millis(20)).await; + } + assert_eq!(bus.subscription_count().await, 0); + } + + #[tokio::test] + async fn multiple_events_delivered() { + let bus: EventBus = EventBus::new(); + let sub = bus.add_subscription(EvenOnly).await; + + bus.notify(Evt::Num(2)).await; + bus.notify(Evt::Num(12)).await; + + let a = timeout(Duration::from_millis(200), sub.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(a, Evt::Num(2)); + let b = timeout(Duration::from_millis(200), sub.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(b, Evt::Num(12)); + } + + #[tokio::test] + async fn no_unsubscribe_on_drop_allows_manual_cleanup() { + let bus: EventBus = EventBus::new(); + let handle = bus + .add_subscription(EvenOnly) + .await + .no_unsubscribe_on_drop(); + let id = handle.id(); + + drop(handle); + // Automatic removal should not happen + assert_eq!(bus.subscription_count().await, 1); + + bus.remove_subscription(id).await; + assert_eq!(bus.subscription_count().await, 0); + } + + #[tokio::test] + async fn unsubscribe() { + let bus: EventBus = EventBus::new(); + let sub = bus.add_subscription(EvenOnly).await; + + bus.notify(Evt::Num(2)).await; + bus.notify(Evt::Num(12)).await; + + bus.remove_subscription(sub.id()).await; + + bus.notify(Evt::Num(3)).await; // not delivered as we 
already unsubscribed + + let a = timeout(Duration::from_millis(200), sub.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(a, Evt::Num(2)); + let b = timeout(Duration::from_millis(200), sub.recv()) + .await + .unwrap() + .unwrap(); + assert_eq!(b, Evt::Num(12)); + + let c = timeout(Duration::from_millis(200), sub.recv()).await; + assert!(c.unwrap().is_none(), "only two events should be received",); + } +} diff --git a/packages/rs-dash-event-bus/src/lib.rs b/packages/rs-dash-event-bus/src/lib.rs new file mode 100644 index 00000000000..ab950920409 --- /dev/null +++ b/packages/rs-dash-event-bus/src/lib.rs @@ -0,0 +1,8 @@ +//! rs-dash-event-bus: shared event bus and Platform events multiplexer +//! +//! - `event_bus`: generic in-process pub/sub with pluggable filtering +//! - `event_mux`: upstream bi-di gRPC multiplexer for Platform events + +pub mod event_bus; + +pub use event_bus::{EventBus, Filter, SubscriptionHandle}; diff --git a/packages/rs-dpp/Cargo.toml b/packages/rs-dpp/Cargo.toml index ca9c51ccafe..8547477c6a8 100644 --- a/packages/rs-dpp/Cargo.toml +++ b/packages/rs-dpp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dpp" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true authors = [ @@ -37,6 +37,7 @@ dashcore-rpc = { git = "https://github.com/dashpay/rust-dashcore", tag = "v0.40. 
env_logger = { version = "0.11" } getrandom = { version = "0.2", features = ["js"] } + hex = { version = "0.4" } integer-encoding = { version = "4.0.0" } itertools = { version = "0.13" } diff --git a/packages/rs-dpp/src/identity/get_biggest_possible_identity.rs b/packages/rs-dpp/src/identity/get_biggest_possible_identity.rs index 42ad0469d37..ac1fff4eb0b 100644 --- a/packages/rs-dpp/src/identity/get_biggest_possible_identity.rs +++ b/packages/rs-dpp/src/identity/get_biggest_possible_identity.rs @@ -1,9 +1,7 @@ -use getrandom::getrandom; - use crate::prelude::Identifier; fn generate_random_identifier_struct() -> Identifier { let mut buffer = [0u8; 32]; - let _ = getrandom(&mut buffer); + let _ = getrandom::getrandom(&mut buffer); Identifier::from_bytes(&buffer).unwrap() } diff --git a/packages/rs-dpp/src/tests/fixtures/get_dpns_document_fixture.rs b/packages/rs-dpp/src/tests/fixtures/get_dpns_document_fixture.rs index d68f1d0842a..9c406cb9e90 100644 --- a/packages/rs-dpp/src/tests/fixtures/get_dpns_document_fixture.rs +++ b/packages/rs-dpp/src/tests/fixtures/get_dpns_document_fixture.rs @@ -1,6 +1,5 @@ use std::collections::BTreeMap; -use getrandom::getrandom; use platform_value::{Identifier, Value}; use crate::document::document_factory::DocumentFactory; @@ -42,7 +41,7 @@ pub fn get_dpns_parent_document_fixture( let document_factory = DocumentFactory::new(protocol_version).expect("expected to get document factory"); let mut pre_order_salt = [0u8; 32]; - let _ = getrandom(&mut pre_order_salt); + let _ = getrandom::getrandom(&mut pre_order_salt); let normalized_label = convert_to_homograph_safe_chars(options.label.as_str()); @@ -94,7 +93,7 @@ pub fn get_dpns_parent_extended_document_fixture( let document_factory = DocumentFactory::new(protocol_version).expect("expected to get document factory"); let mut pre_order_salt = [0u8; 32]; - let _ = getrandom(&mut pre_order_salt); + let _ = getrandom::getrandom(&mut pre_order_salt); let normalized_label = 
convert_to_homograph_safe_chars(options.label.as_str()); diff --git a/packages/rs-dpp/src/tests/utils/mod.rs b/packages/rs-dpp/src/tests/utils/mod.rs index 3b7d14cb8f5..852a7d6f8b1 100644 --- a/packages/rs-dpp/src/tests/utils/mod.rs +++ b/packages/rs-dpp/src/tests/utils/mod.rs @@ -2,7 +2,6 @@ use anyhow::Result; use dashcore::block::Version; use dashcore::hashes::Hash; use dashcore::{Block, BlockHash, CompactTarget, Header, TxMerkleNode}; -use getrandom::getrandom; use platform_value::Value; #[cfg(test)] use serde_json::Value as JsonValue; @@ -60,7 +59,7 @@ where pub fn generate_random_identifier_struct() -> Identifier { let mut buffer = [0u8; 32]; - let _ = getrandom(&mut buffer); + getrandom::getrandom(&mut buffer).unwrap(); Identifier::from_bytes(&buffer).unwrap() } diff --git a/packages/rs-dpp/src/util/entropy_generator.rs b/packages/rs-dpp/src/util/entropy_generator.rs index 09b18017d3a..ad97f054594 100644 --- a/packages/rs-dpp/src/util/entropy_generator.rs +++ b/packages/rs-dpp/src/util/entropy_generator.rs @@ -1,6 +1,3 @@ -use anyhow::Context; -use getrandom::getrandom; - /// A way to provide external entropy generator. 
pub trait EntropyGenerator { fn generate(&self) -> anyhow::Result<[u8; 32]>; @@ -11,7 +8,8 @@ pub struct DefaultEntropyGenerator; impl EntropyGenerator for DefaultEntropyGenerator { fn generate(&self) -> anyhow::Result<[u8; 32]> { let mut buffer = [0u8; 32]; - getrandom(&mut buffer).context("generating entropy failed")?; + getrandom::getrandom(&mut buffer) + .map_err(|e| anyhow::anyhow!(format!("generating entropy failed: {}", e)))?; Ok(buffer) } } diff --git a/packages/rs-drive-abci/Cargo.toml b/packages/rs-drive-abci/Cargo.toml index a3a1803a8c4..2a3167f01d7 100644 --- a/packages/rs-drive-abci/Cargo.toml +++ b/packages/rs-drive-abci/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "drive-abci" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" authors = [ "Samuel Westrich ", "Ivan Shumkov ", diff --git a/packages/rs-drive-proof-verifier/Cargo.toml b/packages/rs-drive-proof-verifier/Cargo.toml index 845ae469129..8e771f65cf8 100644 --- a/packages/rs-drive-proof-verifier/Cargo.toml +++ b/packages/rs-drive-proof-verifier/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "drive-proof-verifier" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true diff --git a/packages/rs-drive/Cargo.toml b/packages/rs-drive/Cargo.toml index 43836ea9532..ac8a29ed262 100644 --- a/packages/rs-drive/Cargo.toml +++ b/packages/rs-drive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "drive" description = "Dash drive built on top of GroveDB" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" authors = [ "Samuel Westrich ", "Ivan Shumkov ", diff --git a/packages/rs-drive/src/query/conditions.rs b/packages/rs-drive/src/query/conditions.rs index 5f3f70e3668..4b5a7141b21 100644 --- a/packages/rs-drive/src/query/conditions.rs +++ b/packages/rs-drive/src/query/conditions.rs @@ -2051,7 +2051,7 @@ mod tests { let clause = WhereClause { field: "$revision".to_string(), operator: Equal, - value: Value::Float(3.14), + value: Value::Float(3.15), }; let res = 
clause.validate_against_schema(doc_type); assert!(res.is_err()); diff --git a/packages/rs-json-schema-compatibility-validator/Cargo.toml b/packages/rs-json-schema-compatibility-validator/Cargo.toml index 3633cb64080..9040c0d0b3f 100644 --- a/packages/rs-json-schema-compatibility-validator/Cargo.toml +++ b/packages/rs-json-schema-compatibility-validator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "json-schema-compatibility-validator" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true authors = ["Ivan Shumkov "] diff --git a/packages/rs-platform-serialization-derive/Cargo.toml b/packages/rs-platform-serialization-derive/Cargo.toml index e9e6cff571a..ea318b320c9 100644 --- a/packages/rs-platform-serialization-derive/Cargo.toml +++ b/packages/rs-platform-serialization-derive/Cargo.toml @@ -2,7 +2,7 @@ name = "platform-serialization-derive" authors = ["Samuel Westrich "] description = "Bincode serialization and deserialization derivations" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/rs-platform-serialization/Cargo.toml b/packages/rs-platform-serialization/Cargo.toml index b859f849926..142bd106e6b 100644 --- a/packages/rs-platform-serialization/Cargo.toml +++ b/packages/rs-platform-serialization/Cargo.toml @@ -2,7 +2,7 @@ name = "platform-serialization" authors = ["Samuel Westrich "] description = "Bincode based serialization and deserialization" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/rs-platform-value-convertible/Cargo.toml b/packages/rs-platform-value-convertible/Cargo.toml index 11fbbe2c8c6..19adad3f325 100644 --- a/packages/rs-platform-value-convertible/Cargo.toml +++ b/packages/rs-platform-value-convertible/Cargo.toml @@ -2,7 +2,7 @@ name = "platform-value-convertible" authors = ["Samuel Westrich "] description = "Convertion 
to and from platform values" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/rs-platform-value/Cargo.toml b/packages/rs-platform-value/Cargo.toml index cbc3f90e59c..bf6b0a5a5da 100644 --- a/packages/rs-platform-value/Cargo.toml +++ b/packages/rs-platform-value/Cargo.toml @@ -2,7 +2,7 @@ name = "platform-value" authors = ["Samuel Westrich "] description = "A simple value module" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/rs-platform-version/Cargo.toml b/packages/rs-platform-version/Cargo.toml index cc7a7d82a29..2675d3cec06 100644 --- a/packages/rs-platform-version/Cargo.toml +++ b/packages/rs-platform-version/Cargo.toml @@ -2,7 +2,7 @@ name = "platform-version" authors = ["Samuel Westrich "] description = "Versioning library for Platform" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/rs-platform-versioning/Cargo.toml b/packages/rs-platform-versioning/Cargo.toml index 70f4bfb9dee..7231354b5a3 100644 --- a/packages/rs-platform-versioning/Cargo.toml +++ b/packages/rs-platform-versioning/Cargo.toml @@ -2,7 +2,7 @@ name = "platform-versioning" authors = ["Samuel Westrich "] description = "Version derivation" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/rs-platform-wallet/Cargo.toml b/packages/rs-platform-wallet/Cargo.toml index 9856c29a8b4..8417dc1ae6c 100644 --- a/packages/rs-platform-wallet/Cargo.toml +++ b/packages/rs-platform-wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "platform-wallet" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" authors = ["Dash Core Team"] license = "MIT" @@ -29,4 +29,4 @@ indexmap = "2.0" default = ["bls", "eddsa", "manager"] bls = 
["key-wallet/bls"] eddsa = ["key-wallet/eddsa"] -manager = ["key-wallet-manager"] \ No newline at end of file +manager = ["key-wallet-manager"] diff --git a/packages/rs-sdk-ffi/Cargo.toml b/packages/rs-sdk-ffi/Cargo.toml index 29b323bbf3a..679469384cc 100644 --- a/packages/rs-sdk-ffi/Cargo.toml +++ b/packages/rs-sdk-ffi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rs-sdk-ffi" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" authors = ["Dash Core Group "] edition = "2021" license = "MIT" @@ -9,12 +9,16 @@ description = "FFI bindings for Dash Platform SDK - C-compatible interface for c [lib] crate-type = ["staticlib", "cdylib", "rlib"] - [dependencies] -dash-sdk = { path = "../rs-sdk", features = ["dpns-contract", "dashpay-contract"] } +dash-sdk = { path = "../rs-sdk", features = [ + "dpns-contract", + "dashpay-contract", +] } drive-proof-verifier = { path = "../rs-drive-proof-verifier" } -rs-sdk-trusted-context-provider = { path = "../rs-sdk-trusted-context-provider", features = ["dpns-contract"] } +rs-sdk-trusted-context-provider = { path = "../rs-sdk-trusted-context-provider", features = [ + "dpns-contract", +] } simple-signer = { path = "../simple-signer" } # Core SDK integration (always included for unified SDK) @@ -54,11 +58,11 @@ reqwest = { version = "0.12", features = ["json", "rustls-tls-native-roots"] } cbindgen = "0.27" [profile.release] -lto = "fat" # Enable cross-crate optimization -codegen-units = 1 # Single codegen unit for better optimization -strip = "symbols" # Strip debug symbols for smaller size -opt-level = "z" # Optimize for size -panic = "abort" # Required for iOS +lto = "fat" # Enable cross-crate optimization +codegen-units = 1 # Single codegen unit for better optimization +strip = "symbols" # Strip debug symbols for smaller size +opt-level = "z" # Optimize for size +panic = "abort" # Required for iOS [dev-dependencies] hex = "0.4" diff --git a/packages/rs-sdk-trusted-context-provider/Cargo.toml 
b/packages/rs-sdk-trusted-context-provider/Cargo.toml index 84be0b85aec..60a397d104c 100644 --- a/packages/rs-sdk-trusted-context-provider/Cargo.toml +++ b/packages/rs-sdk-trusted-context-provider/Cargo.toml @@ -1,14 +1,18 @@ [package] name = "rs-sdk-trusted-context-provider" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" authors = ["sam@dash.org"] license = "MIT" description = "Trusted HTTP-based context provider for Dash Platform SDK" [dependencies] + dash-context-provider = { path = "../rs-context-provider" } -dpp = { path = "../rs-dpp", default-features = false, features = ["dash-sdk-features", "bls-signatures"] } +dpp = { path = "../rs-dpp", default-features = false, features = [ + "dash-sdk-features", + "bls-signatures", +] } reqwest = { version = "0.12", features = ["json", "rustls-tls"] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" @@ -32,7 +36,7 @@ all-system-contracts = [ "dashpay-contract", "wallet-utils-contract", "token-history-contract", - "keywords-contract" + "keywords-contract", ] # Individual contract features - these enable specific contracts in DPP @@ -45,4 +49,4 @@ keywords-contract = ["dpp/keywords-contract"] [dev-dependencies] tokio = { version = "1.40", features = ["macros", "rt-multi-thread"] } -tokio-test = "0.4.4" \ No newline at end of file +tokio-test = "0.4.4" diff --git a/packages/rs-sdk/Cargo.toml b/packages/rs-sdk/Cargo.toml index 76f46261f7e..3460b0489c7 100644 --- a/packages/rs-sdk/Cargo.toml +++ b/packages/rs-sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dash-sdk" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" [dependencies] @@ -46,7 +46,7 @@ zeroize = { version = "1.8", features = ["derive"] } tokio = { version = "1.40", features = ["macros", "time", "rt-multi-thread"] } [target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3" +js-sys = "0.3.64" [dev-dependencies] rs-dapi-client = { path = "../rs-dapi-client" } diff --git 
a/packages/simple-signer/Cargo.toml b/packages/simple-signer/Cargo.toml index 061159514b0..2336ea2195c 100644 --- a/packages/simple-signer/Cargo.toml +++ b/packages/simple-signer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "simple-signer" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true diff --git a/packages/strategy-tests/Cargo.toml b/packages/strategy-tests/Cargo.toml index f4c8eb2bc36..6aa38750755 100644 --- a/packages/strategy-tests/Cargo.toml +++ b/packages/strategy-tests/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "strategy-tests" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" authors = [ "Samuel Westrich ", "Ivan Shumkov ", diff --git a/packages/token-history-contract/Cargo.toml b/packages/token-history-contract/Cargo.toml index b1b2b9fa278..8d99406ce78 100644 --- a/packages/token-history-contract/Cargo.toml +++ b/packages/token-history-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "token-history-contract" description = "Token history data contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/token-history-contract/package.json b/packages/token-history-contract/package.json index 0463274f7d0..7fbab254447 100644 --- a/packages/token-history-contract/package.json +++ b/packages/token-history-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/token-history-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "The token history contract", "scripts": { "lint": "eslint .", diff --git a/packages/wallet-lib/package.json b/packages/wallet-lib/package.json index 7cd04260abf..58596891980 100644 --- a/packages/wallet-lib/package.json +++ b/packages/wallet-lib/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/wallet-lib", - "version": "9.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Light wallet library for Dash", "main": "src/index.js", 
"unpkg": "dist/wallet-lib.min.js", diff --git a/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsReader.js b/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsReader.js index 5b7b5f1b1f5..f398e535af4 100644 --- a/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsReader.js +++ b/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsReader.js @@ -1,7 +1,11 @@ const { EventEmitter } = require('events'); const GrpcErrorCodes = require('@dashevo/grpc-common/lib/server/error/GrpcErrorCodes'); const { - createBloomFilter, parseRawTransactions, parseRawMerkleBlock, parseRawInstantLocks, + createBloomFilter, + parseRawTransactions, + parseRawMerkleBlock, + parseRawInstantLocks, + getTxHashesFromMerkleBlock, } = require('./utils'); const logger = require('../../../logger'); @@ -160,10 +164,20 @@ class TransactionsReader extends EventEmitter { const transactions = parseRawTransactions(rawTransactions, addresses, this.network); if (transactions.length) { + this.logger.silly('[TransactionsReader] Received historical transactions from stream', { + txids: transactions.map((tx) => tx.hash), + }); this.emit(EVENTS.HISTORICAL_TRANSACTIONS, transactions); } } else if (rawMerkleBlock) { const merkleBlock = parseRawMerkleBlock(rawMerkleBlock); + const blockHash = merkleBlock && merkleBlock.header + ? 
merkleBlock.header.hash + : undefined; + + this.logger.silly('[TransactionsReader] Received historical merkle block from stream', { + blockHash, + }); let rejected = false; let accepted = false; @@ -278,7 +292,9 @@ class TransactionsReader extends EventEmitter { throw new Error(`Invalid fromBlockHeight: ${fromBlockHeight}`); } - const bloomFilter = createBloomFilter(addresses); + let currentAddresses = addresses; + + const bloomFilter = createBloomFilter(currentAddresses); const stream = await this.createContinuousSyncStream(bloomFilter, { fromBlockHeight, count: 0, @@ -287,7 +303,7 @@ class TransactionsReader extends EventEmitter { this.logger.silly('[TransactionsReader] Started continuous sync with', { fromBlockHeight, - _addressesCount: addresses.length, + _addressesCount: currentAddresses.length, }); let lastSyncedBlockHeight = fromBlockHeight; @@ -306,15 +322,16 @@ class TransactionsReader extends EventEmitter { this.cancelStream(stream); this.continuousSyncStream = null; - const newAddresses = [...addresses, ...addressesGenerated]; - addressesGenerated.slice(); + const resumeFromHeight = Math.max(1, lastSyncedBlockHeight); + const newAddresses = [...currentAddresses, ...addressesGenerated]; + currentAddresses = newAddresses; this.logger.silly('[TransactionsReader] New addresses generated. 
Restarting continuous sync with', { - fromBlockHeight, + fromBlockHeight: resumeFromHeight, _addressesCount: newAddresses.length, }); this.startContinuousSync( - fromBlockHeight, + resumeFromHeight, newAddresses, ).then((newStream) => { this.continuousSyncStream = newStream; @@ -336,7 +353,7 @@ class TransactionsReader extends EventEmitter { return; } - const transactions = parseRawTransactions(rawTransactions, addresses, this.network); + const transactions = parseRawTransactions(rawTransactions, currentAddresses, this.network); /** * @param {string[]} newAddresses @@ -346,10 +363,23 @@ class TransactionsReader extends EventEmitter { }; if (transactions.length) { + this.logger.silly('[TransactionsReader] Received continuous transactions from stream', { + txids: transactions.map((tx) => tx.hash), + }); this.emit(EVENTS.NEW_TRANSACTIONS, { transactions, handleNewAddresses }); } } else if (rawMerkleBlock) { const merkleBlock = parseRawMerkleBlock(rawMerkleBlock); + const blockHash = merkleBlock && merkleBlock.header + ? 
merkleBlock.header.hash + : undefined; + + const txids = Array.from(getTxHashesFromMerkleBlock(merkleBlock)); + + this.logger.silly('[TransactionsReader] Received continuous merkle block from stream', { + blockHash, + txids, + }); let rejected = false; let accepted = false; @@ -410,14 +440,15 @@ class TransactionsReader extends EventEmitter { }; const beforeReconnectHandler = (updateArguments) => { + const resumeFromHeight = Math.max(1, lastSyncedBlockHeight); this.logger.silly('[TransactionsReader] Reconnecting to stream with', { - fromBlockHeight: lastSyncedBlockHeight, - _addressesCount: addresses.length, + fromBlockHeight: resumeFromHeight, + _addressesCount: currentAddresses.length, }); updateArguments( - createBloomFilter(addresses), + createBloomFilter(currentAddresses), { - fromBlockHeight: lastSyncedBlockHeight, + fromBlockHeight: resumeFromHeight, count: 0, }, ); diff --git a/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsSyncWorker.js b/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsSyncWorker.js index 94affe7bbdd..df00b61c994 100644 --- a/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsSyncWorker.js +++ b/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/TransactionsSyncWorker.js @@ -616,7 +616,14 @@ class TransactionsSyncWorker extends Worker { chainStore.pruneHeadersMetadata(headerHeight); this.storage.scheduleStateSave(); - this.logger.debug(`[TransactionsSyncWorker#newMerkleBlockHandler] ${$transactionsFound} txs found, ${this.historicalTransactionsToVerify.size} pending to be verified.`); + const pendingTransactions = Array + .from(this.historicalTransactionsToVerify.values()) + .map((tx) => ({ hash: tx.hash })); + + this.logger.debug( + `[TransactionsSyncWorker#newMerkleBlockHandler] ${$transactionsFound} txs found, ${this.historicalTransactionsToVerify.size} pending to be verified.`, + { pendingTransactions }, + ); } /** diff --git 
a/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/utils.js b/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/utils.js index a1a4c0a0b67..0648a2393dc 100644 --- a/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/utils.js +++ b/packages/wallet-lib/src/plugins/Workers/TransactionsSyncWorker/utils.js @@ -84,7 +84,14 @@ const getAddressesToSync = (keyChainStore) => keyChainStore.getKeyChains() */ const parseRawInstantLocks = (rawInstantLocks) => rawInstantLocks .getMessagesList() - .map((instantSendLock) => new InstantLock(Buffer.from(instantSendLock))); + .map((instantSendLock) => { + try { + return new InstantLock(Buffer.from(instantSendLock)); + } catch (e) { + return null; + } + }) + .filter(Boolean); /** * @param merkleBlock diff --git a/packages/wallet-utils-contract/Cargo.toml b/packages/wallet-utils-contract/Cargo.toml index f9de8d5e85d..a2e4714fdd4 100644 --- a/packages/wallet-utils-contract/Cargo.toml +++ b/packages/wallet-utils-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "wallet-utils-contract" description = "Wallet data contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/wallet-utils-contract/package.json b/packages/wallet-utils-contract/package.json index f189011d6ab..2335aaa4e6f 100644 --- a/packages/wallet-utils-contract/package.json +++ b/packages/wallet-utils-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/wallet-utils-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "A contract and helper scripts for Wallet DApp", "scripts": { "lint": "eslint .", diff --git a/packages/wasm-dpp/Cargo.toml b/packages/wasm-dpp/Cargo.toml index db99d6ff06b..fe3c95f5d29 100644 --- a/packages/wasm-dpp/Cargo.toml +++ b/packages/wasm-dpp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "wasm-dpp" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" 
rust-version.workspace = true authors = ["Anton Suprunchuk "] @@ -18,9 +18,9 @@ serde_json = { version = "1.0", features = ["preserve_order"] } # Meantime if you want to update wasm-bindgen you also need to update version in: # - packages/wasm-dpp/scripts/build-wasm.sh # - Dockerfile -wasm-bindgen = { version = "=0.2.100" } -js-sys = "0.3.53" -web-sys = { version = "0.3.6", features = ["console"] } +wasm-bindgen = { version = "=0.2.103" } +js-sys = "0.3.64" +web-sys = { version = "0.3.64", features = ["console"] } thiserror = { version = "2.0.12" } serde-wasm-bindgen = { git = "https://github.com/QuantumExplorer/serde-wasm-bindgen", branch = "feat/not_human_readable" } dpp = { path = "../rs-dpp", default-features = false, features = [ diff --git a/packages/wasm-dpp/README.md b/packages/wasm-dpp/README.md index 3978a288ec6..c1da995eba0 100644 --- a/packages/wasm-dpp/README.md +++ b/packages/wasm-dpp/README.md @@ -39,7 +39,7 @@ Library consumers must ignore class names minification for `@dashevo/wasm-dpp` l - Install [Rust](https://www.rust-lang.org/tools/install) v1.73+ - Add wasm32 target: `$ rustup target add wasm32-unknown-unknown` -- Install wasm-bingen-cli: `cargo install wasm-bindgen-cli@0.2.100` +- Install wasm-bingen-cli: `cargo install wasm-bindgen-cli@0.2.103` - *double-check that wasm-bindgen-cli version above matches wasm-bindgen version in Cargo.lock file* - *Depending on system, additional packages may need to be installed as a prerequisite for wasm-bindgen-cli. If anything is missing, installation will error and prompt what packages are missing (i.e. 
clang, llvm, libssl-dev)* diff --git a/packages/wasm-dpp/package.json b/packages/wasm-dpp/package.json index aa9c8954479..b9225e216f8 100644 --- a/packages/wasm-dpp/package.json +++ b/packages/wasm-dpp/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/wasm-dpp", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "The JavaScript implementation of the Dash Platform Protocol", "main": "dist/index.js", "types": "dist/index.d.ts", diff --git a/packages/wasm-drive-verify/Cargo.lock b/packages/wasm-drive-verify/Cargo.lock index d706e995215..0f965da861d 100644 --- a/packages/wasm-drive-verify/Cargo.lock +++ b/packages/wasm-drive-verify/Cargo.lock @@ -4026,7 +4026,7 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ @@ -4038,7 +4038,7 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ @@ -4065,7 +4065,7 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ @@ -4075,7 +4075,7 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ @@ -4088,7 +4088,7 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" dependencies = [ diff --git a/packages/wasm-drive-verify/Cargo.toml b/packages/wasm-drive-verify/Cargo.toml index ed6d5a53fe3..9be20b9ddc3 100644 --- a/packages/wasm-drive-verify/Cargo.toml +++ b/packages/wasm-drive-verify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "wasm-drive-verify" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" authors = ["Dash Core Group "] edition = "2021" rust-version = "1.89" @@ -10,7 +10,9 @@ license = "MIT" crate-type = ["cdylib", "rlib"] [dependencies] -drive = { path = "../rs-drive", default-features = false, features = ["verify"] } +drive = { path = "../rs-drive", default-features = false, features = [ + "verify", +] } dpp = { path = "../rs-dpp", default-features = false, features = [ "state-transitions", "data-contracts", @@ -21,26 +23,33 @@ dpp = { path = "../rs-dpp", default-features = false, features = [ "document-json-conversion", "state-transition-serde-conversion", "vote-serde-conversion", - "platform-value-json" + "platform-value-json", ] } -wasm-bindgen = { version = "0.2.89" } -serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } +wasm-bindgen = { version = "=0.2.103" } +serde = { version = "1.0.193", default-features = false, features = [ + "alloc", + "derive", +] } serde_json = { version = "1.0", default-features = false, features = ["alloc"] } serde-wasm-bindgen = { version = "0.6.0" } js-sys = { version = "0.3.64" } -web-sys = { version = "0.3.64", features = ["console", "Window", "Performance"] } +web-sys = { version = "0.3.64", features = [ + "console", + "Window", + "Performance", +] } console_error_panic_hook = { version = "0.1.7", optional = true } ciborium = { version = "0.2.1" } base64 = { version = "0.22.0" } bs58 = { version = "0.5.1" } -indexmap = { version = "2.0.0" } +indexmap = { version = "2.11.4" } nohash-hasher = { version = "0.2.0" } bincode = { version = "2.0.0-rc.3" } [dev-dependencies] 
-wasm-bindgen-test = "0.3.39" -criterion = { version = "0.5", default-features = false, features = [] } +wasm-bindgen-test = "0.3.53" +criterion = { version = "0.7", default-features = false, features = [] } dpp = { path = "../rs-dpp", default-features = false, features = [ "state-transitions", "random-public-keys", @@ -54,7 +63,7 @@ dpp = { path = "../rs-dpp", default-features = false, features = [ "document-json-conversion", "state-transition-serde-conversion", "vote-serde-conversion", - "platform-value-json" + "platform-value-json", ] } js-sys = "0.3.64" web-sys = "0.3.64" @@ -66,7 +75,14 @@ serde_json = "1.0" [features] default = ["console_error_panic_hook", "full"] -full = ["identity", "document", "contract", "tokens", "governance", "transitions"] +full = [ + "identity", + "document", + "contract", + "tokens", + "governance", + "transitions", +] identity = [] document = [] contract = [] @@ -79,4 +95,3 @@ debug_logs = [] name = "simple_benchmarks" harness = false required-features = ["full"] - diff --git a/packages/wasm-drive-verify/benches/simple_benchmarks.rs b/packages/wasm-drive-verify/benches/simple_benchmarks.rs index 9a5f76992bc..566d235d074 100644 --- a/packages/wasm-drive-verify/benches/simple_benchmarks.rs +++ b/packages/wasm-drive-verify/benches/simple_benchmarks.rs @@ -3,9 +3,12 @@ //! This file contains timing benchmarks for various verification functions //! to measure performance characteristics with different proof sizes. 
+use dpp::version::PlatformVersion; use js_sys::Uint8Array; use std::time::Instant; use wasm_bindgen::JsValue; +use wasm_drive_verify::contract_verification::verify_contract::verify_contract as wasm_verify_contract; +use wasm_drive_verify::document_verification::verify_proof::verify_document_proof; // Helper functions fn create_mock_proof(size: usize) -> Uint8Array { @@ -18,10 +21,6 @@ fn create_mock_id(seed: u8) -> Uint8Array { Uint8Array::from(&data[..]) } -fn create_mock_query() -> JsValue { - JsValue::from_str("{}") -} - /// Time a function execution fn time_function(name: &str, iterations: u32, f: F) { let start = Instant::now(); @@ -64,12 +63,25 @@ fn main() { println!("\n=== Document Verification ==="); for (size, label) in &proof_sizes { let proof = create_mock_proof(*size); - let contract_id = create_mock_id(2); - let query = create_mock_query(); - - time_function(&format!("verify_proof ({})", label), 100, || { - use wasm_drive_verify::document_verification::verify_proof; - let _ = verify_proof(&proof, &contract_id, "test_doc", &query, 1); + let contract_js = JsValue::from(create_mock_proof(512)); + + time_function(&format!("verify_document_proof ({})", label), 100, || { + let where_clauses = JsValue::UNDEFINED; + let order_by = JsValue::UNDEFINED; + + let _ = verify_document_proof( + &proof, + &contract_js, + "test_doc", + &where_clauses, + &order_by, + None, + None, + None, + false, + None, + 1, + ); }); } @@ -79,36 +91,25 @@ fn main() { let contract_id = create_mock_id(3); time_function(&format!("verify_contract ({})", label), 100, || { - use wasm_drive_verify::contract_verification::verify_contract; - let _ = verify_contract(&proof, &contract_id, false, 1); + let _ = wasm_verify_contract(&proof, None, false, false, &contract_id, 1); }); } println!("\n=== Platform Version Validation ==="); - time_function( - "get_platform_version_with_validation (all versions)", - 1000, - || { - use 
wasm_drive_verify::utils::platform_version::get_platform_version_with_validation; - for version in 1..=9 { - let _ = get_platform_version_with_validation(version); - } - }, - ); + time_function("PlatformVersion::get (all versions)", 1000, || { + for version in 1..=9 { + let _ = PlatformVersion::get(version); + } + }); println!("\n=== Getter Performance ==="); let data_sizes = vec![32, 256, 1024, 10240]; for size in data_sizes { let data = vec![0u8; size]; - time_function( - &format!("VecU8ToUint8Array::to_uint8array ({}B)", size), - 1000, - || { - use wasm_drive_verify::utils::getters::VecU8ToUint8Array; - let _ = data.to_uint8array(); - }, - ); + time_function(&format!("Uint8Array::from ({}B)", size), 1000, || { + let _ = Uint8Array::from(&data[..]); + }); } println!("\nBenchmarks complete!"); diff --git a/packages/wasm-drive-verify/package.json b/packages/wasm-drive-verify/package.json index d7717f5d00c..3ff427c8df2 100644 --- a/packages/wasm-drive-verify/package.json +++ b/packages/wasm-drive-verify/package.json @@ -3,7 +3,7 @@ "collaborators": [ "Dash Core Group " ], - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "license": "MIT", "description": "WASM bindings for Drive verify functions", "repository": { diff --git a/packages/wasm-sdk/Cargo.toml b/packages/wasm-sdk/Cargo.toml index 00aa7f1a246..fcc92b35ccb 100644 --- a/packages/wasm-sdk/Cargo.toml +++ b/packages/wasm-sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "wasm-sdk" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" publish = false rust-version.workspace = true @@ -9,30 +9,63 @@ rust-version.workspace = true crate-type = ["cdylib"] [features] -default = ["dpns-contract", "dashpay-contract", "wallet-utils-contract", "token-history-contract", "keywords-contract"] +default = [ + "dpns-contract", + "dashpay-contract", + "wallet-utils-contract", + "token-history-contract", + "keywords-contract", + "mocks", +] mocks = ["dash-sdk/mocks"] # All system contracts 
-all-system-contracts = ["dash-sdk/all-system-contracts", "rs-sdk-trusted-context-provider/all-system-contracts"] +all-system-contracts = [ + "dash-sdk/all-system-contracts", + "rs-sdk-trusted-context-provider/all-system-contracts", +] # Individual contract features -withdrawals-contract = ["dash-sdk/withdrawals-contract", "rs-sdk-trusted-context-provider/withdrawals-contract"] -dpns-contract = ["dash-sdk/dpns-contract", "rs-sdk-trusted-context-provider/dpns-contract"] -dashpay-contract = ["dash-sdk/dashpay-contract", "rs-sdk-trusted-context-provider/dashpay-contract"] -wallet-utils-contract = ["dash-sdk/wallet-utils-contract", "rs-sdk-trusted-context-provider/wallet-utils-contract"] -token-history-contract = ["dash-sdk/token-history-contract", "rs-sdk-trusted-context-provider/token-history-contract"] -keywords-contract = ["dash-sdk/keywords-contract", "rs-sdk-trusted-context-provider/keywords-contract"] +withdrawals-contract = [ + "dash-sdk/withdrawals-contract", + "rs-sdk-trusted-context-provider/withdrawals-contract", +] +dpns-contract = [ + "dash-sdk/dpns-contract", + "rs-sdk-trusted-context-provider/dpns-contract", +] +dashpay-contract = [ + "dash-sdk/dashpay-contract", + "rs-sdk-trusted-context-provider/dashpay-contract", +] +wallet-utils-contract = [ + "dash-sdk/wallet-utils-contract", + "rs-sdk-trusted-context-provider/wallet-utils-contract", +] +token-history-contract = [ + "dash-sdk/token-history-contract", + "rs-sdk-trusted-context-provider/token-history-contract", +] +keywords-contract = [ + "dash-sdk/keywords-contract", + "rs-sdk-trusted-context-provider/keywords-contract", +] token_reward_explanations = ["dash-sdk/token_reward_explanations"] [dependencies] -dash-sdk = { path = "../rs-sdk", features = ["serde", "core_key_wallet"], default-features = false } +dash-sdk = { path = "../rs-sdk", features = [ + "serde", + "core_key_wallet", +], default-features = false } simple-signer = { path = "../simple-signer", features = ["state-transitions"] } -drive = 
{ path = "../rs-drive", default-features = false, features = ["verify"] } +drive = { path = "../rs-drive", default-features = false, features = [ + "verify", +] } console_error_panic_hook = { version = "0.1.6" } thiserror = { version = "2.0.12" } -web-sys = { version = "0.3.4", features = [ +web-sys = { version = "0.3.64", features = [ 'console', 'Document', 'Element', @@ -41,11 +74,14 @@ web-sys = { version = "0.3.4", features = [ 'Window', 'Crypto', ] } -wasm-bindgen = { version = "=0.2.100" } +wasm-bindgen = { version = "=0.2.103" } wasm-bindgen-futures = { version = "0.4.49" } drive-proof-verifier = { path = "../rs-drive-proof-verifier", default-features = false } # TODO: I think it's not needed (LKl) tracing = { version = "0.1.41" } -tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "registry"] } +tracing-subscriber = { version = "0.3", default-features = false, features = [ + "env-filter", + "registry", +] } tracing-wasm = { version = "0.2.1" } wee_alloc = "0.4" platform-value = { path = "../rs-platform-value", features = ["json"] } @@ -60,7 +96,7 @@ bip39 = { version = "2.0", features = ["rand", "all-languages"] } rand = { version = "0.8", features = ["std"] } rs-sdk-trusted-context-provider = { path = "../rs-sdk-trusted-context-provider" } once_cell = "1.19" -js-sys = "0.3" +js-sys = "0.3.64" dapi-grpc = { path = "../dapi-grpc" } rs-dapi-client = { path = "../rs-dapi-client" } hmac = { version = "0.12" } diff --git a/packages/wasm-sdk/package.json b/packages/wasm-sdk/package.json index dd38390fcf6..d2fc768c3f1 100644 --- a/packages/wasm-sdk/package.json +++ b/packages/wasm-sdk/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/wasm-sdk", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "type": "module", "main": "./dist/sdk.js", "types": "./dist/sdk.d.ts", diff --git a/packages/withdrawals-contract/Cargo.toml b/packages/withdrawals-contract/Cargo.toml index da4cc47907d..3d23157c838 100644 --- 
a/packages/withdrawals-contract/Cargo.toml +++ b/packages/withdrawals-contract/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "withdrawals-contract" description = "Witdrawals data contract schema and tools" -version = "2.1.0-dev.8" +version = "2.1.0-pr.2716.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/packages/withdrawals-contract/package.json b/packages/withdrawals-contract/package.json index daa23532988..f5c187bf670 100644 --- a/packages/withdrawals-contract/package.json +++ b/packages/withdrawals-contract/package.json @@ -1,6 +1,6 @@ { "name": "@dashevo/withdrawals-contract", - "version": "2.1.0-dev.8", + "version": "2.1.0-pr.2716.1", "description": "Data Contract to manipulate and track withdrawals", "scripts": { "build": "", diff --git a/scripts/configure_dashmate.sh b/scripts/configure_dashmate.sh index 9f722e1b83d..86156b47d1d 100755 --- a/scripts/configure_dashmate.sh +++ b/scripts/configure_dashmate.sh @@ -15,6 +15,7 @@ ROOT_PATH=$(dirname "$DIR_PATH") #yarn dashmate config set --config=${CONFIG_NAME} docker.baseImage.build.target deps yarn dashmate config set --config=${CONFIG_NAME} platform.drive.abci.docker.build.enabled true yarn dashmate config set --config=${CONFIG_NAME} platform.dapi.api.docker.build.enabled true +yarn dashmate config set --config=${CONFIG_NAME} platform.dapi.rsDapi.docker.build.enabled true yarn dashmate config set --config=${CONFIG_NAME} dashmate.helper.docker.build.enabled true # create tenderdash blocks every 10s to speed up test suite