diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 000000000..ef049ad85 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,46 @@ +name: CI Checks - Benchmarks + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + benchmark: + runs-on: ubuntu-latest + env: + TOOLCHAIN: stable + steps: + - name: Checkout source code + uses: actions/checkout@v3 + - name: Install Rust toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup override set stable + - name: Enable caching for bitcoind + id: cache-bitcoind + uses: actions/cache@v4 + with: + path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-${{ runner.os }}-${{ runner.arch }} + - name: Enable caching for electrs + id: cache-electrs + uses: actions/cache@v4 + with: + path: bin/electrs-${{ runner.os }}-${{ runner.arch }} + key: electrs-${{ runner.os }}-${{ runner.arch }} + - name: Download bitcoind/electrs + if: "(steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" + run: | + source ./scripts/download_bitcoind_electrs.sh + mkdir bin + mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + - name: Set bitcoind/electrs environment variables + run: | + echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + - name: Run benchmarks + run: | + cargo bench diff --git a/.github/workflows/cln-integration.yml b/.github/workflows/cln-integration.yml index 2c427cbde..32e7b74c0 100644 --- a/.github/workflows/cln-integration.yml +++ b/.github/workflows/cln-integration.yml @@ -2,6 +2,10 @@ name: CI Checks - CLN Integration Tests on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: check-cln: runs-on: ubuntu-latest diff --git a/.github/workflows/cron-weekly-rustfmt.yml b/.github/workflows/cron-weekly-rustfmt.yml new file mode 100644 index 000000000..d6326f03b --- /dev/null +++ b/.github/workflows/cron-weekly-rustfmt.yml @@ -0,0 +1,33 @@ +name: Nightly rustfmt + +permissions: + contents: write + pull-requests: write + +on: + schedule: + - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00 + workflow_dispatch: # allows manual triggering +jobs: + format: + name: Nightly rustfmt + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - uses: dtolnay/rust-toolchain@nightly + with: + components: rustfmt + - name: Run Nightly rustfmt + # Run the formatter and manually remove trailing whitespace. 
+ run: cargo +nightly fmt && git ls-files -- '*.rs' -z | xargs sed -E -i'' -e 's/[[:space:]]+$//' + - name: Get the current date + run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_ENV + - name: Create Pull Request + uses: peter-evans/create-pull-request@v7 + with: + author: Fmt Bot + title: Automated nightly rustfmt (${{ env.date }}) + body: | + Automated nightly `rustfmt` changes by [create-pull-request](https://github.com/peter-evans/create-pull-request) GitHub action + commit-message: ${{ env.date }} automated rustfmt nightly + labels: rustfmt diff --git a/.github/workflows/lnd-integration.yml b/.github/workflows/lnd-integration.yml index 219e929b1..f913e92ad 100644 --- a/.github/workflows/lnd-integration.yml +++ b/.github/workflows/lnd-integration.yml @@ -2,6 +2,10 @@ name: CI Checks - LND Integration Tests on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: check-lnd: runs-on: ubuntu-latest @@ -49,4 +53,4 @@ jobs: run: LND_CERT_PATH=$LND_DATA_DIR/tls.cert LND_MACAROON_PATH=$LND_DATA_DIR/data/chain/bitcoin/regtest/admin.macaroon RUSTFLAGS="--cfg lnd_test" cargo test --test integration_tests_lnd -- --exact --show-output env: - LND_DATA_DIR: ${{ env.LND_DATA_DIR }} \ No newline at end of file + LND_DATA_DIR: ${{ env.LND_DATA_DIR }} diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 821f35a68..fb1cf7a74 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -34,15 +34,9 @@ jobs: - name: Install Rust ${{ matrix.toolchain }} toolchain run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - rustup override set ${{ matrix.toolchain }} - name: Check formatting on Rust ${{ matrix.toolchain }} if: matrix.check-fmt run: rustup component add rustfmt && cargo fmt --all -- --check - - name: Pin packages to allow for MSRV - if: matrix.msrv - run: | - cargo update -p home --precise "0.5.9" --verbose # home v0.5.11 requires rustc 1.81 or newer - cargo update -p idna_adapter --precise "1.1.0" --verbose # idna_adapter 1.2 switched to ICU4X, requiring 1.81 and newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml new file mode 100644 index 000000000..3410d09aa --- /dev/null +++ b/.github/workflows/swift.yml @@ -0,0 +1,21 @@ +name: CI Checks - Swift Tests + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-swift: + runs-on: macos-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set default Rust version to stable + run: rustup default stable + + - name: Generate Swift bindings + run: ./scripts/uniffi_bindgen_generate_swift.sh diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index 2a6c63704..8473ed413 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -18,7 +18,7 @@ jobs: env: POSTGRES_DB: postgres POSTGRES_USER: postgres - POSTGRES_PASSWORD: YOU_MUST_CHANGE_THIS_PASSWORD + POSTGRES_PASSWORD: postgres options: >- --health-cmd pg_isready --health-interval 10s @@ -36,47 +36,13 @@ jobs: repository: lightningdevkit/vss-server path: vss-server - - name: Set up Java - uses: actions/setup-java@v3 - with: - distribution: 'corretto' - java-version: '17' - - - 
name: Start Tomcat + - name: Build and Deploy VSS Server run: | - docker run -d --network=host --name tomcat tomcat:latest - - - name: Setup Gradle - uses: gradle/gradle-build-action@v2 - with: - gradle-version: release-candidate - - - name: Create database table - run: | - psql -h localhost -U postgres -d postgres -f ./vss-server/java/app/src/main/java/org/vss/impl/postgres/sql/v0_create_vss_db.sql - env: - PGPASSWORD: YOU_MUST_CHANGE_THIS_PASSWORD - - - name: Build and Deploy VSS - run: | - # Print Info - java -version - gradle --version - - cd vss-server/java - gradle wrapper --gradle-version 8.1.1 - ./gradlew --version - ./gradlew build - - docker cp app/build/libs/vss-1.0.war tomcat:/usr/local/tomcat/webapps/vss.war - cd ../ - - name: Run VSS Integration tests against vss-instance. + cd vss-server/rust + cargo run server/vss-server-config.toml& + - name: Run VSS Integration tests run: | cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" - RUSTFLAGS="--cfg vss_test" cargo build --verbose --color always + RUSTFLAGS="--cfg vss_test" cargo test io::vss_store RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss - - - name: Cleanup - run: | - docker stop tomcat && docker rm tomcat diff --git a/CHANGELOG.md b/CHANGELOG.md index 05813b621..d03401d85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,56 @@ +# 0.7.0 - TODO +This seventh minor release introduces numerous new features, bug fixes, and API improvements. In particular, it adds support for channel splicing and Async Payments, as well as for sourcing chain data from a Bitcoin Core REST backend. + +## Feature and API updates +- Experimental support for channel splicing has been added. (#677) + - **Note**: Splicing-related transactions might currently still get misclassified in the payment store. +- Support for serving and paying static invoices for Async Payments has been added. (#621, #632) +- Sourcing chain data via Bitcoin Core's REST interface is now supported. (#526) +- A new `Builder::set_chain_source_esplora_with_headers` method has been added + that allows specifying headers to be sent to the Esplora backend. (#596) +- The ability to import and merge pathfinding scores has been added. (#449) +- Passing a custom preimage when sending spontaneous payments is now supported. (#549) +- When running in the context of a `tokio` runtime, we now attempt to reuse the + outer runtime context for our main runtime. (#543) +- Specifying a `RouteParametersConfig` when paying BOLT12 offers or sending refunds is now supported. (#702) +- Liquidity service data is now persisted across restarts. (#650) +- The bLIP-52/LSPS2 service now supports the 'client-trusts-LSP' model. (#687) +- The manual-claiming flow is now also supported for JIT invoices. (#608) +- Any key-value stores provided to `Builder::build_with_store` are now + required to implement LDK's `KVStore` as well as `KVStoreSync` interfaces. + (#633) +- The `generate_entropy_mnemonic` method now supports specifying a word count. (#699)
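As a rough illustration of two of the additions above (not itself part of this diff), the following sketch exercises the new word-count parameter (#699) and the custom-preimage spontaneous-payment API (#549). The calls mirror the signatures in `bindings/ldk_node.udl` further below; the exact `WordCount` and `NodeError` import paths are assumptions.

```rust
use bitcoin::secp256k1::PublicKey;
use ldk_node::{generate_entropy_mnemonic, Node, NodeError, WordCount}; // paths assumed
use lightning_types::payment::PaymentPreimage;
use rand::RngCore;

fn demo(node: &Node, peer: PublicKey) -> Result<(), NodeError> {
    // #699: request a 24-word mnemonic. The bindings take a `WordCount?`,
    // so the Rust parameter is assumed to be an `Option<WordCount>`.
    let mnemonic = generate_entropy_mnemonic(Some(WordCount::Words24));
    println!("Generated a {}-word mnemonic", mnemonic.word_count());

    // #549: spontaneous payment with a caller-supplied preimage, as also
    // used by `benches/payments.rs` below.
    let mut bytes = [0u8; 32];
    rand::rng().fill_bytes(&mut bytes);
    let preimage = PaymentPreimage(bytes);
    let _payment_id =
        node.spontaneous_payment().send_with_preimage(10_000, peer, preimage, None)?;
    Ok(())
}
```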
+ +## Bug Fixes and Improvements +- Robustness of the shutdown procedure has been improved, minimizing the risk of blocking during `Node::stop`. (#592, #612, #619, #622) +- The VSS storage backend now supports 'lazy' deletes, allowing it to avoid unnecessary remote calls for certain operations. (#689) +- The encryption and obfuscation scheme used when storing data against a VSS backend has been improved. (#627) +- Transient errors during `bitcoind` RPC chain synchronization are now retried with an exponential back-off. (#588) +- Transactions evicted from the mempool are now correctly handled when syncing via `bitcoind` RPC/REST. (#605) +- When sourcing chain data from a Bitcoin Core backend, we now poll for the + current tip in `Builder::build`, avoiding re-validating the chain from + genesis on first startup. (#706) +- A bug that could result in the node hanging on shutdown when sourcing chain data from a Bitcoin Core backend has been fixed. (#682) +- Unnecessary fee estimation calls to Bitcoin Core RPC are now avoided. (#631) +- The node now persists differential updates instead of re-persisting the full channel monitor, reducing IO load. (#661) +- The previously rather restrictive `MaximumFeeEstimate` was relaxed. (#629) +- The node now listens on all provided listening addresses. (#644) + +## Compatibility Notes +- The minimum supported Rust version (MSRV) has been bumped to `rustc` v1.85. (#606) +- The LDK dependency has been bumped to v0.2. +- The BDK dependency has been bumped to v2.2. (#656) +- The VSS client dependency has been updated to utilize the new `vss-client-ng` crate v0.4. (#627) +- The `rust-bitcoin` dependency has been bumped to v0.32.7. (#656) +- The `uniffi` dependency has been bumped to v0.28.3. (#591) +- The `electrum-client` dependency has been bumped to v0.24.0. (#602) +- For Kotlin/Android builds we now require 16 KB page sizes, ensuring Play Store compatibility. (#625) + +In total, this release features TODO files changed, TODO insertions, TODO +deletions in TODO commits from TODO authors in alphabetical order: + +- TODO TODO + # 0.6.2 - Aug. 14, 2025 This patch release fixes a panic that could have been hit when syncing to a TLS-enabled Electrum server, as well as some minor issues when shutting down diff --git a/Cargo.lock b/Cargo.lock index 040e8bc24..ff8318a35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -53,6 +53,18 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" + [[package]] name = "anyhow" version = "1.0.99" @@ -334,9 +346,9 @@ dependencies = [ [[package]] name = "bdk_wallet" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d30b5dba770184863b5d966ccbc6a11d12c145450be3b6a4435308297e6a12dc" +checksum = "8b172f2caa6311b8172cf99559cd7f7a61cb58834e35e4ca208b3299e7be8bec" dependencies = [ "bdk_chain", "bip39", @@ -402,6 +414,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43d193de1f7487df1914d3a568b772458861d33f9c54249612cc2893d6915054" dependencies = [ "bitcoin_hashes 0.13.0", + "rand 0.8.5", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -584,6 +598,12 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.2.33" @@ -616,6 +636,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20-poly1305" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b4b0fc281743d80256607bd65e8beedc42cb0787ea119c85b81b4c0eab85e5f" + [[package]] name = "chrono" version = "0.4.41" @@ -635,6 +661,33 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "clang-sys" version = "1.8.1" @@ -646,6 +699,31 @@ dependencies = [ "libloading", ] +[[package]] +name = "clap" +version = "4.5.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" + [[package]] name = "clightningrpc" version = "0.3.0-beta.8" @@ -694,9 +772,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "corepc-client" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565724b3d7556c06827b27c468333bbf97e2173097aaed576b5da73faf7a1e04" +checksum = "7755b8b9219b23d166a5897b5e2d8266cbdd0de5861d351b96f6db26bcf415f3" dependencies = [ "bitcoin", "corepc-types", @@ -708,9 +786,9 @@ dependencies = [ [[package]] name = "corepc-node" -version = "0.8.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "596e4ee1ff3616ae3f36e89e0b847604034ba689243cc5e7dd08da395b88ec58" +checksum = "768391062ec3812e223bb3031c5b2fcdd6e0e60b816157f21df82fd3e6617dc0" dependencies = [ "anyhow", "bitcoin_hashes 0.14.0", @@ -727,9 +805,9 @@ dependencies = [ [[package]] name = "corepc-types" -version = "0.8.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a7f2faedffc7c654e348e2da6c6416090525c6072979fee9681d620d1d398a4" +checksum = "c22db78b0223b66f82f92b14345f06307078f76d94b18280431ea9bc6cd9cbb6" dependencies = [ "bitcoin", "serde", @@ -745,12 +823,71 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + 
"serde_json", + "tinytemplate", + "tokio", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338" +dependencies = [ + "cast", + "itertools 0.13.0", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-utils" version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + [[package]] name = "displaydoc" version = "0.2.5" @@ -782,9 +919,9 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "electrsd" -version = "0.35.0" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4cfa920cfdd634934eb26825ef13f520f296769ffc1b045dc7c6a8f37e92c6f" +checksum = "d8926868af723c2819807809e54585992aaea0e26a6f5089ac8c2598eaec8d01" dependencies = [ "bitcoin_hashes 0.14.0", "corepc-client", @@ -793,7 +930,6 @@ dependencies = [ "log", "minreq", "nix", - "which 4.4.2", "zip", ] @@ -1146,6 +1282,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -1654,6 +1801,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" @@ -1707,6 +1863,48 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "ldk-node" version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78165f06c94ecdb568b1850cf075d60049c7b395dce65266ba1362da01511670" +dependencies = [ + "base64 0.22.1", + "bdk_chain", + "bdk_electrum", + "bdk_esplora", + "bdk_wallet", + "bip21", + "bip39", + "bitcoin", + "chrono", + "electrum-client 0.24.0", + "esplora-client 0.11.0", + "esplora-client 0.12.1", + "libc", + "lightning 0.1.5", + "lightning-background-processor 0.1.0", + "lightning-block-sync 0.1.0", + "lightning-invoice 0.33.2", + "lightning-liquidity 0.1.0", + "lightning-net-tokio 0.1.0", + "lightning-persister 0.1.0", + "lightning-rapid-gossip-sync 0.1.0", + "lightning-transaction-sync 0.1.0", + "lightning-types 0.2.0", + "log", + "prost 0.11.9", + "rand 0.8.5", + "reqwest 0.12.23", + "rusqlite", + "rustls 0.23.31", + "serde", + "serde_json", + "tokio", + "vss-client", + "winapi", +] + +[[package]] +name = 
"ldk-node" +version = "0.7.0+git" dependencies = [ "aws-lc-rs", "base64 0.22.1", @@ -1720,26 +1918,28 @@ dependencies = [ "chrono", "clightningrpc", "corepc-node", + "criterion", "electrsd", "electrum-client 0.24.0", - "esplora-client 0.11.0", "esplora-client 0.12.1", + "ldk-node 0.6.2", "libc", - "lightning", - "lightning-background-processor", - "lightning-block-sync", - "lightning-invoice", - "lightning-liquidity", - "lightning-net-tokio", - "lightning-persister", - "lightning-rapid-gossip-sync", - "lightning-transaction-sync", - "lightning-types", + "lightning 0.2.0-rc2", + "lightning-background-processor 0.2.0-rc2", + "lightning-block-sync 0.2.0-rc2", + "lightning-invoice 0.34.0-rc2", + "lightning-liquidity 0.2.0-rc2", + "lightning-macros 0.2.0-rc2", + "lightning-net-tokio 0.2.0-rc2", + "lightning-persister 0.2.0-rc2", + "lightning-rapid-gossip-sync 0.2.0-rc2", + "lightning-transaction-sync 0.2.0-rc2", + "lightning-types 0.3.0-rc2", "lnd_grpc_rust", "log", "proptest", "prost 0.11.9", - "rand 0.8.5", + "rand 0.9.2", "regex", "reqwest 0.12.23", "rusqlite", @@ -1748,7 +1948,7 @@ dependencies = [ "serde_json", "tokio", "uniffi", - "vss-client", + "vss-client-ng", "winapi", ] @@ -1807,8 +2007,25 @@ dependencies = [ "dnssec-prover", "hashbrown 0.13.2", "libm", - "lightning-invoice", - "lightning-types", + "lightning-invoice 0.33.2", + "lightning-types 0.2.0", + "possiblyrandom", +] + +[[package]] +name = "lightning" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e8c5eeef89fe30052a5b77f8ac4a4a8caba85fc04a040e047552efa69270267" +dependencies = [ + "bech32", + "bitcoin", + "dnssec-prover", + "hashbrown 0.13.2", + "libm", + "lightning-invoice 0.34.0-rc2", + "lightning-macros 0.2.0-rc2", + "lightning-types 0.3.0-rc2", "possiblyrandom", "regex", ] @@ -1822,8 +2039,23 @@ dependencies = [ "bitcoin", "bitcoin-io", "bitcoin_hashes 0.14.0", - "lightning", - "lightning-rapid-gossip-sync", + "lightning 0.1.5", + "lightning-rapid-gossip-sync 0.1.0", +] + +[[package]] +name = "lightning-background-processor" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32679998ac462e91a5e7893465cce28991f08127f578e49aff64105a398d8e8b" +dependencies = [ + "bitcoin", + "bitcoin-io", + "bitcoin_hashes 0.14.0", + "lightning 0.2.0-rc2", + "lightning-liquidity 0.2.0-rc2", + "lightning-rapid-gossip-sync 0.2.0-rc2", + "possiblyrandom", ] [[package]] @@ -1834,7 +2066,20 @@ checksum = "baab5bdee174a2047d939a4ca0dc2e1c23caa0f8cab0b4380aed77a20e116f1e" dependencies = [ "bitcoin", "chunked_transfer", - "lightning", + "lightning 0.1.5", + "serde_json", + "tokio", +] + +[[package]] +name = "lightning-block-sync" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ecb497ce9abd43e1df269b31e67bc3a3a7134dac2ef6f8bfb3da2263de90339" +dependencies = [ + "bitcoin", + "chunked_transfer", + "lightning 0.2.0-rc2", "serde_json", "tokio", ] @@ -1847,7 +2092,19 @@ checksum = "11209f386879b97198b2bfc9e9c1e5d42870825c6bd4376f17f95357244d6600" dependencies = [ "bech32", "bitcoin", - "lightning-types", + "lightning-types 0.2.0", + "serde", +] + +[[package]] +name = "lightning-invoice" +version = "0.34.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26f16434d5a379cf65760c7457aa4847758e941a17ba566218450d33f2be8c39" +dependencies = [ + "bech32", + "bitcoin", + "lightning-types 0.3.0-rc2", "serde", ] @@ -1859,9 +2116,25 @@ checksum = 
"bfbed71e656557185f25e006c1bcd8773c5c83387c727166666d3b0bce0f0ca5" dependencies = [ "bitcoin", "chrono", - "lightning", - "lightning-invoice", - "lightning-types", + "lightning 0.1.5", + "lightning-invoice 0.33.2", + "lightning-types 0.2.0", + "serde", + "serde_json", +] + +[[package]] +name = "lightning-liquidity" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c1bf396c85d66f3a29f4cfee5f537c407e8ee3fd84023d891bb93d0cd614fa2" +dependencies = [ + "bitcoin", + "chrono", + "lightning 0.2.0-rc2", + "lightning-invoice 0.34.0-rc2", + "lightning-macros 0.2.0-rc2", + "lightning-types 0.3.0-rc2", "serde", "serde_json", ] @@ -1877,6 +2150,17 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "lightning-macros" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08214352589874b41062db0564dd92a36e6fe1219071eae0e73db4c79ffb324" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "lightning-net-tokio" version = "0.1.0" @@ -1884,7 +2168,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb6a6c93b1e592f1d46bb24233cac4a33b4015c99488ee229927a81d16226e45" dependencies = [ "bitcoin", - "lightning", + "lightning 0.1.5", + "tokio", +] + +[[package]] +name = "lightning-net-tokio" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45bd789aacf2af7be1dc98c22110435c49adf9ec3d55a899abe8807b5d2b3969" +dependencies = [ + "bitcoin", + "lightning 0.2.0-rc2", "tokio", ] @@ -1895,7 +2190,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d80558dc398eb4609b1079044d8eb5760a58724627ff57c6d7c194c78906e026" dependencies = [ "bitcoin", - "lightning", + "lightning 0.1.5", + "windows-sys 0.48.0", +] + +[[package]] +name = "lightning-persister" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f7889e041cab0751b2b2855b7291f4e51b9d78604d3da8fffe347ba08d1ac43" +dependencies = [ + "bitcoin", + "lightning 0.2.0-rc2", + "tokio", "windows-sys 0.48.0", ] @@ -1908,7 +2215,19 @@ dependencies = [ "bitcoin", "bitcoin-io", "bitcoin_hashes 0.14.0", - "lightning", + "lightning 0.1.5", +] + +[[package]] +name = "lightning-rapid-gossip-sync" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "127075fc13e0e2192bd03cdc8ffb65e206ba85a5d693aec27c8e16289a66c633" +dependencies = [ + "bitcoin", + "bitcoin-io", + "bitcoin_hashes 0.14.0", + "lightning 0.2.0-rc2", ] [[package]] @@ -1921,8 +2240,22 @@ dependencies = [ "electrum-client 0.21.0", "esplora-client 0.11.0", "futures", - "lightning", - "lightning-macros", + "lightning 0.1.5", + "lightning-macros 0.1.0", +] + +[[package]] +name = "lightning-transaction-sync" +version = "0.2.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501f967f30e7bbeb130ed613836fb0a5947bad47f2f2b78ea0eb6048a1a5325b" +dependencies = [ + "bitcoin", + "electrum-client 0.24.0", + "esplora-client 0.12.1", + "futures", + "lightning 0.2.0-rc2", + "lightning-macros 0.2.0-rc2", ] [[package]] @@ -1934,6 +2267,15 @@ dependencies = [ "bitcoin", ] +[[package]] +name = "lightning-types" +version = "0.3.0-rc2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f1e5f38e940924f69a148ef52aac984dd721315baa9500aacede62c1d7eafad" +dependencies = [ + "bitcoin", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ 
-2152,6 +2494,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "openssl" version = "0.10.73" @@ -2285,6 +2633,34 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "possiblyrandom" version = "0.2.0" @@ -2624,6 +3000,26 @@ dependencies = [ "rand_core 0.9.3", ] +[[package]] +name = "rayon" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "redox_syscall" version = "0.5.17" @@ -2712,7 +3108,9 @@ checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb" dependencies = [ "base64 0.22.1", "bytes", + "futures-channel", "futures-core", + "futures-util", "http 1.3.1", "http-body 1.0.1", "http-body-util", @@ -2907,6 +3305,15 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -3246,6 +3653,16 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.10.0" @@ -3783,6 +4200,27 @@ dependencies = [ "url", ] +[[package]] +name = "vss-client-ng" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c6bf7f2c3e22e62c638ad7d8c48dd5dc7e79033c5e088bdd797bbc815b29bb" +dependencies = [ + "async-trait", + "base64 0.22.1", + "bitcoin", + "bitcoin_hashes 0.14.0", + "chacha20-poly1305", + "prost 0.11.9", + "prost-build 0.11.9", + "rand 0.8.5", + 
"reqwest 0.12.23", + "serde", + "serde_json", + "tokio", + "url", +] + [[package]] name = "wait-timeout" version = "0.2.1" @@ -3792,6 +4230,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" diff --git a/Cargo.toml b/Cargo.toml index cf909a2f5..2521be7f2 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,11 @@ [package] name = "ldk-node" -version = "0.6.2" +version = "0.7.0+git" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" edition = "2021" +rust-version = "1.85" description = "A ready-to-go node implementation built using LDK." repository = "https://github.com/lightningdevkit/ldk-node/" readme = "README.md" @@ -39,90 +40,63 @@ default = [] # max_level_trace = [] [dependencies] -lightning = { version = "0.1.4", features = ["std"] } -lightning-types = { version = "0.2.0" } -lightning-invoice = { version = "0.33.0", features = ["std"] } -lightning-net-tokio = { version = "0.1.0" } -lightning-persister = { version = "0.1.0" } -lightning-background-processor = { version = "0.1.0", features = ["futures"] } -lightning-rapid-gossip-sync = { version = "0.1.0" } -lightning-block-sync = { version = "0.1.0", features = ["rpc-client", "tokio"] } -lightning-transaction-sync = { version = "0.1.0", features = ["esplora-async-https", "time", "electrum"] } -lightning-liquidity = { version = "0.1.0", features = ["std"] } - # Alby: see https://aws.github.io/aws-lc-rs/platform_support.html aws-lc-rs = { version = "1", features = ["bindgen"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } -#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["std"] } -#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["futures"] } -#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } -#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["rpc-client", "tokio"] } -#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main", features = ["esplora-async-https", "electrum", "time"] } -#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } - -#lightning = { path = "../rust-lightning/lightning", features = ["std"] } -#lightning-types = { path = "../rust-lightning/lightning-types" } -#lightning-invoice = { path = "../rust-lightning/lightning-invoice", features = ["std"] } -#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } -#lightning-persister = { path = "../rust-lightning/lightning-persister" } -#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor", features = ["futures"] } -#lightning-rapid-gossip-sync = { path = 
"../rust-lightning/lightning-rapid-gossip-sync" } -#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync", features = ["rpc-client", "tokio"] } -#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async-https", "electrum", "time"] } -#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity", features = ["std"] } +lightning = { version = "0.2.0-rc1", features = ["std"] } +lightning-types = { version = "0.3.0-rc1" } +lightning-invoice = { version = "0.34.0-rc1", features = ["std"] } +lightning-net-tokio = { version = "0.2.0-rc1" } +lightning-persister = { version = "0.2.0-rc1", features = ["tokio"] } +lightning-background-processor = { version = "0.2.0-rc1" } +lightning-rapid-gossip-sync = { version = "0.2.0-rc1" } +lightning-block-sync = { version = "0.2.0-rc1", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { version = "0.2.0-rc1", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { version = "0.2.0-rc1", features = ["std"] } +lightning-macros = { version = "0.2.0-rc1" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} -bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls"]} -bdk_wallet = { version = "2.0.0", default-features = false, features = ["std", "keys-bip39"]} +bdk_electrum = { version = "0.23.0", default-features = false, features = ["use-rustls-ring"]} +bdk_wallet = { version = "2.2.0", default-features = false, features = ["std", "keys-bip39"]} reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] } rustls = { version = "0.23", default-features = false } rusqlite = { version = "0.31.0", features = ["bundled"] } -bitcoin = "0.32.4" -bip39 = "2.0.0" +bitcoin = "0.32.7" +bip39 = { version = "2.0.0", features = ["rand"] } bip21 = { version = "0.5", features = ["std"], default-features = false } base64 = { version = "0.22.1", default-features = false, features = ["std"] } -rand = "0.8.5" +rand = { version = "0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] } chrono = { version = "0.4", default-features = false, features = ["clock"] } tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } esplora-client = { version = "0.12", default-features = false, features = ["tokio", "async-https-rustls"] } - -# FIXME: This was introduced to decouple the `bdk_esplora` and -# `lightning-transaction-sync` APIs. We should drop it as part of the upgrade -# to LDK 0.2. 
-esplora-client_0_11 = { package = "esplora-client", version = "0.11", default-features = false, features = ["tokio", "async-https-rustls"] } -electrum-client = { version = "0.24.0", default-features = true } +electrum-client = { version = "0.24.0", default-features = false, features = ["proxy", "use-rustls-ring"] } libc = "0.2" uniffi = { version = "0.28.3", features = ["build"], optional = true } serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } serde_json = { version = "1.0.128", default-features = false, features = ["std"] } log = { version = "0.4.22", default-features = false, features = ["std"]} -vss-client = "0.3" +vss-client = { package = "vss-client-ng", version = "0.4" } prost = { version = "0.11.6", default-features = false} [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.1.0", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } -#lightning = { path = "../rust-lightning/lightning", features = ["std", "_test_utils"] } +lightning = { version = "0.2.0-rc1", features = ["std", "_test_utils"] } proptest = "1.0.0" regex = "1.5.6" +criterion = { version = "0.7.0", features = ["async_tokio"] } [target.'cfg(not(no_download))'.dev-dependencies] -electrsd = { version = "0.35.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } +electrsd = { version = "0.36.1", default-features = false, features = ["legacy", "esplora_a33e97e1", "corepc-node_27_2"] } [target.'cfg(no_download)'.dev-dependencies] -electrsd = { version = "0.35.0", default-features = false, features = ["legacy"] } -corepc-node = { version = "0.8.0", default-features = false, features = ["27_2"] } +electrsd = { version = "0.36.1", default-features = false, features = ["legacy"] } +corepc-node = { version = "0.10.0", default-features = false, features = ["27_2"] } [target.'cfg(cln_test)'.dev-dependencies] clightningrpc = { version = "0.3.0-beta.8", default-features = false } @@ -131,6 +105,9 @@ clightningrpc = { version = "0.3.0-beta.8", default-features = false } lnd_grpc_rust = { version = "2.10.0", default-features = false } tokio = { version = "1.37", features = ["fs"] } +[target.'cfg(vss_test)'.dev-dependencies] +ldk-node-062 = { package = "ldk-node", version = "=0.6.2" } + [build-dependencies] uniffi = { version = "0.28.3", features = ["build"], optional = true } @@ -152,3 +129,47 @@ check-cfg = [ "cfg(cln_test)", "cfg(lnd_test)", ] + +[[bench]] +name = "payments" +harness = false + +#[patch.crates-io] +#lightning = { path = "../rust-lightning/lightning" } +#lightning-types = { path = "../rust-lightning/lightning-types" } +#lightning-invoice = { path = "../rust-lightning/lightning-invoice" } +#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +#lightning-persister = { path = "../rust-lightning/lightning-persister" } +#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor" } +#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +#lightning-block-sync = { path = "../rust-lightning/lightning-block-sync" } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } +#lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } +#lightning-macros = { path = "../rust-lightning/lightning-macros" } + +#lightning = { git = 
"https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } + +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } + +#vss-client-ng = { path = "../vss-client" } +#vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" } diff --git a/README.md b/README.md index 8907e2a66..54bf1083f 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ LDK Node currently comes with a decidedly opinionated set of design choices: LDK Node itself is written in [Rust][rust] and may therefore be natively added as a library dependency to any `std` Rust program. However, beyond its Rust API it also offers language bindings for [Swift][swift], [Kotlin][kotlin], and [Python][python] based on the [UniFFI](https://github.com/mozilla/uniffi-rs/). Moreover, [Flutter bindings][flutter_bindings] are also available. ## MSRV -The Minimum Supported Rust Version (MSRV) is currently 1.75.0. +The Minimum Supported Rust Version (MSRV) is currently 1.85.0. 
diff --git a/benches/payments.rs b/benches/payments.rs new file mode 100644 index 000000000..ba69e046d --- /dev/null +++ b/benches/payments.rs @@ -0,0 +1,195 @@ +#[path = "../tests/common/mod.rs"] +mod common; + +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use bitcoin::hex::DisplayHex; +use bitcoin::Amount; +use common::{ + expect_channel_ready_event, generate_blocks_and_wait, premine_and_distribute_funds, + setup_bitcoind_and_electrsd, setup_two_nodes_with_store, TestChainSource, +}; +use criterion::{criterion_group, criterion_main, Criterion}; +use ldk_node::{Event, Node}; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; +use rand::RngCore; +use tokio::task::{self}; + +use crate::common::open_channel_push_amt; + +fn spawn_payment(node_a: Arc<Node>, node_b: Arc<Node>, amount_msat: u64) { + let mut preimage_bytes = [0u8; 32]; + rand::rng().fill_bytes(&mut preimage_bytes); + let preimage = PaymentPreimage(preimage_bytes); + let payment_hash: PaymentHash = preimage.into(); + + // Spawn each payment as a separate async task + task::spawn(async move { + println!("{}: Starting payment", payment_hash.0.as_hex()); + + loop { + // Pre-check the HTLC slots to try to avoid the performance impact of a failed payment. + while node_a.list_channels()[0].next_outbound_htlc_limit_msat == 0 { + println!("{}: Waiting for HTLC slots to free up", payment_hash.0.as_hex()); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + let payment_id = node_a.spontaneous_payment().send_with_preimage( + amount_msat, + node_b.node_id(), + preimage, + None, + ); + + match payment_id { + Ok(payment_id) => { + println!( + "{}: Awaiting payment with id {}", + payment_hash.0.as_hex(), + payment_id + ); + break; + }, + Err(e) => { + println!("{}: Payment attempt failed: {:?}", payment_hash.0.as_hex(), e); + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + }, + } + } + }); +} + +async fn send_payments(node_a: Arc<Node>, node_b: Arc<Node>) -> std::time::Duration { + let start = Instant::now(); + + let total_payments = 1000; + let amount_msat = 10_000_000; + + let mut success_count = 0; + for _ in 0..total_payments { + spawn_payment(node_a.clone(), node_b.clone(), amount_msat); + } + + while success_count < total_payments { + match node_a.next_event_async().await { + Event::PaymentSuccessful { payment_id, payment_hash, .. } => { + if let Some(id) = payment_id { + success_count += 1; + println!("{}: Payment with id {:?} completed", payment_hash.0.as_hex(), id); + } else { + println!("Payment completed (no payment_id)"); + } + }, + Event::PaymentFailed { payment_id, payment_hash, .. } => { + println!("{}: Payment {:?} failed", payment_hash.unwrap().0.as_hex(), payment_id); + + // The payment failed, so we need to respawn it. + spawn_payment(node_a.clone(), node_b.clone(), amount_msat); + }, + ref e => { + println!("Received non-payment event: {:?}", e); + }, + } + + node_a.event_handled().unwrap(); + } + + let duration = start.elapsed(); + println!("Time elapsed: {:?}", duration); + + // Send back the money for the next iteration. + let mut preimage_bytes = [0u8; 32]; + rand::rng().fill_bytes(&mut preimage_bytes); + node_b + .spontaneous_payment() + .send_with_preimage( + amount_msat * total_payments, + node_a.node_id(), + PaymentPreimage(preimage_bytes), + None, + ) + .ok() + .unwrap(); + + duration +} + +fn payment_benchmark(c: &mut Criterion) { + // Set up two nodes. 
Because this is slow, we reuse the same nodes for each sample. + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + + let (node_a, node_b) = setup_two_nodes_with_store( + &chain_source, + false, + true, + false, + common::TestStoreType::Sqlite, + ); + + let runtime = + tokio::runtime::Builder::new_multi_thread().worker_threads(4).enable_all().build().unwrap(); + + let node_a = Arc::new(node_a); + let node_b = Arc::new(node_b); + + // Fund the nodes and setup a channel between them. The criterion function cannot be async, so we need to execute + // the setup using a runtime. + let node_a_cloned = Arc::clone(&node_a); + let node_b_cloned = Arc::clone(&node_b); + runtime.block_on(async move { + let address_a = node_a_cloned.onchain_payment().new_address().unwrap(); + let premine_sat = 25_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premine_sat), + ) + .await; + node_a_cloned.sync_wallets().unwrap(); + node_b_cloned.sync_wallets().unwrap(); + open_channel_push_amt( + &node_a_cloned, + &node_b_cloned, + 16_000_000, + Some(1_000_000_000), + false, + &electrsd, + ) + .await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + node_a_cloned.sync_wallets().unwrap(); + node_b_cloned.sync_wallets().unwrap(); + expect_channel_ready_event!(node_a_cloned, node_b_cloned.node_id()); + expect_channel_ready_event!(node_b_cloned, node_a_cloned.node_id()); + }); + + let mut group = c.benchmark_group("payments"); + group.sample_size(10); + + group.bench_function("payments", |b| { + // Use custom timing so that sending back the money at the end of each iteration isn't included in the + // measurement. + b.to_async(&runtime).iter_custom(|iter| { + let node_a = Arc::clone(&node_a); + let node_b = Arc::clone(&node_b); + + async move { + let mut total = Duration::ZERO; + for _i in 0..iter { + let node_a = Arc::clone(&node_a); + let node_b = Arc::clone(&node_b); + + total += send_payments(node_a, node_b).await; + } + total + } + }); + }); +} + +criterion_group!(benches, payment_benchmark); +criterion_main!(benches); diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index d3e4b0865..578c3308b 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ b/bindings/kotlin/ldk-node-android/gradle.properties @@ -2,4 +2,4 @@ org.gradle.jvmargs=-Xmx1536m android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official -libraryVersion=0.6.2 +libraryVersion=0.6.0 diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index d57989d3e..913b5caea 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,3 +1,3 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official -libraryVersion=0.6.2 +libraryVersion=0.6.0 diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index cd3c5d876..d3c788463 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -1,5 +1,5 @@ namespace ldk_node { - Mnemonic generate_entropy_mnemonic(); + Mnemonic generate_entropy_mnemonic(WordCount? word_count); Config default_config(); }; @@ -12,8 +12,8 @@ dictionary Config { sequence trusted_peers_0conf; u64 probing_liquidity_limit_multiplier; AnchorChannelsConfig? anchor_channels_config; - SendingParameters? 
sending_parameters; boolean transient_network_graph; + RouteParametersConfig? route_parameters; }; dictionary AnchorChannelsConfig { @@ -45,6 +45,15 @@ dictionary LSPS2ServiceConfig { u32 max_client_to_self_delay; u64 min_payment_size_msat; u64 max_payment_size_msat; + boolean client_trusts_lsp; +}; + +enum WordCount { + "Words12", + "Words15", + "Words18", + "Words21", + "Words24", }; enum LogLevel { @@ -65,7 +74,7 @@ dictionary LogRecord { [Trait, WithForeign] interface LogWriter { - void log(LogRecord record); + void log(LogRecord record); }; interface Builder { @@ -79,8 +88,10 @@ interface Builder { void set_chain_source_esplora(string server_url, EsploraSyncConfig? config); void set_chain_source_electrum(string server_url, ElectrumSyncConfig? config); void set_chain_source_bitcoind_rpc(string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); + void set_chain_source_bitcoind_rest(string rest_host, u16 rest_port, string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); void set_gossip_source_p2p(); void set_gossip_source_rgs(string rgs_server_url); + void set_pathfinding_scores_source(string url); void set_liquidity_source_lsps1(PublicKey node_id, SocketAddress address, string? token); void set_liquidity_source_lsps2(PublicKey node_id, SocketAddress address, string? token); void set_storage_dir_path(string storage_dir_path); @@ -95,6 +106,8 @@ interface Builder { [Throws=BuildError] void set_node_alias(string node_alias); [Throws=BuildError] + void set_async_payments_role(AsyncPaymentsRole? role); + [Throws=BuildError] Node build(); [Throws=BuildError] Node build_with_fs_store(); @@ -141,6 +154,10 @@ interface Node { [Throws=NodeError] UserChannelId open_announced_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config); [Throws=NodeError] + void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats); + [Throws=NodeError] + void splice_out([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, [ByRef]Address address, u64 splice_amount_sats); + [Throws=NodeError] void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); [Throws=NodeError] void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? reason); @@ -169,19 +186,19 @@ interface Node { [Enum] interface Bolt11InvoiceDescription { - Hash(string hash); - Direct(string description); + Hash(string hash); + Direct(string description); }; interface Bolt11Payment { [Throws=NodeError] - PaymentId send([ByRef]Bolt11Invoice invoice, SendingParameters? sending_parameters); + PaymentId send([ByRef]Bolt11Invoice invoice, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, SendingParameters? sending_parameters); + PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, RouteParametersConfig? route_parameters); [Throws=NodeError] - void send_probes([ByRef]Bolt11Invoice invoice); + void send_probes([ByRef]Bolt11Invoice invoice, RouteParametersConfig? route_parameters); [Throws=NodeError] - void send_probes_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat); + void send_probes_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, RouteParametersConfig? 
route_parameters); [Throws=NodeError] void claim_for_hash(PaymentHash payment_hash, u64 claimable_amount_msat, PaymentPreimage preimage); [Throws=NodeError] @@ -197,14 +214,18 @@ interface Bolt11Payment { [Throws=NodeError] Bolt11Invoice receive_via_jit_channel(u64 amount_msat, [ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_lsp_fee_limit_msat); [Throws=NodeError] + Bolt11Invoice receive_via_jit_channel_for_hash(u64 amount_msat, [ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_lsp_fee_limit_msat, PaymentHash payment_hash); + [Throws=NodeError] Bolt11Invoice receive_variable_amount_via_jit_channel([ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_proportional_lsp_fee_limit_ppm_msat); + [Throws=NodeError] + Bolt11Invoice receive_variable_amount_via_jit_channel_for_hash([ByRef]Bolt11InvoiceDescription description, u32 expiry_secs, u64? max_proportional_lsp_fee_limit_ppm_msat, PaymentHash payment_hash); }; interface Bolt12Payment { [Throws=NodeError] - PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note); + PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note); + PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); [Throws=NodeError] Offer receive(u64 amount_msat, [ByRef]string description, u32? expiry_secs, u64? quantity); [Throws=NodeError] @@ -212,17 +233,24 @@ interface Bolt12Payment { [Throws=NodeError] Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] - Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note); + Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note, RouteParametersConfig? route_parameters); + [Throws=NodeError] + Offer receive_async(); + [Throws=NodeError] + void set_paths_to_static_invoice_server(bytes paths); + [Throws=NodeError] + bytes blinded_paths_for_async_recipient(bytes recipient_id); }; interface SpontaneousPayment { - // Alby: custom TLV & preimage [Throws=NodeError] - PaymentId send_with_tlvs_and_preimage(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters, sequence custom_tlvs, PaymentPreimage? preimage); - //[Throws=NodeError] - //PaymentId send(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters); - //[Throws=NodeError] - //PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters, sequence custom_tlvs); + PaymentId send(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters); + [Throws=NodeError] + PaymentId send_with_custom_tlvs(u64 amount_msat, PublicKey node_id, RouteParametersConfig? route_parameters, sequence custom_tlvs); + [Throws=NodeError] + PaymentId send_with_preimage(u64 amount_msat, PublicKey node_id, PaymentPreimage preimage, RouteParametersConfig? route_parameters); + [Throws=NodeError] + PaymentId send_with_preimage_and_custom_tlvs(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage preimage, RouteParametersConfig? 
route_parameters); [Throws=NodeError] void send_probes(u64 amount_msat, PublicKey node_id); }; @@ -250,14 +278,14 @@ interface UnifiedQrPayment { [Throws=NodeError] string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); [Throws=NodeError] - QrPaymentResult send([ByRef]string uri_str); + QrPaymentResult send([ByRef]string uri_str, RouteParametersConfig? route_parameters); }; interface LSPS1Liquidity { [Throws=NodeError] LSPS1OrderStatus request_channel(u64 lsp_balance_sat, u64 client_balance_sat, u32 channel_expiry_blocks, boolean announce_channel); [Throws=NodeError] - LSPS1OrderStatus check_order_status(OrderId order_id); + LSPS1OrderStatus check_order_status(LSPS1OrderId order_id); }; [Error] @@ -275,6 +303,7 @@ enum NodeError { "ProbeSendingFailed", "ChannelCreationFailed", "ChannelClosingFailed", + "ChannelSplicingFailed", "ChannelConfigUpdateFailed", "PersistenceFailed", "FeerateEstimationUpdateFailed", @@ -315,16 +344,18 @@ enum NodeError { "InsufficientFunds", "LiquiditySourceUnavailable", "LiquidityFeeTooHigh", + "InvalidBlindedPaths", + "AsyncPaymentServicesDisabled", }; dictionary NodeStatus { boolean is_running; - boolean is_listening; BestBlock current_best_block; u64? latest_lightning_wallet_sync_timestamp; u64? latest_onchain_wallet_sync_timestamp; u64? latest_fee_rate_cache_update_timestamp; u64? latest_rgs_snapshot_timestamp; + u64? latest_pathfinding_scores_sync_timestamp; u64? latest_node_announcement_broadcast_timestamp; u32? latest_channel_monitor_archival_height; }; @@ -343,6 +374,7 @@ enum BuildError { "InvalidListeningAddresses", "InvalidAnnouncementAddresses", "InvalidNodeAlias", + "RuntimeSetupFailed", "ReadFailed", "WriteFailed", "StoragePathAccessFailed", @@ -350,6 +382,7 @@ enum BuildError { "WalletSetupFailed", "LoggerSetupFailed", "NetworkMismatch", + "AsyncPaymentsConfigMismatch", }; //[Trait] @@ -375,8 +408,10 @@ interface Event { PaymentForwarded(ChannelId prev_channel_id, ChannelId next_channel_id, UserChannelId? prev_user_channel_id, UserChannelId? next_user_channel_id, PublicKey? prev_node_id, PublicKey? next_node_id, u64? total_fee_earned_msat, u64? skimmed_fee_msat, boolean claim_from_onchain_tx, u64? outbound_amount_forwarded_msat); ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo); - ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id); + ChannelReady(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, OutPoint? funding_txo); ChannelClosed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey? counterparty_node_id, ClosureReason? reason); + SplicePending(ChannelId channel_id, UserChannelId user_channel_id, PublicKey counterparty_node_id, OutPoint new_funding_txo); + SpliceFailed(ChannelId channel_id, UserChannelId user_channel_id, PublicKey counterparty_node_id, OutPoint? abandoned_funding_txo); }; enum PaymentFailureReason { @@ -395,7 +430,7 @@ enum PaymentFailureReason { [Enum] interface ClosureReason { CounterpartyForceClosed(UntrustedString peer_msg); - HolderForceClosed(boolean? broadcasted_latest_txn); + HolderForceClosed(boolean? 
broadcasted_latest_txn, string message); LegacyCooperativeClosure(); CounterpartyInitiatedCooperativeClosure(); LocallyInitiatedCooperativeClosure(); @@ -405,8 +440,9 @@ interface ClosureReason { DisconnectedPeer(); OutdatedChannelManager(); CounterpartyCoopClosedUnfundedChannel(); + LocallyCoopClosedUnfundedChannel(); FundingBatchClosure(); - HTLCsTimedOut(); + HTLCsTimedOut( PaymentHash? payment_hash ); PeerFeerateTooLow(u32 peer_feerate_sat_per_kw, u32 required_feerate_sat_per_kw); }; @@ -460,11 +496,11 @@ dictionary PaymentDetails { u64 latest_update_timestamp; }; -dictionary SendingParameters { - MaxTotalRoutingFeeLimit? max_total_routing_fee_msat; - u32? max_total_cltv_expiry_delta; - u8? max_path_count; - u8? max_channel_saturation_power_of_half; +dictionary RouteParametersConfig { + u64? max_total_routing_fee_msat; + u32 max_total_cltv_expiry_delta; + u8 max_path_count; + u8 max_channel_saturation_power_of_half; }; dictionary CustomTlvRecord { @@ -473,13 +509,13 @@ dictionary CustomTlvRecord { }; dictionary LSPS1OrderStatus { - OrderId order_id; - OrderParameters order_params; - PaymentInfo payment_options; - ChannelOrderInfo? channel_state; + LSPS1OrderId order_id; + LSPS1OrderParams order_params; + LSPS1PaymentInfo payment_options; + LSPS1ChannelInfo? channel_state; }; -dictionary OrderParameters { +dictionary LSPS1OrderParams { u64 lsp_balance_sat; u64 client_balance_sat; u16 required_channel_confirmations; @@ -489,22 +525,22 @@ dictionary OrderParameters { boolean announce_channel; }; -dictionary PaymentInfo { - Bolt11PaymentInfo? bolt11; - OnchainPaymentInfo? onchain; +dictionary LSPS1PaymentInfo { + LSPS1Bolt11PaymentInfo? bolt11; + LSPS1OnchainPaymentInfo? onchain; }; -dictionary Bolt11PaymentInfo { - PaymentState state; - DateTime expires_at; +dictionary LSPS1Bolt11PaymentInfo { + LSPS1PaymentState state; + LSPSDateTime expires_at; u64 fee_total_sat; u64 order_total_sat; Bolt11Invoice invoice; }; -dictionary OnchainPaymentInfo { - PaymentState state; - DateTime expires_at; +dictionary LSPS1OnchainPaymentInfo { + LSPS1PaymentState state; + LSPSDateTime expires_at; u64 fee_total_sat; u64 order_total_sat; Address address; @@ -513,34 +549,28 @@ dictionary OnchainPaymentInfo { Address? refund_onchain_address; }; -dictionary ChannelOrderInfo { - DateTime funded_at; +dictionary LSPS1ChannelInfo { + LSPSDateTime funded_at; OutPoint funding_outpoint; - DateTime expires_at; + LSPSDateTime expires_at; }; -enum PaymentState { +enum LSPS1PaymentState { "ExpectPayment", "Paid", "Refunded", }; -[Enum] -interface MaxTotalRoutingFeeLimit { - None (); - Some ( u64 amount_msat ); +[NonExhaustive] +enum Network { + "Bitcoin", + "Testnet", + "Signet", + "Regtest", }; -// [NonExhaustive] -// enum Network { -// "Bitcoin", -// "Testnet", -// "Signet", -// "Regtest", -// }; - -[Custom] -typedef string Network; +// [Custom] +// typedef string Network; dictionary OutPoint { Txid txid; @@ -777,6 +807,11 @@ enum Currency { "Signet", }; +enum AsyncPaymentsRole { + "Client", + "Server", +}; + dictionary RouteHintHop { PublicKey src_node_id; u64 short_channel_id; @@ -808,6 +843,71 @@ interface Bolt11Invoice { PublicKey recover_payee_pub_key(); }; +[Enum] +interface OfferAmount { + Bitcoin(u64 amount_msats); + Currency(string iso4217_code, u64 amount); +}; + +[Traits=(Debug, Display, Eq)] +interface Offer { + [Throws=NodeError, Name=from_str] + constructor([ByRef] string offer_str); + OfferId id(); + boolean is_expired(); + string? offer_description(); + string? issuer(); + OfferAmount? 
amount(); + boolean is_valid_quantity(u64 quantity); + boolean expects_quantity(); + boolean supports_chain(Network chain); + sequence<Network> chains(); + sequence<u8>? metadata(); + u64? absolute_expiry_seconds(); + PublicKey? issuer_signing_pubkey(); +}; + +[Traits=(Debug, Display, Eq)] +interface Refund { + [Throws=NodeError, Name=from_str] + constructor([ByRef] string refund_str); + string refund_description(); + u64? absolute_expiry_seconds(); + boolean is_expired(); + string? issuer(); + sequence<u8> payer_metadata(); + Network? chain(); + u64 amount_msats(); + u64? quantity(); + PublicKey payer_signing_pubkey(); + string? payer_note(); +}; + +interface Bolt12Invoice { + [Throws=NodeError, Name=from_str] + constructor([ByRef] string invoice_str); + PaymentHash payment_hash(); + u64 amount_msats(); + OfferAmount? amount(); + PublicKey signing_pubkey(); + u64 created_at(); + u64? absolute_expiry_seconds(); + u64 relative_expiry(); + boolean is_expired(); + string? invoice_description(); + string? issuer(); + string? payer_note(); + sequence<u8>? metadata(); + u64? quantity(); + sequence<u8> signable_hash(); + PublicKey payer_signing_pubkey(); + PublicKey? issuer_signing_pubkey(); + sequence<u8> chain(); + sequence<sequence<u8>>? offer_chains(); + sequence<Address>
fallback_addresses(); + sequence<u8> encode(); +}; + [Custom] typedef string Txid; @@ -826,15 +926,6 @@ typedef string NodeId; [Custom] typedef string Address; -[Custom] -typedef string Offer; - -[Custom] -typedef string Refund; - -[Custom] -typedef string Bolt12Invoice; - [Custom] typedef string OfferId; @@ -878,7 +969,7 @@ enum MigrateStorage { }; [Custom] -typedef string OrderId; +typedef string LSPS1OrderId; [Custom] -typedef string DateTime; +typedef string LSPSDateTime; diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index f5aa42f22..496781a6a 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.6.2" +version = "0.6.0" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] diff --git a/bindings/uniffi-bindgen/Cargo.toml b/bindings/uniffi-bindgen/Cargo.toml index 951b59f78..a33c0f9ae 100644 --- a/bindings/uniffi-bindgen/Cargo.toml +++ b/bindings/uniffi-bindgen/Cargo.toml @@ -6,4 +6,4 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -uniffi = { version = "0.25.3", features = ["cli"] } +uniffi = { version = "0.28.3", features = ["cli"] } diff --git a/docker-compose.yml b/docker-compose.yml index 425dc129a..e71fd70fb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,10 +13,11 @@ services: "-rpcbind=0.0.0.0", "-rpcuser=user", "-rpcpassword=pass", - "-fallbackfee=0.00001" + "-fallbackfee=0.00001", + "-rest" ] ports: - - "18443:18443" # Regtest RPC port + - "18443:18443" # Regtest REST and RPC port - "18444:18444" # Regtest P2P port networks: - bitcoin-electrs diff --git a/rustfmt.toml b/rustfmt.toml index 4f88472be..4900e142f 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -4,9 +4,16 @@ hard_tabs = true use_field_init_shorthand = true max_width = 100 match_block_trailing_comma = true -# UNSTABLE: format_code_in_doc_comments = true -# UNSTABLE: overflow_delimited_expr = true -# UNSTABLE: comment_width = 100 -# UNSTABLE: format_macro_matchers = true -# UNSTABLE: format_strings = true -# UNSTABLE: group_imports = "StdExternalCrate" +format_code_in_doc_comments = true +comment_width = 100 +format_macro_matchers = true +group_imports = "StdExternalCrate" +reorder_imports = true +imports_granularity = "Module" +normalize_comments = true +normalize_doc_attributes = true +style_edition = "2021" +# TBD: do we want comment and string wrapping?
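+# Note: `wrap_comments` and `format_strings` are still unstable rustfmt options and would rewrap
+# existing comments and string literals across the whole tree, so they stay commented out below.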
+#wrap_comments = true +#format_strings = true +#overflow_delimited_expr = true diff --git a/scripts/uniffi_bindgen_generate_kotlin_android.sh b/scripts/uniffi_bindgen_generate_kotlin_android.sh index 142e5f75d..c87db5924 100755 --- a/scripts/uniffi_bindgen_generate_kotlin_android.sh +++ b/scripts/uniffi_bindgen_generate_kotlin_android.sh @@ -35,9 +35,9 @@ case "$OSTYPE" in PATH="$ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/$LLVM_ARCH_PATH/bin:$PATH" rustup target add x86_64-linux-android aarch64-linux-android armv7-linux-androideabi -CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="x86_64-linux-android21-clang" CC="x86_64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target x86_64-linux-android || exit 1 -CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="armv7a-linux-androideabi21-clang" CC="armv7a-linux-androideabi21-clang" cargo build --profile release-smaller --features uniffi --target armv7-linux-androideabi || exit 1 -CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="aarch64-linux-android21-clang" CC="aarch64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target aarch64-linux-android || exit 1 +RUSTFLAGS="-C link-args=-Wl,-z,max-page-size=16384,-z,common-page-size=16384" CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="x86_64-linux-android21-clang" CC="x86_64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target x86_64-linux-android || exit 1 +RUSTFLAGS="-C link-args=-Wl,-z,max-page-size=16384,-z,common-page-size=16384" CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="armv7a-linux-androideabi21-clang" CC="armv7a-linux-androideabi21-clang" cargo build --profile release-smaller --features uniffi --target armv7-linux-androideabi || exit 1 +RUSTFLAGS="-C link-args=-Wl,-z,max-page-size=16384,-z,common-page-size=16384" CFLAGS="-D__ANDROID_MIN_SDK_VERSION__=21" AR=llvm-ar CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="aarch64-linux-android21-clang" CC="aarch64-linux-android21-clang" cargo build --profile release-smaller --features uniffi --target aarch64-linux-android || exit 1 $UNIFFI_BINDGEN_BIN generate bindings/ldk_node.udl --language kotlin --config uniffi-android.toml -o "$BINDINGS_DIR"/"$PROJECT_DIR"/lib/src/main/kotlin || exit 1 JNI_LIB_DIR="$BINDINGS_DIR"/"$PROJECT_DIR"/lib/src/main/jniLibs/ diff --git a/src/balance.rs b/src/balance.rs index c5c3e5348..75fac1b90 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -5,19 +5,15 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::sweep::value_from_descriptor; - -use lightning::chain::channelmonitor::Balance as LdkBalance; -use lightning::chain::channelmonitor::BalanceSource; +use bitcoin::secp256k1::PublicKey; +use bitcoin::{Amount, BlockHash, Txid}; +use lightning::chain::channelmonitor::{Balance as LdkBalance, BalanceSource}; +use lightning::chain::transaction::OutPoint; use lightning::ln::types::ChannelId; +use lightning::sign::SpendableOutputDescriptor; use lightning::util::sweep::{OutputSpendStatus, TrackedSpendableOutput}; - use lightning_types::payment::{PaymentHash, PaymentPreimage}; -use bitcoin::secp256k1::PublicKey; -use bitcoin::{BlockHash, Txid}; -use lightning::chain::transaction::OutPoint; - /// Details of the known available balances returned by [`Node::list_balances`]. /// /// [`Node::list_balances`]: crate::Node::list_balances @@ -75,7 +71,8 @@ pub struct BalanceDetails { pub enum LightningBalance { /// The channel is not yet closed (or the commitment or closing transaction has not yet /// appeared in a block). The given balance is claimable (less on-chain fees) if the channel is - /// force-closed now. + /// force-closed now. Values do not take into account any pending splices and are only based + /// on the confirmed state of the channel. ClaimableOnChannelClose { /// The identifier of the channel this balance belongs to. channel_id: ChannelId, @@ -252,23 +249,28 @@ impl LightningBalance { let OutPoint { txid: funding_tx_id, index: funding_tx_index } = funding_txo; match balance { LdkBalance::ClaimableOnChannelClose { - amount_satoshis, - transaction_fee_satoshis, - outbound_payment_htlc_rounded_msat, - outbound_forwarded_htlc_rounded_msat, - inbound_claiming_htlc_rounded_msat, - inbound_htlc_rounded_msat, - } => Self::ClaimableOnChannelClose { - channel_id, - counterparty_node_id, - funding_tx_id, - funding_tx_index, - amount_satoshis, - transaction_fee_satoshis, + balance_candidates, + confirmed_balance_candidate_index, outbound_payment_htlc_rounded_msat, outbound_forwarded_htlc_rounded_msat, inbound_claiming_htlc_rounded_msat, inbound_htlc_rounded_msat, + } => { + // unwrap safety: confirmed_balance_candidate_index is guaranteed to index into balance_candidates + let balance = balance_candidates.get(confirmed_balance_candidate_index).unwrap(); + + Self::ClaimableOnChannelClose { + channel_id, + counterparty_node_id, + amount_satoshis: balance.amount_satoshis, + transaction_fee_satoshis: balance.transaction_fee_satoshis, + outbound_payment_htlc_rounded_msat, + outbound_forwarded_htlc_rounded_msat, + inbound_claiming_htlc_rounded_msat, + inbound_htlc_rounded_msat, + funding_tx_id, + funding_tx_index, + } }, LdkBalance::ClaimableAwaitingConfirmations { amount_satoshis, @@ -457,3 +459,11 @@ impl PendingSweepBalance { } } } + +fn value_from_descriptor(descriptor: &SpendableOutputDescriptor) -> Amount { + match &descriptor { + SpendableOutputDescriptor::StaticOutput { output, .. } => output.value, + SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.output.value, + SpendableOutputDescriptor::StaticPaymentOutput(output) => output.output.value, + } +} diff --git a/src/builder.rs b/src/builder.rs index bd78ae949..f780ac9bb 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -5,18 +5,61 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL}; -use crate::config::{ - default_user_config, may_announce_channel, AnnounceError, Config, ElectrumSyncConfig, - EsploraSyncConfig, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, +use std::collections::HashMap; +use std::convert::TryInto; +use std::default::Default; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, Mutex, Once, RwLock}; +use std::time::SystemTime; +use std::{fmt, fs}; + +use bdk_wallet::template::Bip84; +use bdk_wallet::{KeychainKind, Wallet as BdkWallet}; +use bip39::Mnemonic; +use bitcoin::bip32::{ChildNumber, Xpriv}; +use bitcoin::secp256k1::PublicKey; +use bitcoin::{BlockHash, Network}; +use lightning::chain::{chainmonitor, BestBlock, Watch}; +use lightning::io::Cursor; +use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; +use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; +use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; +use lightning::log_trace; +use lightning::routing::gossip::NodeAlias; +use lightning::routing::router::DefaultRouter; +use lightning::routing::scoring::{ + CombinedScorer, ProbabilisticScorer, ProbabilisticScoringDecayParameters, + ProbabilisticScoringFeeParameters, +}; +use lightning::sign::{EntropySource, NodeSigner}; +use lightning::util::persist::{ + KVStoreSync, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; +use lightning::util::ser::ReadableArgs; +use lightning::util::sweep::OutputSweeper; +use lightning_persister::fs_store::FilesystemStore; +use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; + +use crate::chain::ChainSource; +use crate::config::{ + default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole, + BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, + DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, WALLET_KEYS_SEED_LEN, +}; use crate::connection::ConnectionManager; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; -use crate::io::sqlite_store::{SqliteStore, SqliteStoreConfig}; -use crate::io::utils::{read_node_metrics, write_node_metrics}; +use crate::io::sqlite_store::SqliteStore; +use crate::io::utils::{ + read_external_pathfinding_scores_from_cache, read_node_metrics, write_node_metrics, +}; use crate::io::vss_store::VssStore; use crate::io::{ self, NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, @@ -29,72 +72,43 @@ use crate::liquidity::{ }; use crate::logger::{log_error, log_info, LdkLogger, LogLevel, LogWriter, Logger}; use crate::message_handler::NodeCustomMessageHandler; +use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; use crate::peer_store::PeerStore; +use crate::runtime::Runtime; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeyValue, KeysManager, - MessageRouter, MigrateStorage, OnionMessenger, PaymentStore, PeerManager, ResetState, + MessageRouter, MigrateStorage, 
OnionMessenger, PaymentStore, PeerManager, Persister, + ResetState, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; use crate::{Node, NodeMetrics}; - use chrono::Local; -use lightning::chain::{chainmonitor, BestBlock, Watch}; -use lightning::io::Cursor; -use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; -use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; -use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; -use lightning::routing::gossip::NodeAlias; -use lightning::routing::router::DefaultRouter; -use lightning::routing::scoring::{ - ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, -}; -use lightning::sign::EntropySource; - -use lightning::util::persist::{ - read_channel_monitors, KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, - NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, -}; -use lightning::util::ser::ReadableArgs; -use lightning::util::sweep::OutputSweeper; - -use lightning_persister::fs_store::FilesystemStore; - -use bdk_wallet::template::Bip84; -use bdk_wallet::KeychainKind; -use bdk_wallet::Wallet as BdkWallet; - -use bip39::Mnemonic; - -use bitcoin::secp256k1::PublicKey; -use bitcoin::{BlockHash, Network}; - -use bitcoin::bip32::{ChildNumber, Xpriv}; -use std::collections::HashMap; -use std::convert::TryInto; -use std::default::Default; -use std::fmt; -use std::fs; -use std::path::{Path, PathBuf}; -use std::sync::atomic::AtomicBool; -use std::sync::{Arc, Mutex, Once, RwLock}; -use std::time::SystemTime; -use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; const VSS_HARDENED_CHILD_INDEX: u32 = 877; const VSS_LNURL_AUTH_HARDENED_CHILD_INDEX: u32 = 138; const LSPS_HARDENED_CHILD_INDEX: u32 = 577; +const PERSISTER_MAX_PENDING_UPDATES: u64 = 100; #[derive(Debug, Clone)] enum ChainDataSourceConfig { - Esplora { server_url: String, sync_config: Option<EsploraSyncConfig> }, - Electrum { server_url: String, sync_config: Option<ElectrumSyncConfig> }, - BitcoindRpc { rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String }, + Esplora { + server_url: String, + headers: HashMap<String, String>, + sync_config: Option<EsploraSyncConfig>, + }, + Electrum { + server_url: String, + sync_config: Option<ElectrumSyncConfig>, + }, + Bitcoind { + rpc_host: String, + rpc_port: u16, + rpc_user: String, + rpc_password: String, + rest_client_config: Option<BitcoindRestClientConfig>, + }, } #[derive(Debug, Clone)] @@ -110,6 +124,11 @@ enum GossipSourceConfig { RapidGossipSync(String), } +#[derive(Debug, Clone)] +struct PathfindingScoresSyncConfig { + url: String, +} + #[derive(Debug, Clone, Default)] struct LiquiditySourceConfig { // Act as an LSPS1 client connecting to the given service. @@ -162,19 +181,21 @@ pub enum BuildError { InvalidAnnouncementAddresses, /// The provided alias is invalid. InvalidNodeAlias, + /// An attempt to set up a runtime has failed. + RuntimeSetupFailed, /// We failed to read data from the [`KVStore`]. /// - /// [`KVStore`]: lightning::util::persist::KVStore + /// [`KVStore`]: lightning::util::persist::KVStoreSync ReadFailed, /// We failed to write data to the [`KVStore`].
/// - /// [`KVStore`]: lightning::util::persist::KVStore + /// [`KVStore`]: lightning::util::persist::KVStoreSync WriteFailed, /// We failed to access the given `storage_dir_path`. StoragePathAccessFailed, /// We failed to set up our [`KVStore`]. /// - /// [`KVStore`]: lightning::util::persist::KVStore + /// [`KVStore`]: lightning::util::persist::KVStoreSync KVStoreSetupFailed, /// We failed to set up the onchain wallet. WalletSetupFailed, @@ -182,6 +203,8 @@ LoggerSetupFailed, /// The given network does not match the node's previously configured network. NetworkMismatch, + /// The role of the node in an asynchronous payments context is not compatible with the current configuration. + AsyncPaymentsConfigMismatch, } impl fmt::Display for BuildError { @@ -199,6 +222,7 @@ Self::InvalidAnnouncementAddresses => { write!(f, "Given announcement addresses are invalid.") }, + Self::RuntimeSetupFailed => write!(f, "Failed to set up a runtime."), Self::ReadFailed => write!(f, "Failed to read from store."), Self::WriteFailed => write!(f, "Failed to write to store."), Self::StoragePathAccessFailed => write!(f, "Failed to access the given storage path."), @@ -209,6 +233,12 @@ Self::NetworkMismatch => { write!(f, "Given network does not match the node's previously configured network.") }, + Self::AsyncPaymentsConfigMismatch => { + write!( + f, + "The async payments role is not compatible with the current configuration." + ) + }, } } } @@ -229,10 +259,13 @@ pub struct NodeBuilder { chain_data_source_config: Option<ChainDataSourceConfig>, gossip_source_config: Option<GossipSourceConfig>, liquidity_source_config: Option<LiquiditySourceConfig>, - monitors_to_restore: Option<Vec<KeyValue>>, + monitors_to_restore: Option<Vec<KeyValue>>, // Alby: for hub recovery with SCB backup file reset_state: Option<ResetState>, migrate_storage: Option<MigrateStorage>, log_writer_config: Option<LogWriterConfig>, + async_payments_role: Option<AsyncPaymentsRole>, + runtime_handle: Option<tokio::runtime::Handle>, + pathfinding_scores_sync_config: Option<PathfindingScoresSyncConfig>, } impl NodeBuilder { @@ -252,6 +285,8 @@ let reset_state = None; let migrate_storage = None; let log_writer_config = None; + let runtime_handle = None; + let pathfinding_scores_sync_config = None; Self { config, entropy_source_config, @@ -262,6 +297,9 @@ reset_state, migrate_storage, log_writer_config, + runtime_handle, + async_payments_role: None, + pathfinding_scores_sync_config, } } @@ -283,6 +321,16 @@ self } + /// Configures the [`Node`] instance to (re-)use a specific `tokio` runtime. + /// + /// If not provided, the node will spawn its own runtime or reuse any outer runtime context it + /// can detect. + #[cfg_attr(feature = "uniffi", allow(dead_code))] + pub fn set_runtime(&mut self, runtime_handle: tokio::runtime::Handle) -> &mut Self { + self.runtime_handle = Some(runtime_handle); + self + } + /// Configures the [`Node`] instance to source its wallet entropy from a seed file on disk. /// /// If the given file does not exist, a new random seed file will be generated and @@ -316,9 +364,28 @@ /// information. pub fn set_chain_source_esplora( &mut self, server_url: String, sync_config: Option<EsploraSyncConfig>, + ) -> &mut Self { + self.chain_data_source_config = Some(ChainDataSourceConfig::Esplora { + server_url, + headers: Default::default(), + sync_config, + }); + self + } + + /// Configures the [`Node`] instance to source its chain data from the given Esplora server. + /// + /// The given `headers` will be included in all requests to the Esplora server, typically used for + /// authentication purposes.
+ /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora_with_headers( + &mut self, server_url: String, headers: HashMap<String, String>, + sync_config: Option<EsploraSyncConfig>, ) -> &mut Self { self.chain_data_source_config = - Some(ChainDataSourceConfig::Esplora { server_url, sync_config }); + Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }); self } @@ -334,13 +401,48 @@ self } - /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC - /// endpoint. + /// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC. + /// + /// This method establishes an RPC connection that enables all essential chain operations including + /// transaction broadcasting and chain data synchronization. + /// + /// ## Parameters: + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection. pub fn set_chain_source_bitcoind_rpc( &mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) -> &mut Self { - self.chain_data_source_config = - Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }); + self.chain_data_source_config = Some(ChainDataSourceConfig::Bitcoind { + rpc_host, + rpc_port, + rpc_user, + rpc_password, + rest_client_config: None, + }); + self + } + + /// Configures the [`Node`] instance to synchronize chain data from a Bitcoin Core REST endpoint. + /// + /// This method enables chain data synchronization via Bitcoin Core's REST interface. The + /// additional RPC configuration is used for API calls the REST interface doesn't support, such + /// as transaction broadcasting. + /// + /// ## Parameters: + /// * `rest_host`, `rest_port` - Required parameters for the Bitcoin Core REST connection. + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection. + pub fn set_chain_source_bitcoind_rest( + &mut self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, + rpc_user: String, rpc_password: String, + ) -> &mut Self { + self.chain_data_source_config = Some(ChainDataSourceConfig::Bitcoind { + rpc_host, + rpc_port, + rpc_user, + rpc_password, + rest_client_config: Some(BitcoindRestClientConfig { rest_host, rest_port }), + }); + self } @@ -358,6 +460,14 @@ self } + /// Configures the [`Node`] instance to source its external scores from the given URL. + /// + /// The external scores are merged into the local scoring system to improve routing. + pub fn set_pathfinding_scores_source(&mut self, url: String) -> &mut Self { + self.pathfinding_scores_sync_config = Some(PathfindingScoresSyncConfig { url }); + self + } + /// Configures the [`Node`] instance to source inbound liquidity from the given /// [bLIP-51 / LSPS1] service. /// @@ -494,20 +604,36 @@ Ok(self) } + /// Sets the role of the node in an asynchronous payments context. + /// + /// See the async payments protocol documentation for more information. + pub fn set_async_payments_role( + &mut self, role: Option<AsyncPaymentsRole>, + ) -> Result<&mut Self, BuildError> { + if let Some(AsyncPaymentsRole::Server) = role { + may_announce_channel(&self.config) + .map_err(|_| BuildError::AsyncPaymentsConfigMismatch)?; + } + + self.async_payments_role = role; + Ok(self) + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured.
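///
/// A rough usage sketch (not a doctest; the endpoint URLs below are placeholders, not defaults
/// shipped with the crate):
/// ```ignore
/// let mut builder = NodeBuilder::new();
/// builder.set_chain_source_esplora("https://esplora.example.com".to_string(), None);
/// builder.set_gossip_source_rgs("https://rgs.example.com/snapshot".to_string());
/// let node = builder.build()?;
/// node.start()?;
/// ```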
pub fn build(&self) -> Result<Node, BuildError> { let storage_dir_path = self.config.storage_dir_path.clone(); fs::create_dir_all(storage_dir_path.clone()) .map_err(|_| BuildError::StoragePathAccessFailed)?; - let sql_store_config = - SqliteStoreConfig { transient_graph: self.config.transient_network_graph }; + let sql_store_config = io::sqlite_store::SqliteStoreConfig { + transient_graph: self.config.transient_network_graph, + }; let kv_store = Arc::new( - SqliteStore::with_config( + SqliteStore::new( storage_dir_path.into(), Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), Some(io::sqlite_store::KV_TABLE_NAME.to_string()), - sql_store_config, + Some(sql_store_config), ) .map_err(|_| BuildError::KVStoreSetupFailed)?, ); @@ -621,6 +747,15 @@ ) -> Result<Node, BuildError> { let logger = setup_logger(&self.log_writer_config, &self.config)?; + let runtime = if let Some(handle) = self.runtime_handle.as_ref() { + Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) + } else { + Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { + log_error!(logger, "Failed to set up tokio runtime: {}", e); + BuildError::RuntimeSetupFailed + })?) + }; + let seed_bytes = seed_bytes_from_config( &self.config, self.entropy_source_config.as_ref(), @@ -676,21 +811,23 @@ storage_dir_path.into(), Some(backup_filename), Some(io::sqlite_store::KV_TABLE_NAME.to_string()), + None, ) .map_err(|_| BuildError::KVStoreSetupFailed)?, - ) as Arc<dyn KVStore + Sync + Send>); + ) as Arc<DynStore>); } // Alby: use a secondary KV store for non-essential data (not needed by VSS) let storage_dir_path = config.storage_dir_path.clone(); - let sql_store_config = - SqliteStoreConfig { transient_graph: self.config.transient_network_graph }; + let sql_store_config = io::sqlite_store::SqliteStoreConfig { + transient_graph: self.config.transient_network_graph, + }; let secondary_kv_store = Arc::new( - SqliteStore::with_config( + SqliteStore::new( storage_dir_path.into(), Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), Some(io::sqlite_store::KV_TABLE_NAME.to_string()), - sql_store_config, + Some(sql_store_config), ) .map_err(|_| BuildError::KVStoreSetupFailed)?, ) as Arc<DynStore>; @@ -724,7 +861,7 @@ BuildError::KVStoreSetupFailed })?; // write value to new store - vss_store.write(primary_namespace, secondary_namespace, key, &value).map_err( + vss_store.write(primary_namespace, secondary_namespace, key, value).map_err( |e| { log_error!(logger, "Failed to migrate value: {}", e); BuildError::KVStoreSetupFailed @@ -773,7 +910,10 @@ self.chain_data_source_config.as_ref(), self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), + self.pathfinding_scores_sync_config.as_ref(), + self.async_payments_role, seed_bytes, + runtime, logger, Arc::new(vss_store), self.reset_state, @@ -784,6 +924,15 @@ pub fn build_with_store(&self, kv_store: Arc<DynStore>) -> Result<Node, BuildError> { let logger = setup_logger(&self.log_writer_config, &self.config)?; + let runtime = if let Some(handle) = self.runtime_handle.as_ref() { + Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger))) + } else { + Arc::new(Runtime::new(Arc::clone(&logger)).map_err(|e| { + log_error!(logger, "Failed to set up tokio runtime: {}", e); + BuildError::RuntimeSetupFailed + })?)
+ }; + let seed_bytes = seed_bytes_from_config( &self.config, self.entropy_source_config.as_ref(), @@ -795,7 +944,13 @@ if self.monitors_to_restore.is_some() { let monitors = self.monitors_to_restore.clone().unwrap(); for monitor in monitors { - let result = kv_store.write("monitors", "", &monitor.key, &monitor.value); + let result = KVStoreSync::write( + &*kv_store, + "monitors", + "", + &monitor.key, + monitor.value.clone(), + ); if result.is_err() { log_error!(logger, "Failed to restore monitor: {}", result.unwrap_err()); } @@ -807,7 +962,10 @@ self.chain_data_source_config.as_ref(), self.gossip_source_config.as_ref(), self.liquidity_source_config.as_ref(), + self.pathfinding_scores_sync_config.as_ref(), + self.async_payments_role, seed_bytes, + runtime, logger, kv_store, self.reset_state, @@ -898,6 +1056,24 @@ self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); } + /// Configures the [`Node`] instance to source its chain data from the given Esplora server. + /// + /// The given `headers` will be included in all requests to the Esplora server, typically used for + /// authentication purposes. + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora_with_headers( + &self, server_url: String, headers: HashMap<String, String>, + sync_config: Option<EsploraSyncConfig>, + ) { + self.inner.write().unwrap().set_chain_source_esplora_with_headers( + server_url, + headers, + sync_config, + ); + } + /// Configures the [`Node`] instance to source its chain data from the given Electrum server. /// /// If no `sync_config` is given, default values are used. See [`ElectrumSyncConfig`] for more @@ -908,8 +1084,14 @@ self.inner.write().unwrap().set_chain_source_electrum(server_url, sync_config); } - /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC - /// endpoint. + /// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC. + /// + /// This method establishes an RPC connection that enables all essential chain operations including + /// transaction broadcasting and chain data synchronization. + /// + /// ## Parameters: + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection. pub fn set_chain_source_bitcoind_rpc( &self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) { @@ -921,6 +1103,29 @@ ); } + /// Configures the [`Node`] instance to synchronize chain data from a Bitcoin Core REST endpoint. + /// + /// This method enables chain data synchronization via Bitcoin Core's REST interface. The + /// additional RPC configuration is used for API calls the REST interface doesn't support, such + /// as transaction broadcasting. + /// + /// ## Parameters: + /// * `rest_host`, `rest_port` - Required parameters for the Bitcoin Core REST connection. + /// * `rpc_host`, `rpc_port`, `rpc_user`, `rpc_password` - Required parameters for the Bitcoin Core RPC + /// connection. + pub fn set_chain_source_bitcoind_rest( + &self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, + rpc_user: String, rpc_password: String, + ) { + self.inner.write().unwrap().set_chain_source_bitcoind_rest( + rest_host, + rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + ); + } + /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer /// network.
pub fn set_gossip_source_p2p(&self) { @@ -933,6 +1138,13 @@ self.inner.write().unwrap().set_gossip_source_rgs(rgs_server_url); } + /// Configures the [`Node`] instance to source its external scores from the given URL. + /// + /// The external scores are merged into the local scoring system to improve routing. + pub fn set_pathfinding_scores_source(&self, url: String) { + self.inner.write().unwrap().set_pathfinding_scores_source(url); + } + /// Configures the [`Node`] instance to source inbound liquidity from the given /// [bLIP-51 / LSPS1] service. /// @@ -1032,6 +1244,13 @@ self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) } + /// Sets the role of the node in an asynchronous payments context. + pub fn set_async_payments_role( + &self, role: Option<AsyncPaymentsRole>, + ) -> Result<(), BuildError> { + self.inner.write().unwrap().set_async_payments_role(role).map(|_| ()) + } + /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options previously configured. pub fn build(&self) -> Result<Arc<Node>, BuildError> { @@ -1125,7 +1344,9 @@ fn build_with_store_internal( config: Arc<Config>, chain_data_source_config: Option<&ChainDataSourceConfig>, gossip_source_config: Option<&GossipSourceConfig>, - liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64], + liquidity_source_config: Option<&LiquiditySourceConfig>, + pathfinding_scores_sync_config: Option<&PathfindingScoresSyncConfig>, + async_payments_role: Option<AsyncPaymentsRole>, seed_bytes: [u8; 64], runtime: Arc<Runtime>, logger: Arc<Logger>, kv_store: Arc<DynStore>, reset_state: Option<ResetState>, ) -> Result<Node, BuildError> { optionally_install_rustls_cryptoprovider(); @@ -1153,65 +1374,17 @@ } // Initialize the status fields. - let is_listening = Arc::new(AtomicBool::new(false)); let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) { Ok(metrics) => Arc::new(RwLock::new(metrics)), Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { Arc::new(RwLock::new(NodeMetrics::default())) } else { + log_error!(logger, "Failed to read node metrics from store: {}", e); return Err(BuildError::ReadFailed); } }, }; - - // Initialize the on-chain wallet and chain access - let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes - })?; - - let descriptor = Bip84(xprv, KeychainKind::External); - let change_descriptor = Bip84(xprv, KeychainKind::Internal); - let mut wallet_persister = - KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); - let wallet_opt = BdkWallet::load() - .descriptor(KeychainKind::External, Some(descriptor.clone())) - .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) - .extract_keys() - .check_network(config.network) - .load_wallet(&mut wallet_persister) - .map_err(|e| match e { - bdk_wallet::LoadWithPersistError::InvalidChangeSet( - bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network { - loaded, - expected, - }), - ) => { - log_error!( - logger, - "Failed to set up wallet: Networks do not match.
Expected {} but got {}", - expected, - loaded - ); - BuildError::NetworkMismatch - }, - _ => { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - }, - })?; - let bdk_wallet = match wallet_opt { - Some(wallet) => wallet, - None => BdkWallet::create(descriptor, change_descriptor) - .network(config.network) - .create_wallet(&mut wallet_persister) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?, - }; - let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); let fee_estimator = Arc::new(OnchainFeeEstimator::new()); @@ -1223,93 +1396,172 @@ fn build_with_store_internal( Arc::clone(&kv_store), Arc::clone(&logger), )), - Err(_) => { + Err(e) => { + log_error!(logger, "Failed to read payment data from store: {}", e); return Err(BuildError::ReadFailed); }, }; - let wallet = Arc::new(Wallet::new( - bdk_wallet, - wallet_persister, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&payment_store), - Arc::clone(&config), - Arc::clone(&logger), - )); - - let chain_source = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => { - log_info!(logger, "Using esplora server: {}", server_url); + let (chain_source, chain_tip_opt) = match chain_data_source_config { + Some(ChainDataSourceConfig::Esplora { server_url, headers, sync_config }) => { let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); - Arc::new(ChainSource::new_esplora( + ChainSource::new_esplora( server_url.clone(), + headers.clone(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => { let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default()); - Arc::new(ChainSource::new_electrum( + ChainSource::new_electrum( server_url.clone(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, - Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => { - Arc::new(ChainSource::new_bitcoind_rpc( - rpc_host.clone(), - *rpc_port, - rpc_user.clone(), - rpc_password.clone(), - Arc::clone(&wallet), - Arc::clone(&fee_estimator), - Arc::clone(&tx_broadcaster), - Arc::clone(&kv_store), - Arc::clone(&config), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )) + Some(ChainDataSourceConfig::Bitcoind { + rpc_host, + rpc_port, + rpc_user, + rpc_password, + rest_client_config, + }) => match rest_client_config { + Some(rest_client_config) => runtime.block_on(async { + ChainSource::new_bitcoind_rest( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + rest_client_config.clone(), + Arc::clone(&logger), + Arc::clone(&node_metrics), + ) + .await + }), + None => runtime.block_on(async { + ChainSource::new_bitcoind_rpc( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), + Arc::clone(&logger), + Arc::clone(&node_metrics), + ) + .await + }), }, + None => { // Default to Esplora client. 
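// No chain source was explicitly configured, so fall back to the default Esplora server URL; this keeps a freshly built node able to sync chain data out of the box.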
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); let sync_config = EsploraSyncConfig::default(); - Arc::new(ChainSource::new_esplora( + ChainSource::new_esplora( server_url.clone(), + HashMap::new(), sync_config, - Arc::clone(&wallet), Arc::clone(&fee_estimator), Arc::clone(&tx_broadcaster), Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), Arc::clone(&node_metrics), - )) + ) }, }; + let chain_source = Arc::new(chain_source); - let runtime = Arc::new(RwLock::new(None)); + // Initialize the on-chain wallet and chain access + let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; - // Initialize the ChainMonitor - let chain_monitor: Arc<ChainMonitor> = Arc::new(chainmonitor::ChainMonitor::new( - Some(Arc::clone(&chain_source)), + let descriptor = Bip84(xprv, KeychainKind::External); + let change_descriptor = Bip84(xprv, KeychainKind::Internal); + let mut wallet_persister = + KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); + let wallet_opt = BdkWallet::load() + .descriptor(KeychainKind::External, Some(descriptor.clone())) + .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) + .extract_keys() + .check_network(config.network) + .load_wallet(&mut wallet_persister) + .map_err(|e| match e { + bdk_wallet::LoadWithPersistError::InvalidChangeSet( + bdk_wallet::LoadError::Mismatch(bdk_wallet::LoadMismatch::Network { + loaded, + expected, + }), + ) => { + log_error!( + logger, + "Failed to set up wallet: Networks do not match. Expected {} but got {}", + expected, + loaded + ); + BuildError::NetworkMismatch + }, + _ => { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + }, + })?; + let bdk_wallet = match wallet_opt { + Some(wallet) => wallet, + None => { + let mut wallet = BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?; + + if let Some(best_block) = chain_tip_opt { + // Insert the first checkpoint if we have it, to avoid resyncing from genesis. + // TODO: Use a proper wallet birthday once BDK supports it.
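+ // Note: anchoring the fresh wallet at the current tip means BDK only scans forward from
+ // here; funds received at wallet addresses before this point would need a manual rescan.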
+ let mut latest_checkpoint = wallet.latest_checkpoint(); + let block_id = + bdk_chain::BlockId { height: best_block.height, hash: best_block.block_hash }; + latest_checkpoint = latest_checkpoint.insert(block_id); + let update = + bdk_wallet::Update { chain: Some(latest_checkpoint), ..Default::default() }; + wallet.apply_update(update).map_err(|e| { + log_error!(logger, "Failed to apply checkpoint during wallet setup: {}", e); + BuildError::WalletSetupFailed + })?; + } + wallet + }, + }; + + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, Arc::clone(&tx_broadcaster), - Arc::clone(&logger), Arc::clone(&fee_estimator), - Arc::clone(&kv_store), + Arc::clone(&payment_store), + Arc::clone(&config), + Arc::clone(&logger), )); // Initialize the KeysManager @@ -1327,6 +1579,41 @@ Arc::clone(&logger), )); + let peer_storage_key = keys_manager.get_peer_storage_key(); + let persister = Arc::new(Persister::new( + Arc::clone(&kv_store), + Arc::clone(&logger), + PERSISTER_MAX_PENDING_UPDATES, + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + )); + + // Read ChannelMonitor state from store + let channel_monitors = match persister.read_all_channel_monitors_with_updates() { + Ok(monitors) => monitors, + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + Vec::new() + } else { + log_error!(logger, "Failed to read channel monitors from store: {}", e.to_string()); + return Err(BuildError::ReadFailed); + } + }, + }; + + // Initialize the ChainMonitor + let chain_monitor: Arc<ChainMonitor> = Arc::new(chainmonitor::ChainMonitor::new( + Some(Arc::clone(&chain_source)), + Arc::clone(&tx_broadcaster), + Arc::clone(&logger), + Arc::clone(&fee_estimator), + Arc::clone(&persister), + Arc::clone(&keys_manager), + peer_storage_key, + )); + // Initialize the network graph, scorer, and router let network_graph = match io::utils::read_network_graph(Arc::clone(&kv_store), Arc::clone(&logger)) { @@ -1335,31 +1622,46 @@ if e.kind() == std::io::ErrorKind::NotFound { Arc::new(Graph::new(config.network.into(), Arc::clone(&logger))) } else { + log_error!(logger, "Failed to read network graph from store: {}", e); return Err(BuildError::ReadFailed); } }, }; - let scorer = match io::utils::read_scorer( + let local_scorer = match io::utils::read_scorer( Arc::clone(&kv_store), Arc::clone(&network_graph), Arc::clone(&logger), ) { - Ok(scorer) => Arc::new(Mutex::new(scorer)), + Ok(scorer) => scorer, Err(e) => { if e.kind() == std::io::ErrorKind::NotFound { let params = ProbabilisticScoringDecayParameters::default(); - Arc::new(Mutex::new(ProbabilisticScorer::new( - params, - Arc::clone(&network_graph), - Arc::clone(&logger), - ))) + ProbabilisticScorer::new(params, Arc::clone(&network_graph), Arc::clone(&logger)) } else { + log_error!(logger, "Failed to read scoring data from store: {}", e); return Err(BuildError::ReadFailed); } }, }; + let scorer = Arc::new(Mutex::new(CombinedScorer::new(local_scorer))); + + // Restore external pathfinding scores from cache if possible.
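+ // A cache miss (`NotFound`) is expected on a first run and is simply skipped below; any
+ // other read error aborts the build.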
+ match read_external_pathfinding_scores_from_cache(Arc::clone(&kv_store), Arc::clone(&logger)) { + Ok(external_scores) => { + scorer.lock().unwrap().merge(external_scores, cur_time); + log_trace!(logger, "External scores from cache merged successfully"); + }, + Err(e) => { + if e.kind() != std::io::ErrorKind::NotFound { + log_error!(logger, "Error while reading external scores from cache: {}", e); + return Err(BuildError::ReadFailed); + } + }, + } + + // let scoring_fee_params = ProbabilisticScoringFeeParameters::default(); let scoring_fee_params = ProbabilisticScoringFeeParameters { // Alby: Penalize longer routes https://blog.mutinywallet.com/fixing-payment-reliability/ // * 4 recommended by BlueMatt // https://github.com/lightningdevkit/rust-lightning/issues/3040 @@ -1378,35 +1680,7 @@ scoring_fee_params, )); - // Read ChannelMonitor state from store - let channel_monitors = match read_channel_monitors( - Arc::clone(&kv_store), - Arc::clone(&keys_manager), - Arc::clone(&keys_manager), - ) { - Ok(monitors) => monitors, - Err(e) => { - if e.kind() == lightning::io::ErrorKind::NotFound { - Vec::new() - } else { - log_error!(logger, "Failed to read channel monitors: {}", e.to_string()); - return Err(BuildError::ReadFailed); - } - }, - }; - let mut user_config = default_user_config(&config); - if liquidity_source_config.and_then(|lsc| lsc.lsps2_client.as_ref()).is_some() { - // Generally allow claiming underpaying HTLCs as the LSP will skim off some fee. We'll - // check that they don't take too much before claiming. - user_config.channel_config.accept_underpaying_htlcs = true; - - // FIXME: When we're an LSPS2 client, set maximum allowed inbound HTLC value in flight - // to 100%. We should eventually be able to set this on a per-channel basis, but for - // now we just bump the default for all channels. - user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; - } if liquidity_source_config.and_then(|lsc| lsc.lsps2_service.as_ref()).is_some() { // If we act as an LSPS2 service, we need to be able to intercept HTLCs and forward the @@ -1425,12 +1699,23 @@ // Alby: always allow receiving 100% of channel size. user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + if let Some(role) = async_payments_role { + match role { + AsyncPaymentsRole::Server => { + user_config.accept_forwards_to_priv_channels = true; + user_config.enable_htlc_hold = true; + }, + AsyncPaymentsRole::Client => user_config.hold_outbound_htlcs_at_next_hop = true, + } + } + let message_router = Arc::new(MessageRouter::new(Arc::clone(&network_graph), Arc::clone(&keys_manager))); // Initialize the ChannelManager let channel_manager = { - if let Ok(res) = kv_store.read( + if let Ok(res) = KVStoreSync::read( + &*kv_store, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -1453,19 +1738,16 @@ ); let (_hash, channel_manager) = <(BlockHash, ChannelManager)>::read(&mut reader, read_args).map_err(|e| { - log_error!(logger, "Failed to read channel manager from KVStore: {}", e); + log_error!(logger, "Failed to read channel manager from store: {}", e); BuildError::ReadFailed })?; channel_manager } else { // We're starting a fresh node.
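// Use the already-fetched chain tip as the starting best block where available, falling back to the network's genesis block otherwise, so a brand-new node avoids replaying the whole chain.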
- let genesis_block_hash = - bitcoin::blockdata::constants::genesis_block(config.network).block_hash(); + let best_block = + chain_tip_opt.unwrap_or_else(|| BestBlock::from_network(config.network)); - let chain_params = ChainParameters { - network: config.network.into(), - best_block: BestBlock::new(genesis_block_hash, 0), - }; + let chain_params = ChainParameters { network: config.network.into(), best_block }; channelmanager::ChannelManager::new( Arc::clone(&fee_estimator), Arc::clone(&chain_monitor), @@ -1487,25 +1769,40 @@ // Give ChannelMonitors to ChainMonitor for (_blockhash, channel_monitor) in channel_monitors.into_iter() { - let funding_outpoint = channel_monitor.get_funding_txo().0; - chain_monitor.watch_channel(funding_outpoint, channel_monitor).map_err(|e| { + let channel_id = channel_monitor.channel_id(); + chain_monitor.watch_channel(channel_id, channel_monitor).map_err(|e| { log_error!(logger, "Failed to watch channel monitor: {:?}", e); BuildError::InvalidChannelMonitor })?; } // Initialize the PeerManager - let onion_messenger: Arc<OnionMessenger> = Arc::new(OnionMessenger::new( - Arc::clone(&keys_manager), - Arc::clone(&keys_manager), - Arc::clone(&logger), - Arc::clone(&channel_manager), - message_router, - Arc::clone(&channel_manager), - IgnoringMessageHandler {}, - IgnoringMessageHandler {}, - IgnoringMessageHandler {}, - )); + let onion_messenger: Arc<OnionMessenger> = + if let Some(AsyncPaymentsRole::Server) = async_payments_role { + Arc::new(OnionMessenger::new_with_offline_peer_interception( + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&logger), + Arc::clone(&channel_manager), + message_router, + Arc::clone(&channel_manager), + Arc::clone(&channel_manager), + IgnoringMessageHandler {}, + IgnoringMessageHandler {}, + )) + } else { + Arc::new(OnionMessenger::new( + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&logger), + Arc::clone(&channel_manager), + message_router, + Arc::clone(&channel_manager), + Arc::clone(&channel_manager), + IgnoringMessageHandler {}, + IgnoringMessageHandler {}, + )) + }; let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes(); // Initialize the GossipSource @@ -1568,6 +1865,8 @@ Arc::clone(&channel_manager), Arc::clone(&keys_manager), Arc::clone(&chain_source), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), ); @@ -1601,7 +1900,8 @@ liquidity_source_builder.lsps2_service(promise_secret, config.clone()) }); - let liquidity_source = Arc::new(liquidity_source_builder.build()); + let liquidity_source = runtime + .block_on(async move { liquidity_source_builder.build().await.map(Arc::new) })?; let custom_message_handler = Arc::new(NodeCustomMessageHandler::new_liquidity(Arc::clone(&liquidity_source))); (Some(liquidity_source), custom_message_handler) @@ -1616,6 +1916,7 @@ as Arc<dyn RoutingMessageHandler + Sync + Send>, onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, + send_only_message_handler: Arc::clone(&chain_monitor), }, GossipSync::Rapid(_) => MessageHandler { chan_handler: Arc::clone(&channel_manager), @@ -1623,6 +1924,7 @@ as Arc<dyn RoutingMessageHandler + Sync + Send>, onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, + send_only_message_handler: Arc::clone(&chain_monitor), }, GossipSync::None => { unreachable!("We must always have a gossip sync!"); }, @@ -1678,25 +1980,12 @@ Arc::clone(&logger), )) } else
{ + log_error!(logger, "Failed to read output sweeper data from store: {}", e); return Err(BuildError::ReadFailed); } }, }; - match io::utils::migrate_deprecated_spendable_outputs( - Arc::clone(&output_sweeper), - Arc::clone(&kv_store), - Arc::clone(&logger), - ) { - Ok(()) => { - log_info!(logger, "Successfully migrated OutputSweeper data."); - }, - Err(e) => { - log_error!(logger, "Failed to migrate OutputSweeper data: {}", e); - return Err(BuildError::ReadFailed); - }, - } - let event_queue = match io::utils::read_event_queue(Arc::clone(&kv_store), Arc::clone(&logger)) { Ok(event_queue) => Arc::new(event_queue), @@ -1704,6 +1993,7 @@ if e.kind() == std::io::ErrorKind::NotFound { Arc::new(EventQueue::new(Arc::clone(&kv_store), Arc::clone(&logger))) } else { + log_error!(logger, "Failed to read event queue from store: {}", e); return Err(BuildError::ReadFailed); } }, @@ -1715,26 +2005,33 @@ if e.kind() == std::io::ErrorKind::NotFound { Arc::new(PeerStore::new(Arc::clone(&kv_store), Arc::clone(&logger))) } else { + log_error!(logger, "Failed to read peer data from store: {}", e); return Err(BuildError::ReadFailed); } }, }; + let om_mailbox = if let Some(AsyncPaymentsRole::Server) = async_payments_role { + Some(Arc::new(OnionMessageMailbox::new())) + } else { + None + }; + let (stop_sender, _) = tokio::sync::watch::channel(()); - let background_processor_task = Mutex::new(None); - let background_tasks = Mutex::new(None); - let cancellable_background_tasks = Mutex::new(None); + let (background_processor_stop_sender, _) = tokio::sync::watch::channel(()); + let is_running = Arc::new(RwLock::new(false)); + + let pathfinding_scores_sync_url = pathfinding_scores_sync_config.map(|c| c.url.clone()); Ok(Node { runtime, stop_sender, - background_processor_task, - background_tasks, - cancellable_background_tasks, + background_processor_stop_sender, config, wallet, chain_source, tx_broadcaster, + fee_estimator, event_queue, channel_manager, chain_monitor, @@ -1745,6 +2042,7 @@ keys_manager, network_graph, gossip_source, + pathfinding_scores_sync_url, liquidity_source, kv_store, logger, @@ -1752,8 +2050,10 @@ scorer, peer_store, payment_store, - is_listening, + is_running, node_metrics, + om_mailbox, + async_payments_role, }) } @@ -1765,7 +2065,7 @@ fn optionally_install_rustls_cryptoprovider() { INIT_CRYPTO.call_once(|| { // Ensure we always install a `CryptoProvider` for `rustls` if it was somehow not previously installed by now. if rustls::crypto::CryptoProvider::get_default().is_none() { - let _ = rustls::crypto::aws_lc_rs::default_provider().install_default(); + let _ = rustls::crypto::ring::default_provider().install_default(); } // Refuse to start up without TLS support. Better to catch it now than even later at runtime.
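// Without a provider, later TLS connections (e.g. to a VSS backend or an RGS server) would only fail at runtime with much less obvious errors.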
@@ -1855,7 +2155,8 @@ fn reset_persistent_state(logger: Arc, kv_store: Arc, what: Re }; if node_metrics { - let result = kv_store.remove( + let result = KVStoreSync::remove( + &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, @@ -1867,7 +2168,8 @@ fn reset_persistent_state(logger: Arc, kv_store: Arc, what: Re } if scorer { - let result = kv_store.remove( + let result = KVStoreSync::remove( + &*kv_store, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, @@ -1879,7 +2181,8 @@ fn reset_persistent_state(logger: Arc, kv_store: Arc, what: Re } if network_graph { - let result = kv_store.remove( + let result = KVStoreSync::remove( + &*kv_store, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs new file mode 100644 index 000000000..b3d7880d6 --- /dev/null +++ b/src/chain/bitcoind.rs @@ -0,0 +1,1642 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use std::collections::{HashMap, VecDeque}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use base64::prelude::BASE64_STANDARD; +use base64::Engine; +use bitcoin::{BlockHash, FeeRate, Network, Transaction, Txid}; +use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; +use lightning::chain::{BestBlock, Listen}; +use lightning::util::ser::Writeable; +use lightning_block_sync::gossip::UtxoSource; +use lightning_block_sync::http::{HttpEndpoint, JsonResponse}; +use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; +use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; +use lightning_block_sync::rest::RestClient; +use lightning_block_sync::rpc::{RpcClient, RpcError}; +use lightning_block_sync::{ + AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceErrorKind, Cache, + SpvClient, +}; +use serde::Serialize; + +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use crate::config::{ + BitcoindRestClientConfig, Config, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, +}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + ConfirmationTarget, OnchainFeeEstimator, +}; +use crate::io::utils::write_node_metrics; +use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; + +const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; +const CHAIN_POLLING_TIMEOUT_SECS: u64 = 10; + +pub(super) struct BitcoindChainSource { + api_client: Arc, + header_cache: tokio::sync::Mutex, + latest_chain_tip: RwLock>, + wallet_polling_status: Mutex, + fee_estimator: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl BitcoindChainSource { + pub(crate) fn new_rpc( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + let api_client = 
Arc::new(BitcoindClient::new_rpc( + rpc_host.clone(), + rpc_port.clone(), + rpc_user.clone(), + rpc_password.clone(), + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + Self { + api_client, + header_cache, + latest_chain_tip, + wallet_polling_status, + fee_estimator, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(crate) fn new_rest( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + fee_estimator: Arc, kv_store: Arc, config: Arc, + rest_client_config: BitcoindRestClientConfig, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let api_client = Arc::new(BitcoindClient::new_rest( + rest_client_config.rest_host, + rest_client_config.rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + )); + + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + + Self { + api_client, + header_cache, + latest_chain_tip, + wallet_polling_status, + fee_estimator, + kv_store, + config, + logger: Arc::clone(&logger), + node_metrics, + } + } + + pub(super) fn as_utxo_source(&self) -> Arc { + self.api_client.utxo_source() + } + + pub(super) async fn continuously_sync_wallets( + &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, + onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, + ) { + // First register for the wallet polling status to make sure `Node::sync_wallets` calls + // wait on the result before proceeding. + { + let mut status_lock = self.wallet_polling_status.lock().unwrap(); + if status_lock.register_or_subscribe_pending_sync().is_some() { + debug_assert!(false, "Sync already in progress. This should never happen."); + } + } + + log_info!( + self.logger, + "Starting initial synchronization of chain listeners. This might take a while..", + ); + + let mut backoff = CHAIN_POLLING_INTERVAL_SECS; + const MAX_BACKOFF_SECS: u64 = 300; + + loop { + // if the stop_sync_sender has been dropped, we should just exit + if stop_sync_receiver.has_changed().unwrap_or(true) { + log_trace!(self.logger, "Stopping initial chain sync."); + return; + } + + let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; + let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; + let onchain_wallet_best_block_hash = onchain_wallet.current_best_block().block_hash; + + let mut chain_listeners = vec![ + (onchain_wallet_best_block_hash, &*onchain_wallet as &(dyn Listen + Send + Sync)), + (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), + (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), + ]; + + // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s + // before giving them to `ChainMonitor` it the first place. However, this isn't + // trivial as we load them on initialization (in the `Builder`) and only gain + // network access during `start`. For now, we just make sure we get the worst known + // block hash and sychronize them via `ChainMonitor`. 
+ if let Some(worst_channel_monitor_block_hash) = chain_monitor + .list_monitors() + .iter() + .flat_map(|channel_id| chain_monitor.get_monitor(*channel_id)) + .map(|m| m.current_best_block()) + .min_by_key(|b| b.height) + .map(|b| b.block_hash) + { + chain_listeners.push(( + worst_channel_monitor_block_hash, + &*chain_monitor as &(dyn Listen + Send + Sync), + )); + } + + let mut locked_header_cache = self.header_cache.lock().await; + let now = SystemTime::now(); + match synchronize_listeners( + self.api_client.as_ref(), + self.config.network, + &mut *locked_header_cache, + chain_listeners.clone(), + ) + .await + { + Ok(chain_tip) => { + { + log_info!( + self.logger, + "Finished synchronizing listeners in {}ms", + now.elapsed().unwrap().as_millis() + ); + *self.latest_chain_tip.write().unwrap() = Some(chain_tip); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + ) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); + } + break; + }, + + Err(e) => { + log_error!(self.logger, "Failed to synchronize chain listeners: {:?}", e); + if e.kind() == BlockSourceErrorKind::Transient { + log_info!( + self.logger, + "Transient error syncing chain listeners: {:?}. Retrying in {} seconds.", + e, + backoff + ); + // Sleep with stop signal check to allow immediate shutdown + tokio::select! { + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping initial chain sync.", + ); + return; + } + _ = tokio::time::sleep(Duration::from_secs(backoff)) => {} + } + backoff = std::cmp::min(backoff * 2, MAX_BACKOFF_SECS); + } else { + log_error!( + self.logger, + "Persistent error syncing chain listeners: {:?}. Retrying in {} seconds.", + e, + MAX_BACKOFF_SECS + ); + // Sleep with stop signal check to allow immediate shutdown + tokio::select! { + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping initial chain sync during backoff.", + ); + return; + } + _ = tokio::time::sleep(Duration::from_secs(MAX_BACKOFF_SECS)) => {} + } + } + }, + } + } + + // Now propagate the initial result to unblock waiting subscribers. + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + // When starting up, we just blocked on updating, so skip the first tick. + fee_rate_update_interval.reset(); + fee_rate_update_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + log_info!(self.logger, "Starting continuous polling for chain updates."); + + // Start the polling loop. + let mut last_best_block_hash = None; + loop { + tokio::select! { + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = chain_polling_interval.tick() => { + tokio::select! 
{ + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = self.poll_and_update_listeners( + Arc::clone(&onchain_wallet), + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&output_sweeper) + ) => {} + } + } + _ = fee_rate_update_interval.tick() => { + if last_best_block_hash != Some(channel_manager.current_best_block().block_hash) { + tokio::select! { + biased; + _ = stop_sync_receiver.changed() => { + log_trace!( + self.logger, + "Stopping polling for new chain data.", + ); + return; + } + update_res = self.update_fee_rate_estimates() => { + if update_res.is_ok() { + last_best_block_hash = Some(channel_manager.current_best_block().block_hash); + } + } + } + } + } + } + } + } + + pub(super) async fn poll_best_block(&self) -> Result { + self.poll_chain_tip().await.map(|tip| tip.to_best_block()) + } + + async fn poll_chain_tip(&self) -> Result { + let validate_res = tokio::time::timeout( + Duration::from_secs(CHAIN_POLLING_TIMEOUT_SECS), + validate_best_block_header(self.api_client.as_ref()), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + Error::TxSyncTimeout + })?; + + match validate_res { + Ok(tip) => { + *self.latest_chain_tip.write().unwrap() = Some(tip); + Ok(tip) + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + return Err(Error::TxSyncFailed); + }, + } + } + + pub(super) async fn poll_and_update_listeners( + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, + ) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.wallet_polling_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet polling result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let res = self + .poll_and_update_listeners_inner( + onchain_wallet, + channel_manager, + chain_monitor, + output_sweeper, + ) + .await; + + self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn poll_and_update_listeners_inner( + &self, onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, + ) -> Result<(), Error> { + let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); + let chain_tip = + if let Some(tip) = latest_chain_tip_opt { tip } else { self.poll_chain_tip().await? 
}; + + let mut locked_header_cache = self.header_cache.lock().await; + let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); + let chain_listener = ChainListener { + onchain_wallet: Arc::clone(&onchain_wallet), + channel_manager: Arc::clone(&channel_manager), + chain_monitor: Arc::clone(&chain_monitor), + output_sweeper, + }; + let mut spv_client = + SpvClient::new(chain_tip, chain_poller, &mut *locked_header_cache, &chain_listener); + + let now = SystemTime::now(); + match spv_client.poll_best_tip().await { + Ok((ChainTip::Better(tip), true)) => { + log_trace!( + self.logger, + "Finished polling best tip in {}ms", + now.elapsed().unwrap().as_millis() + ); + *self.latest_chain_tip.write().unwrap() = Some(tip); + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + chain_monitor, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + }, + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to poll for chain data: {:?}", e); + return Err(Error::TxSyncFailed); + }, + } + + let cur_height = channel_manager.current_best_block().height; + + let now = SystemTime::now(); + let bdk_unconfirmed_txids = onchain_wallet.get_unconfirmed_txids(); + match self + .api_client + .get_updated_mempool_transactions(cur_height, bdk_unconfirmed_txids) + .await + { + Ok((unconfirmed_txs, evicted_txids)) => { + log_trace!( + self.logger, + "Finished polling mempool of size {} and {} evicted transactions in {}ms", + unconfirmed_txs.len(), + evicted_txids.len(), + now.elapsed().unwrap().as_millis() + ); + onchain_wallet.apply_mempool_txs(unconfirmed_txs, evicted_txids).unwrap_or_else( + |e| { + log_error!(self.logger, "Failed to apply mempool transactions: {:?}", e); + }, + ); + }, + Err(e) => { + log_error!(self.logger, "Failed to poll for mempool transactions: {:?}", e); + return Err(Error::TxSyncFailed); + }, + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + + Ok(()) + } + + pub(super) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + macro_rules! 
get_fee_rate_update { + ($estimation_fut:expr) => {{ + let update_res = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + $estimation_fut, + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })?; + update_res + }}; + } + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + let now = Instant::now(); + for target in confirmation_targets { + let fee_rate_update_res = match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, + ) => { + let estimation_fut = self.api_client.get_mempool_minimum_fee_rate(); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::MaximumFeeEstimate) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning(LdkConfirmationTarget::UrgentOnChainSweep) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + _ => { + // Otherwise, we default to economical block-target estimate. + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Economical; + let estimation_fut = + self.api_client.get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + }; + + let fee_rate = match (fee_rate_update_res, self.config.network) { + (Ok(rate), _) => rate, + (Err(e), Network::Bitcoin) => { + // Strictly fail on mainnet. + log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + return Err(Error::FeerateEstimationUpdateFailed); + }, + (Err(e), n) if n == Network::Regtest || n == Network::Signet => { + // On regtest/signet we just fall back to the usual 1 sat/vb == 250 + // sat/kwu default. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", + e, + ); + FeeRate::from_sat_per_kwu(250) + }, + (Err(e), _) => { + // On testnet `estimatesmartfee` can be unreliable so we just skip in + // case of a failure, which will have us falling back to defaults. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", + e, + ); + return Ok(()); + }, + }; + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + self.logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + if self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { + // We only log if the values changed, as it might be very spammy otherwise. 
+ log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + Ok(()) + } + + pub(crate) async fn process_broadcast_package(&self, package: Vec) { + // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 + // features, we should eventually switch to use `submitpackage` via the + // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual + // transactions. + for tx in &package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.api_client.broadcast_transaction(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(id) => { + debug_assert_eq!(id, txid); + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); + }, + Err(e) => { + log_error!(self.logger, "Failed to broadcast transaction {}: {}", txid, e); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } +} + +pub enum BitcoindClient { + Rpc { + rpc_client: Arc, + latest_mempool_timestamp: AtomicU64, + mempool_entries_cache: tokio::sync::Mutex>, + mempool_txs_cache: tokio::sync::Mutex>, + }, + Rest { + rest_client: Arc, + rpc_client: Arc, + latest_mempool_timestamp: AtomicU64, + mempool_entries_cache: tokio::sync::Mutex>, + mempool_txs_cache: tokio::sync::Mutex>, + }, +} + +impl BitcoindClient { + /// Creates a new RPC API client for the chain interactions with Bitcoin Core. + pub(crate) fn new_rpc(host: String, port: u16, rpc_user: String, rpc_password: String) -> Self { + let http_endpoint = endpoint(host, port); + let rpc_credentials = rpc_credentials(rpc_user, rpc_password); + + let rpc_client = Arc::new(RpcClient::new(&rpc_credentials, http_endpoint)); + + let latest_mempool_timestamp = AtomicU64::new(0); + + let mempool_entries_cache = tokio::sync::Mutex::new(HashMap::new()); + let mempool_txs_cache = tokio::sync::Mutex::new(HashMap::new()); + Self::Rpc { rpc_client, latest_mempool_timestamp, mempool_entries_cache, mempool_txs_cache } + } + + /// Creates a new, primarily REST API client for the chain interactions + /// with Bitcoin Core. + /// + /// Aside the required REST host and port, we provide RPC configuration + /// options for necessary calls not supported by the REST interface. 
+ pub(crate) fn new_rest( + rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, rpc_user: String, + rpc_password: String, + ) -> Self { + let rest_endpoint = endpoint(rest_host, rest_port).with_path("/rest".to_string()); + let rest_client = Arc::new(RestClient::new(rest_endpoint)); + + let rpc_endpoint = endpoint(rpc_host, rpc_port); + let rpc_credentials = rpc_credentials(rpc_user, rpc_password); + let rpc_client = Arc::new(RpcClient::new(&rpc_credentials, rpc_endpoint)); + + let latest_mempool_timestamp = AtomicU64::new(0); + + let mempool_entries_cache = tokio::sync::Mutex::new(HashMap::new()); + let mempool_txs_cache = tokio::sync::Mutex::new(HashMap::new()); + + Self::Rest { + rest_client, + rpc_client, + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + } + } + + pub(crate) fn utxo_source(&self) -> Arc { + match self { + BitcoindClient::Rpc { rpc_client, .. } => Arc::clone(rpc_client) as Arc, + BitcoindClient::Rest { rest_client, .. } => { + Arc::clone(rest_client) as Arc + }, + } + } + + /// Broadcasts the provided transaction. + pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::broadcast_transaction_inner(Arc::clone(rpc_client), tx).await + }, + BitcoindClient::Rest { rpc_client, .. } => { + // Bitcoin Core's REST interface does not support broadcasting transactions + // so we use the RPC client. + Self::broadcast_transaction_inner(Arc::clone(rpc_client), tx).await + }, + } + } + + async fn broadcast_transaction_inner( + rpc_client: Arc, tx: &Transaction, + ) -> std::io::Result { + let tx_serialized = bitcoin::consensus::encode::serialize_hex(tx); + let tx_json = serde_json::json!(tx_serialized); + rpc_client.call_method::("sendrawtransaction", &[tx_json]).await + } + + /// Retrieve the fee estimate needed for a transaction to begin + /// confirmation within the provided `num_blocks`. + pub(crate) async fn get_fee_estimate_for_target( + &self, num_blocks: usize, estimation_mode: FeeRateEstimationMode, + ) -> std::io::Result { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_fee_estimate_for_target_inner( + Arc::clone(rpc_client), + num_blocks, + estimation_mode, + ) + .await + }, + BitcoindClient::Rest { rpc_client, .. } => { + // We rely on the internal RPC client to make this call, as this + // operation is not supported by Bitcoin Core's REST interface. + Self::get_fee_estimate_for_target_inner( + Arc::clone(rpc_client), + num_blocks, + estimation_mode, + ) + .await + }, + } + } + + /// Estimate the fee rate for the provided target number of blocks. + async fn get_fee_estimate_for_target_inner( + rpc_client: Arc, num_blocks: usize, estimation_mode: FeeRateEstimationMode, + ) -> std::io::Result { + let num_blocks_json = serde_json::json!(num_blocks); + let estimation_mode_json = serde_json::json!(estimation_mode); + rpc_client + .call_method::( + "estimatesmartfee", + &[num_blocks_json, estimation_mode_json], + ) + .await + .map(|resp| resp.0) + } + + /// Gets the mempool minimum fee rate. + pub(crate) async fn get_mempool_minimum_fee_rate(&self) -> std::io::Result { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_mempool_minimum_fee_rate_rpc(Arc::clone(rpc_client)).await + }, + BitcoindClient::Rest { rest_client, .. } => { + Self::get_mempool_minimum_fee_rate_rest(Arc::clone(rest_client)).await + }, + } + } + + /// Get the mempool minimum fee rate via RPC interface. 
+ async fn get_mempool_minimum_fee_rate_rpc( + rpc_client: Arc, + ) -> std::io::Result { + rpc_client + .call_method::("getmempoolinfo", &[]) + .await + .map(|resp| resp.0) + } + + /// Get the mempool minimum fee rate via REST interface. + async fn get_mempool_minimum_fee_rate_rest( + rest_client: Arc, + ) -> std::io::Result { + rest_client + .request_resource::("mempool/info.json") + .await + .map(|resp| resp.0) + } + + /// Gets the raw transaction for the provided transaction ID. Returns `None` if not found. + pub(crate) async fn get_raw_transaction( + &self, txid: &Txid, + ) -> std::io::Result> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_raw_transaction_rpc(Arc::clone(rpc_client), txid).await + }, + BitcoindClient::Rest { rest_client, .. } => { + Self::get_raw_transaction_rest(Arc::clone(rest_client), txid).await + }, + } + } + + /// Retrieve raw transaction for provided transaction ID via the RPC interface. + async fn get_raw_transaction_rpc( + rpc_client: Arc, txid: &Txid, + ) -> std::io::Result> { + let txid_hex = txid.to_string(); + let txid_json = serde_json::json!(txid_hex); + match rpc_client + .call_method::("getrawtransaction", &[txid_json]) + .await + { + Ok(resp) => Ok(Some(resp.0)), + Err(e) => match e.into_inner() { + Some(inner) => { + let rpc_error_res: Result, _> = inner.downcast(); + + match rpc_error_res { + Ok(rpc_error) => { + // Check if it's the 'not found' error code. + if rpc_error.code == -5 { + Ok(None) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error)) + } + }, + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getrawtransaction response", + )), + } + }, + None => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getrawtransaction response", + )), + }, + } + } + + /// Retrieve raw transaction for provided transaction ID via the REST interface. + async fn get_raw_transaction_rest( + rest_client: Arc, txid: &Txid, + ) -> std::io::Result> { + let txid_hex = txid.to_string(); + let tx_path = format!("tx/{}.json", txid_hex); + match rest_client + .request_resource::(&tx_path) + .await + { + Ok(resp) => Ok(Some(resp.0)), + Err(e) => match e.kind() { + std::io::ErrorKind::Other => { + match e.into_inner() { + Some(inner) => { + let http_error_res: Result, _> = inner.downcast(); + match http_error_res { + Ok(http_error) => { + // Check if it's the HTTP NOT_FOUND error code. + if &http_error.status_code == "404" { + Ok(None) + } else { + Err(std::io::Error::new( + std::io::ErrorKind::Other, + http_error, + )) + } + }, + Err(_) => { + let error_msg = + format!("Failed to process {} response.", tx_path); + Err(std::io::Error::new( + std::io::ErrorKind::Other, + error_msg.as_str(), + )) + }, + } + }, + None => { + let error_msg = format!("Failed to process {} response.", tx_path); + Err(std::io::Error::new(std::io::ErrorKind::Other, error_msg.as_str())) + }, + } + }, + _ => { + let error_msg = format!("Failed to process {} response.", tx_path); + Err(std::io::Error::new(std::io::ErrorKind::Other, error_msg.as_str())) + }, + }, + } + } + + /// Retrieves the raw mempool. + pub(crate) async fn get_raw_mempool(&self) -> std::io::Result> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_raw_mempool_rpc(Arc::clone(rpc_client)).await + }, + BitcoindClient::Rest { rest_client, .. } => { + Self::get_raw_mempool_rest(Arc::clone(rest_client)).await + }, + } + } + + /// Retrieves the raw mempool via the RPC interface. 
+ async fn get_raw_mempool_rpc(rpc_client: Arc) -> std::io::Result> { + let verbose_flag_json = serde_json::json!(false); + rpc_client + .call_method::("getrawmempool", &[verbose_flag_json]) + .await + .map(|resp| resp.0) + } + + /// Retrieves the raw mempool via the REST interface. + async fn get_raw_mempool_rest(rest_client: Arc) -> std::io::Result> { + rest_client + .request_resource::( + "mempool/contents.json?verbose=false", + ) + .await + .map(|resp| resp.0) + } + + /// Retrieves an entry from the mempool if it exists, else return `None`. + pub(crate) async fn get_mempool_entry( + &self, txid: Txid, + ) -> std::io::Result> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Self::get_mempool_entry_inner(Arc::clone(rpc_client), txid).await + }, + BitcoindClient::Rest { rpc_client, .. } => { + Self::get_mempool_entry_inner(Arc::clone(rpc_client), txid).await + }, + } + } + + /// Retrieves the mempool entry of the provided transaction ID. + async fn get_mempool_entry_inner( + client: Arc, txid: Txid, + ) -> std::io::Result> { + let txid_hex = txid.to_string(); + let txid_json = serde_json::json!(txid_hex); + + match client.call_method::("getmempoolentry", &[txid_json]).await { + Ok(resp) => Ok(Some(MempoolEntry { txid, time: resp.time, height: resp.height })), + Err(e) => match e.into_inner() { + Some(inner) => { + let rpc_error_res: Result, _> = inner.downcast(); + + match rpc_error_res { + Ok(rpc_error) => { + // Check if it's the 'not found' error code. + if rpc_error.code == -5 { + Ok(None) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error)) + } + }, + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getmempoolentry response", + )), + } + }, + None => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getmempoolentry response", + )), + }, + } + } + + pub(crate) async fn update_mempool_entries_cache(&self) -> std::io::Result<()> { + match self { + BitcoindClient::Rpc { mempool_entries_cache, .. } => { + self.update_mempool_entries_cache_inner(mempool_entries_cache).await + }, + BitcoindClient::Rest { mempool_entries_cache, .. } => { + self.update_mempool_entries_cache_inner(mempool_entries_cache).await + }, + } + } + + async fn update_mempool_entries_cache_inner( + &self, mempool_entries_cache: &tokio::sync::Mutex>, + ) -> std::io::Result<()> { + let mempool_txids = self.get_raw_mempool().await?; + + let mut mempool_entries_cache = mempool_entries_cache.lock().await; + mempool_entries_cache.retain(|txid, _| mempool_txids.contains(txid)); + + if let Some(difference) = mempool_txids.len().checked_sub(mempool_entries_cache.capacity()) + { + mempool_entries_cache.reserve(difference) + } + + for txid in mempool_txids { + if mempool_entries_cache.contains_key(&txid) { + continue; + } + + if let Some(entry) = self.get_mempool_entry(txid).await? { + mempool_entries_cache.insert(txid, entry.clone()); + } + } + + mempool_entries_cache.shrink_to_fit(); + + Ok(()) + } + + /// Returns two `Vec`s: + /// - mempool transactions, alongside their first-seen unix timestamps. + /// - transactions that have been evicted from the mempool, alongside the last time they were seen absent. 
+ pub(crate) async fn get_updated_mempool_transactions( + &self, best_processed_height: u32, bdk_unconfirmed_txids: Vec, + ) -> std::io::Result<(Vec<(Transaction, u64)>, Vec<(Txid, u64)>)> { + let mempool_txs = + self.get_mempool_transactions_and_timestamp_at_height(best_processed_height).await?; + let evicted_txids = + self.get_evicted_mempool_txids_and_timestamp(bdk_unconfirmed_txids).await?; + Ok((mempool_txs, evicted_txids)) + } + + /// Get mempool transactions, alongside their first-seen unix timestamps. + /// + /// This method is an adapted version of `bdk_bitcoind_rpc::Emitter::mempool`. It emits each + /// transaction only once, unless we cannot assume the transaction's ancestors are already + /// emitted. + pub(crate) async fn get_mempool_transactions_and_timestamp_at_height( + &self, best_processed_height: u32, + ) -> std::io::Result> { + match self { + BitcoindClient::Rpc { + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + .. + } => { + self.get_mempool_transactions_and_timestamp_at_height_inner( + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + best_processed_height, + ) + .await + }, + BitcoindClient::Rest { + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + .. + } => { + self.get_mempool_transactions_and_timestamp_at_height_inner( + latest_mempool_timestamp, + mempool_entries_cache, + mempool_txs_cache, + best_processed_height, + ) + .await + }, + } + } + + async fn get_mempool_transactions_and_timestamp_at_height_inner( + &self, latest_mempool_timestamp: &AtomicU64, + mempool_entries_cache: &tokio::sync::Mutex>, + mempool_txs_cache: &tokio::sync::Mutex>, + best_processed_height: u32, + ) -> std::io::Result> { + let prev_mempool_time = latest_mempool_timestamp.load(Ordering::Relaxed); + let mut latest_time = prev_mempool_time; + + self.update_mempool_entries_cache().await?; + + let mempool_entries_cache = mempool_entries_cache.lock().await; + let mut mempool_txs_cache = mempool_txs_cache.lock().await; + mempool_txs_cache.retain(|txid, _| mempool_entries_cache.contains_key(txid)); + + if let Some(difference) = + mempool_entries_cache.len().checked_sub(mempool_txs_cache.capacity()) + { + mempool_txs_cache.reserve(difference) + } + + let mut txs_to_emit = Vec::with_capacity(mempool_entries_cache.len()); + for (txid, entry) in mempool_entries_cache.iter() { + if entry.time > latest_time { + latest_time = entry.time; + } + + // Avoid emitting transactions that are already emitted if we can guarantee + // blocks containing ancestors are already emitted. The bitcoind rpc interface + // provides us with the block height that the tx is introduced to the mempool. + // If we have already emitted the block of height, we can assume that all + // ancestor txs have been processed by the receiver. 
+ let ancestor_within_height = entry.height <= best_processed_height; + let is_already_emitted = entry.time <= prev_mempool_time; + if is_already_emitted && ancestor_within_height { + continue; + } + + if let Some((cached_tx, cached_time)) = mempool_txs_cache.get(txid) { + txs_to_emit.push((cached_tx.clone(), *cached_time)); + continue; + } + + match self.get_raw_transaction(&entry.txid).await { + Ok(Some(tx)) => { + mempool_txs_cache.insert(entry.txid, (tx.clone(), entry.time)); + txs_to_emit.push((tx, entry.time)); + }, + Ok(None) => { + continue; + }, + Err(e) => return Err(e), + }; + } + + if !txs_to_emit.is_empty() { + latest_mempool_timestamp.store(latest_time, Ordering::Release); + } + Ok(txs_to_emit) + } + + // Retrieve a list of Txids that have been evicted from the mempool. + // + // To this end, we first update our local mempool_entries_cache and then return all unconfirmed + // wallet `Txid`s that don't appear in the mempool still. + async fn get_evicted_mempool_txids_and_timestamp( + &self, bdk_unconfirmed_txids: Vec, + ) -> std::io::Result> { + match self { + BitcoindClient::Rpc { latest_mempool_timestamp, mempool_entries_cache, .. } => { + Self::get_evicted_mempool_txids_and_timestamp_inner( + latest_mempool_timestamp, + mempool_entries_cache, + bdk_unconfirmed_txids, + ) + .await + }, + BitcoindClient::Rest { latest_mempool_timestamp, mempool_entries_cache, .. } => { + Self::get_evicted_mempool_txids_and_timestamp_inner( + latest_mempool_timestamp, + mempool_entries_cache, + bdk_unconfirmed_txids, + ) + .await + }, + } + } + + async fn get_evicted_mempool_txids_and_timestamp_inner( + latest_mempool_timestamp: &AtomicU64, + mempool_entries_cache: &tokio::sync::Mutex>, + bdk_unconfirmed_txids: Vec, + ) -> std::io::Result> { + let latest_mempool_timestamp = latest_mempool_timestamp.load(Ordering::Relaxed); + let mempool_entries_cache = mempool_entries_cache.lock().await; + let evicted_txids = bdk_unconfirmed_txids + .into_iter() + .filter(|txid| !mempool_entries_cache.contains_key(txid)) + .map(|txid| (txid, latest_mempool_timestamp)) + .collect(); + Ok(evicted_txids) + } +} + +impl BlockSource for BitcoindClient { + fn get_header<'a>( + &'a self, header_hash: &'a bitcoin::BlockHash, height_hint: Option, + ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Box::pin(async move { rpc_client.get_header(header_hash, height_hint).await }) + }, + BitcoindClient::Rest { rest_client, .. } => { + Box::pin(async move { rest_client.get_header(header_hash, height_hint).await }) + }, + } + } + + fn get_block<'a>( + &'a self, header_hash: &'a bitcoin::BlockHash, + ) -> AsyncBlockSourceResult<'a, BlockData> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Box::pin(async move { rpc_client.get_block(header_hash).await }) + }, + BitcoindClient::Rest { rest_client, .. } => { + Box::pin(async move { rest_client.get_block(header_hash).await }) + }, + } + } + + fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (bitcoin::BlockHash, Option)> { + match self { + BitcoindClient::Rpc { rpc_client, .. } => { + Box::pin(async move { rpc_client.get_best_block().await }) + }, + BitcoindClient::Rest { rest_client, .. 
} => { + Box::pin(async move { rest_client.get_best_block().await }) + }, + } + } +} + +pub(crate) struct FeeResponse(pub FeeRate); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + if !self.0["errors"].is_null() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + self.0["errors"].to_string(), + )); + } + let fee_rate_btc_per_kvbyte = self.0["feerate"] + .as_f64() + .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?; + // Bitcoin Core gives us a feerate in BTC/KvB. + // Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu. + let fee_rate = { + let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + Ok(FeeResponse(fee_rate)) + } +} + +pub(crate) struct MempoolMinFeeResponse(pub FeeRate); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let fee_rate_btc_per_kvbyte = self.0["mempoolminfee"] + .as_f64() + .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?; + // Bitcoin Core gives us a feerate in BTC/KvB. + // Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu. + let fee_rate = { + let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + Ok(MempoolMinFeeResponse(fee_rate)) + } +} + +pub(crate) struct GetRawTransactionResponse(pub Transaction); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let tx = self + .0 + .as_str() + .ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawtransaction response", + )) + .and_then(|s| { + bitcoin::consensus::encode::deserialize_hex(s).map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawtransaction response", + ) + }) + })?; + + Ok(GetRawTransactionResponse(tx)) + } +} + +pub struct GetRawMempoolResponse(Vec); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let res = self.0.as_array().ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + ))?; + + let mut mempool_transactions = Vec::with_capacity(res.len()); + + for hex in res { + let txid = if let Some(hex_str) = hex.as_str() { + match hex_str.parse::() { + Ok(txid) => txid, + Err(_) => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + } + } else { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }; + + mempool_transactions.push(txid); + } + + Ok(GetRawMempoolResponse(mempool_transactions)) + } +} + +pub struct GetMempoolEntryResponse { + time: u64, + height: u32, +} + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let res = self.0.as_object().ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getmempoolentry response", + ))?; + + let time = match res["time"].as_u64() { + Some(time) => time, + None => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getmempoolentry response", + )); + }, + }; + + let height = match res["height"].as_u64().and_then(|h| h.try_into().ok()) { + Some(height) => height, + None => { + return Err(std::io::Error::new( + 
std::io::ErrorKind::Other, + "Failed to parse getmempoolentry response", + )); + }, + }; + + Ok(GetMempoolEntryResponse { time, height }) + } +} + +#[derive(Debug, Clone)] +pub(crate) struct MempoolEntry { + /// The transaction id + txid: Txid, + /// Local time transaction entered pool in seconds since 1 Jan 1970 GMT + time: u64, + /// Block height when transaction entered pool + height: u32, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "UPPERCASE")] +pub(crate) enum FeeRateEstimationMode { + Economical, + Conservative, +} + +const MAX_HEADER_CACHE_ENTRIES: usize = 100; + +pub(crate) struct BoundedHeaderCache { + header_map: HashMap, + recently_seen: VecDeque, +} + +impl BoundedHeaderCache { + pub(crate) fn new() -> Self { + let header_map = HashMap::new(); + let recently_seen = VecDeque::new(); + Self { header_map, recently_seen } + } +} + +impl Cache for BoundedHeaderCache { + fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { + self.header_map.get(block_hash) + } + + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.recently_seen.push_back(block_hash); + self.header_map.insert(block_hash, block_header); + + if self.header_map.len() >= MAX_HEADER_CACHE_ENTRIES { + // Keep dropping old entries until we've actually removed a header entry. + while let Some(oldest_entry) = self.recently_seen.pop_front() { + if self.header_map.remove(&oldest_entry).is_some() { + break; + } + } + } + } + + fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option { + self.recently_seen.retain(|e| e != block_hash); + self.header_map.remove(block_hash) + } +} + +pub(crate) struct ChainListener { + pub(crate) onchain_wallet: Arc, + pub(crate) channel_manager: Arc, + pub(crate) chain_monitor: Arc, + pub(crate) output_sweeper: Arc, +} + +impl Listen for ChainListener { + fn filtered_block_connected( + &self, header: &bitcoin::block::Header, + txdata: &lightning::chain::transaction::TransactionData, height: u32, + ) { + self.onchain_wallet.filtered_block_connected(header, txdata, height); + self.channel_manager.filtered_block_connected(header, txdata, height); + self.chain_monitor.filtered_block_connected(header, txdata, height); + self.output_sweeper.filtered_block_connected(header, txdata, height); + } + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + self.onchain_wallet.block_connected(block, height); + self.channel_manager.block_connected(block, height); + self.chain_monitor.block_connected(block, height); + self.output_sweeper.block_connected(block, height); + } + + fn blocks_disconnected(&self, fork_point_block: lightning::chain::BestBlock) { + self.onchain_wallet.blocks_disconnected(fork_point_block); + self.channel_manager.blocks_disconnected(fork_point_block); + self.chain_monitor.blocks_disconnected(fork_point_block); + self.output_sweeper.blocks_disconnected(fork_point_block); + } +} + +pub(crate) fn rpc_credentials(rpc_user: String, rpc_password: String) -> String { + BASE64_STANDARD.encode(format!("{}:{}", rpc_user, rpc_password)) +} + +pub(crate) fn endpoint(host: String, port: u16) -> HttpEndpoint { + HttpEndpoint::for_host(host).with_port(port) +} + +#[derive(Debug)] +pub struct HttpError { + pub(crate) status_code: String, + pub(crate) contents: Vec, +} + +impl std::error::Error for HttpError {} + +impl std::fmt::Display for HttpError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let contents = String::from_utf8_lossy(&self.contents); + write!(f, 
"status_code: {}, contents: {}", self.status_code, contents) + } +} + +#[cfg(test)] +mod tests { + use bitcoin::hashes::Hash; + use bitcoin::{FeeRate, OutPoint, ScriptBuf, Transaction, TxIn, TxOut, Txid, Witness}; + use lightning_block_sync::http::JsonResponse; + use proptest::arbitrary::any; + use proptest::collection::vec; + use proptest::{prop_assert_eq, prop_compose, proptest}; + use serde_json::json; + + use crate::chain::bitcoind::{ + FeeResponse, GetMempoolEntryResponse, GetRawMempoolResponse, GetRawTransactionResponse, + MempoolMinFeeResponse, + }; + + prop_compose! { + fn arbitrary_witness()( + witness_elements in vec(vec(any::(), 0..100), 0..20) + ) -> Witness { + let mut witness = Witness::new(); + for element in witness_elements { + witness.push(element); + } + witness + } + } + + prop_compose! { + fn arbitrary_txin()( + outpoint_hash in any::<[u8; 32]>(), + outpoint_vout in any::(), + script_bytes in vec(any::(), 0..100), + witness in arbitrary_witness(), + sequence in any::() + ) -> TxIn { + TxIn { + previous_output: OutPoint { + txid: Txid::from_byte_array(outpoint_hash), + vout: outpoint_vout, + }, + script_sig: ScriptBuf::from_bytes(script_bytes), + sequence: bitcoin::Sequence::from_consensus(sequence), + witness, + } + } + } + + prop_compose! { + fn arbitrary_txout()( + value in 0u64..21_000_000_00_000_000u64, + script_bytes in vec(any::(), 0..100) + ) -> TxOut { + TxOut { + value: bitcoin::Amount::from_sat(value), + script_pubkey: ScriptBuf::from_bytes(script_bytes), + } + } + } + + prop_compose! { + fn arbitrary_transaction()( + version in any::(), + inputs in vec(arbitrary_txin(), 1..20), + outputs in vec(arbitrary_txout(), 1..20), + lock_time in any::() + ) -> Transaction { + Transaction { + version: bitcoin::transaction::Version(version), + input: inputs, + output: outputs, + lock_time: bitcoin::absolute::LockTime::from_consensus(lock_time), + } + } + } + + proptest! 
{ + #![proptest_config(proptest::test_runner::Config::with_cases(20))] + + #[test] + fn prop_get_raw_mempool_response_roundtrip(txids in vec(any::<[u8;32]>(), 0..10)) { + let txid_vec: Vec = txids.into_iter().map(Txid::from_byte_array).collect(); + let original = GetRawMempoolResponse(txid_vec.clone()); + + let json_vec: Vec = txid_vec.iter().map(|t| t.to_string()).collect(); + let json_val = serde_json::Value::Array(json_vec.iter().map(|s| json!(s)).collect()); + + let resp = JsonResponse(json_val); + let decoded: GetRawMempoolResponse = resp.try_into().unwrap(); + + prop_assert_eq!(original.0.len(), decoded.0.len()); + + prop_assert_eq!(original.0, decoded.0); + } + + #[test] + fn prop_get_mempool_entry_response_roundtrip( + time in any::(), + height in any::() + ) { + let json_val = json!({ + "time": time, + "height": height + }); + + let resp = JsonResponse(json_val); + let decoded: GetMempoolEntryResponse = resp.try_into().unwrap(); + + prop_assert_eq!(decoded.time, time); + prop_assert_eq!(decoded.height, height); + } + + #[test] + fn prop_get_raw_transaction_response_roundtrip(tx in arbitrary_transaction()) { + let hex = bitcoin::consensus::encode::serialize_hex(&tx); + let json_val = serde_json::Value::String(hex.clone()); + + let resp = JsonResponse(json_val); + let decoded: GetRawTransactionResponse = resp.try_into().unwrap(); + + prop_assert_eq!(decoded.0.compute_txid(), tx.compute_txid()); + prop_assert_eq!(decoded.0.compute_wtxid(), tx.compute_wtxid()); + + prop_assert_eq!(decoded.0, tx); + } + + #[test] + fn prop_fee_response_roundtrip(fee_rate in any::()) { + let fee_rate = fee_rate.abs(); + let json_val = json!({ + "feerate": fee_rate, + "errors": serde_json::Value::Null + }); + + let resp = JsonResponse(json_val); + let decoded: FeeResponse = resp.try_into().unwrap(); + + let expected = { + let fee_rate_sat_per_kwu = (fee_rate * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + prop_assert_eq!(decoded.0, expected); + } + + #[test] + fn prop_mempool_min_fee_response_roundtrip(fee_rate in any::()) { + let fee_rate = fee_rate.abs(); + let json_val = json!({ + "mempoolminfee": fee_rate + }); + + let resp = JsonResponse(json_val); + let decoded: MempoolMinFeeResponse = resp.try_into().unwrap(); + + let expected = { + let fee_rate_sat_per_kwu = (fee_rate * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + prop_assert_eq!(decoded.0, expected); + } + + } +} diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs deleted file mode 100644 index a579a3db3..000000000 --- a/src/chain/bitcoind_rpc.rs +++ /dev/null @@ -1,532 +0,0 @@ -// This file is Copyright its original authors, visible in version control history. -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in -// accordance with one or both of these licenses. 
- -use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; - -use lightning::chain::Listen; - -use lightning_block_sync::http::HttpEndpoint; -use lightning_block_sync::http::JsonResponse; -use lightning_block_sync::poll::ValidatedBlockHeader; -use lightning_block_sync::rpc::{RpcClient, RpcError}; -use lightning_block_sync::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, -}; - -use serde::Serialize; - -use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; - -use base64::prelude::{Engine, BASE64_STANDARD}; - -use std::collections::{HashMap, VecDeque}; -use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; - -pub struct BitcoindRpcClient { - rpc_client: Arc, - latest_mempool_timestamp: AtomicU64, - mempool_entries_cache: tokio::sync::Mutex>, - mempool_txs_cache: tokio::sync::Mutex>, -} - -impl BitcoindRpcClient { - pub(crate) fn new(host: String, port: u16, rpc_user: String, rpc_password: String) -> Self { - let http_endpoint = HttpEndpoint::for_host(host.clone()).with_port(port); - let rpc_credentials = - BASE64_STANDARD.encode(format!("{}:{}", rpc_user.clone(), rpc_password.clone())); - - let rpc_client = Arc::new(RpcClient::new(&rpc_credentials, http_endpoint)); - - let latest_mempool_timestamp = AtomicU64::new(0); - - let mempool_entries_cache = tokio::sync::Mutex::new(HashMap::new()); - let mempool_txs_cache = tokio::sync::Mutex::new(HashMap::new()); - Self { rpc_client, latest_mempool_timestamp, mempool_entries_cache, mempool_txs_cache } - } - - pub(crate) fn rpc_client(&self) -> Arc { - Arc::clone(&self.rpc_client) - } - - pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result { - let tx_serialized = bitcoin::consensus::encode::serialize_hex(tx); - let tx_json = serde_json::json!(tx_serialized); - self.rpc_client.call_method::("sendrawtransaction", &[tx_json]).await - } - - pub(crate) async fn get_fee_estimate_for_target( - &self, num_blocks: usize, estimation_mode: FeeRateEstimationMode, - ) -> std::io::Result { - let num_blocks_json = serde_json::json!(num_blocks); - let estimation_mode_json = serde_json::json!(estimation_mode); - self.rpc_client - .call_method::( - "estimatesmartfee", - &[num_blocks_json, estimation_mode_json], - ) - .await - .map(|resp| resp.0) - } - - pub(crate) async fn get_mempool_minimum_fee_rate(&self) -> std::io::Result { - self.rpc_client - .call_method::("getmempoolinfo", &[]) - .await - .map(|resp| resp.0) - } - - pub(crate) async fn get_raw_transaction( - &self, txid: &Txid, - ) -> std::io::Result> { - let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); - let txid_json = serde_json::json!(txid_hex); - match self - .rpc_client - .call_method::("getrawtransaction", &[txid_json]) - .await - { - Ok(resp) => Ok(Some(resp.0)), - Err(e) => match e.into_inner() { - Some(inner) => { - let rpc_error_res: Result, _> = inner.downcast(); - - match rpc_error_res { - Ok(rpc_error) => { - // Check if it's the 'not found' error code. 
-							if rpc_error.code == -5 {
-								Ok(None)
-							} else {
-								Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error))
-							}
-						},
-						Err(_) => Err(std::io::Error::new(
-							std::io::ErrorKind::Other,
-							"Failed to process getrawtransaction response",
-						)),
-					}
-				},
-				None => Err(std::io::Error::new(
-					std::io::ErrorKind::Other,
-					"Failed to process getrawtransaction response",
-				)),
-			},
-		}
-	}
-
-	pub(crate) async fn get_raw_mempool(&self) -> std::io::Result<Vec<Txid>> {
-		let verbose_flag_json = serde_json::json!(false);
-		self.rpc_client
-			.call_method::<GetRawMempoolResponse>("getrawmempool", &[verbose_flag_json])
-			.await
-			.map(|resp| resp.0)
-	}
-
-	pub(crate) async fn get_mempool_entry(
-		&self, txid: Txid,
-	) -> std::io::Result<Option<MempoolEntry>> {
-		let txid_hex = bitcoin::consensus::encode::serialize_hex(&txid);
-		let txid_json = serde_json::json!(txid_hex);
-		match self
-			.rpc_client
-			.call_method::<GetMempoolEntryResponse>("getmempoolentry", &[txid_json])
-			.await
-		{
-			Ok(resp) => Ok(Some(MempoolEntry { txid, height: resp.height, time: resp.time })),
-			Err(e) => match e.into_inner() {
-				Some(inner) => {
-					let rpc_error_res: Result<Box<RpcError>, _> = inner.downcast();
-
-					match rpc_error_res {
-						Ok(rpc_error) => {
-							// Check if it's the 'not found' error code.
-							if rpc_error.code == -5 {
-								Ok(None)
-							} else {
-								Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error))
-							}
-						},
-						Err(_) => Err(std::io::Error::new(
-							std::io::ErrorKind::Other,
-							"Failed to process getmempoolentry response",
-						)),
-					}
-				},
-				None => Err(std::io::Error::new(
-					std::io::ErrorKind::Other,
-					"Failed to process getmempoolentry response",
-				)),
-			},
-		}
-	}
-
-	pub(crate) async fn update_mempool_entries_cache(&self) -> std::io::Result<()> {
-		let mempool_txids = self.get_raw_mempool().await?;
-
-		let mut mempool_entries_cache = self.mempool_entries_cache.lock().await;
-		mempool_entries_cache.retain(|txid, _| mempool_txids.contains(txid));
-
-		if let Some(difference) = mempool_txids.len().checked_sub(mempool_entries_cache.capacity())
-		{
-			mempool_entries_cache.reserve(difference)
-		}
-
-		for txid in mempool_txids {
-			if mempool_entries_cache.contains_key(&txid) {
-				continue;
-			}
-
-			if let Some(entry) = self.get_mempool_entry(txid).await? {
-				mempool_entries_cache.insert(txid, entry.clone());
-			}
-		}
-
-		mempool_entries_cache.shrink_to_fit();
-
-		Ok(())
-	}
-
-	/// Returns two `Vec`s:
-	/// - mempool transactions, alongside their first-seen unix timestamps.
-	/// - transactions that have been evicted from the mempool, alongside the last time they were seen absent.
-	pub(crate) async fn get_updated_mempool_transactions(
-		&self, best_processed_height: u32, unconfirmed_txids: Vec<Txid>,
-	) -> std::io::Result<(Vec<(Transaction, u64)>, Vec<(Txid, u64)>)> {
-		let mempool_txs =
-			self.get_mempool_transactions_and_timestamp_at_height(best_processed_height).await?;
-		let evicted_txids = self.get_evicted_mempool_txids_and_timestamp(unconfirmed_txids).await?;
-		Ok((mempool_txs, evicted_txids))
-	}
-
-	/// Get mempool transactions, alongside their first-seen unix timestamps.
-	///
-	/// This method is an adapted version of `bdk_bitcoind_rpc::Emitter::mempool`. It emits each
-	/// transaction only once, unless we cannot assume the transaction's ancestors are already
-	/// emitted.
-	async fn get_mempool_transactions_and_timestamp_at_height(
-		&self, best_processed_height: u32,
-	) -> std::io::Result<Vec<(Transaction, u64)>> {
-		let prev_mempool_time = self.latest_mempool_timestamp.load(Ordering::Relaxed);
-		let mut latest_time = prev_mempool_time;
-
-		self.update_mempool_entries_cache().await?;
-
-		let mempool_entries_cache = self.mempool_entries_cache.lock().await;
-		let mut mempool_txs_cache = self.mempool_txs_cache.lock().await;
-		mempool_txs_cache.retain(|txid, _| mempool_entries_cache.contains_key(txid));
-
-		if let Some(difference) =
-			mempool_entries_cache.len().checked_sub(mempool_txs_cache.capacity())
-		{
-			mempool_txs_cache.reserve(difference)
-		}
-
-		let mut txs_to_emit = Vec::with_capacity(mempool_entries_cache.len());
-		for (txid, entry) in mempool_entries_cache.iter() {
-			if entry.time > latest_time {
-				latest_time = entry.time;
-			}
-
-			// Avoid emitting transactions that are already emitted if we can guarantee
-			// blocks containing ancestors are already emitted. The bitcoind rpc interface
-			// provides us with the block height at which the tx was introduced to the mempool.
-			// If we have already emitted the block at that height, we can assume that all
-			// ancestor txs have been processed by the receiver.
-			let ancestor_within_height = entry.height <= best_processed_height;
-			let is_already_emitted = entry.time <= prev_mempool_time;
-			if is_already_emitted && ancestor_within_height {
-				continue;
-			}
-
-			if let Some((cached_tx, cached_time)) = mempool_txs_cache.get(txid) {
-				txs_to_emit.push((cached_tx.clone(), *cached_time));
-				continue;
-			}
-
-			match self.get_raw_transaction(&entry.txid).await {
-				Ok(Some(tx)) => {
-					mempool_txs_cache.insert(entry.txid, (tx.clone(), entry.time));
-					txs_to_emit.push((tx, entry.time));
-				},
-				Ok(None) => {
-					continue;
-				},
-				Err(e) => return Err(e),
-			};
-		}
-
-		if !txs_to_emit.is_empty() {
-			self.latest_mempool_timestamp.store(latest_time, Ordering::Release);
-		}
-		Ok(txs_to_emit)
-	}
-
-	// Retrieve a list of Txids that have been evicted from the mempool.
-	//
-	// To this end, we first update our local mempool_entries_cache and then return all unconfirmed
-	// wallet `Txid`s that still don't appear in the mempool.
-	async fn get_evicted_mempool_txids_and_timestamp(
-		&self, unconfirmed_txids: Vec<Txid>,
-	) -> std::io::Result<Vec<(Txid, u64)>> {
-		let latest_mempool_timestamp = self.latest_mempool_timestamp.load(Ordering::Relaxed);
-		let mempool_entries_cache = self.mempool_entries_cache.lock().await;
-		let evicted_txids = unconfirmed_txids
-			.into_iter()
-			.filter(|txid| !mempool_entries_cache.contains_key(txid))
-			.map(|txid| (txid, latest_mempool_timestamp))
-			.collect();
-		Ok(evicted_txids)
-	}
-}
-
-impl BlockSource for BitcoindRpcClient {
-	fn get_header<'a>(
-		&'a self, header_hash: &'a BlockHash, height_hint: Option<u32>,
-	) -> AsyncBlockSourceResult<'a, BlockHeaderData> {
-		Box::pin(async move { self.rpc_client.get_header(header_hash, height_hint).await })
-	}
-
-	fn get_block<'a>(
-		&'a self, header_hash: &'a BlockHash,
-	) -> AsyncBlockSourceResult<'a, BlockData> {
-		Box::pin(async move { self.rpc_client.get_block(header_hash).await })
-	}
-
-	fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (BlockHash, Option<u32>)> {
-		Box::pin(async move { self.rpc_client.get_best_block().await })
-	}
-}
-
-pub(crate) struct FeeResponse(pub FeeRate);
-
-impl TryInto<FeeResponse> for JsonResponse {
-	type Error = std::io::Error;
-	fn try_into(self) -> std::io::Result<FeeResponse> {
-		if !self.0["errors"].is_null() {
-			return Err(std::io::Error::new(
-				std::io::ErrorKind::Other,
-				self.0["errors"].to_string(),
-			));
-		}
-		let fee_rate_btc_per_kvbyte = self.0["feerate"]
-			.as_f64()
-			.ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?;
-		// Bitcoin Core gives us a feerate in BTC/KvB.
-		// Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu.
-		let fee_rate = {
-			let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64;
-			FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu)
-		};
-		Ok(FeeResponse(fee_rate))
-	}
-}
-
-pub struct MempoolMinFeeResponse(pub FeeRate);
-
-impl TryInto<MempoolMinFeeResponse> for JsonResponse {
-	type Error = std::io::Error;
-	fn try_into(self) -> std::io::Result<MempoolMinFeeResponse> {
-		let fee_rate_btc_per_kvbyte = self.0["mempoolminfee"]
-			.as_f64()
-			.ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?;
-		// Bitcoin Core gives us a feerate in BTC/KvB.
-		// Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu.
-		let fee_rate = {
-			let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64;
-			FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu)
-		};
-		Ok(MempoolMinFeeResponse(fee_rate))
-	}
-}
-
-pub struct GetRawTransactionResponse(pub Transaction);
-
-impl TryInto<GetRawTransactionResponse> for JsonResponse {
-	type Error = std::io::Error;
-	fn try_into(self) -> std::io::Result<GetRawTransactionResponse> {
-		let tx = self
-			.0
-			.as_str()
-			.ok_or(std::io::Error::new(
-				std::io::ErrorKind::Other,
-				"Failed to parse getrawtransaction response",
-			))
-			.and_then(|s| {
-				bitcoin::consensus::encode::deserialize_hex(s).map_err(|_| {
-					std::io::Error::new(
-						std::io::ErrorKind::Other,
-						"Failed to parse getrawtransaction response",
-					)
-				})
-			})?;
-
-		Ok(GetRawTransactionResponse(tx))
-	}
-}
-
-pub struct GetRawMempoolResponse(Vec<Txid>);
-
-impl TryInto<GetRawMempoolResponse> for JsonResponse {
-	type Error = std::io::Error;
-	fn try_into(self) -> std::io::Result<GetRawMempoolResponse> {
-		let res = self.0.as_array().ok_or(std::io::Error::new(
-			std::io::ErrorKind::Other,
-			"Failed to parse getrawmempool response",
-		))?;
-
-		let mut mempool_transactions = Vec::with_capacity(res.len());
-
-		for hex in res {
-			let txid = if let Some(hex_str) = hex.as_str() {
-				match bitcoin::consensus::encode::deserialize_hex(hex_str) {
-					Ok(txid) => txid,
-					Err(_) => {
-						return Err(std::io::Error::new(
-							std::io::ErrorKind::Other,
-							"Failed to parse getrawmempool response",
-						));
-					},
-				}
-			} else {
-				return Err(std::io::Error::new(
-					std::io::ErrorKind::Other,
-					"Failed to parse getrawmempool response",
-				));
-			};
-
-			mempool_transactions.push(txid);
-		}
-
-		Ok(GetRawMempoolResponse(mempool_transactions))
-	}
-}
-
-pub struct GetMempoolEntryResponse {
-	time: u64,
-	height: u32,
-}
-
-impl TryInto<GetMempoolEntryResponse> for JsonResponse {
-	type Error = std::io::Error;
-	fn try_into(self) -> std::io::Result<GetMempoolEntryResponse> {
-		let res = self.0.as_object().ok_or(std::io::Error::new(
-			std::io::ErrorKind::Other,
-			"Failed to parse getmempoolentry response",
-		))?;
-
-		let time = match res["time"].as_u64() {
-			Some(time) => time,
-			None => {
-				return Err(std::io::Error::new(
-					std::io::ErrorKind::Other,
-					"Failed to parse getmempoolentry response",
-				));
-			},
-		};
-
-		let height = match res["height"].as_u64().and_then(|h| h.try_into().ok()) {
-			Some(height) => height,
-			None => {
-				return Err(std::io::Error::new(
-					std::io::ErrorKind::Other,
-					"Failed to parse getmempoolentry response",
-				));
-			},
-		};
-
-		Ok(GetMempoolEntryResponse { time, height })
-	}
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct MempoolEntry {
-	/// The transaction id
-	txid: Txid,
-	/// Local time transaction entered pool in seconds since 1 Jan 1970 GMT
-	time: u64,
-	/// Block height when transaction entered pool
-	height: u32,
-}
-
-#[derive(Debug, Clone, Serialize)]
-#[serde(rename_all = "UPPERCASE")]
-pub(crate) enum FeeRateEstimationMode {
-	Economical,
-	Conservative,
-}
-
-const MAX_HEADER_CACHE_ENTRIES: usize = 100;
-
-pub(crate) struct BoundedHeaderCache {
-	header_map: HashMap<BlockHash, ValidatedBlockHeader>,
-	recently_seen: VecDeque<BlockHash>,
-}
-
-impl BoundedHeaderCache {
-	pub(crate) fn new() -> Self {
-		let header_map = HashMap::new();
-		let recently_seen = VecDeque::new();
-		Self { header_map, recently_seen }
-	}
-}
-
-impl Cache for BoundedHeaderCache {
-	fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> {
-		self.header_map.get(block_hash)
-	}
-
-	fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) {
-		self.recently_seen.push_back(block_hash);
-		self.header_map.insert(block_hash, block_header);
-
-		if self.header_map.len() >= MAX_HEADER_CACHE_ENTRIES {
-			// Keep dropping old entries until we've actually removed a header entry.
-			while let Some(oldest_entry) = self.recently_seen.pop_front() {
-				if self.header_map.remove(&oldest_entry).is_some() {
-					break;
-				}
-			}
-		}
-	}
-
-	fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option<ValidatedBlockHeader> {
-		self.recently_seen.retain(|e| e != block_hash);
-		self.header_map.remove(block_hash)
-	}
-}
-
-pub(crate) struct ChainListener {
-	pub(crate) onchain_wallet: Arc<Wallet>,
-	pub(crate) channel_manager: Arc<ChannelManager>,
-	pub(crate) chain_monitor: Arc<ChainMonitor>,
-	pub(crate) output_sweeper: Arc<Sweeper>,
-}
-
-impl Listen for ChainListener {
-	fn filtered_block_connected(
-		&self, header: &bitcoin::block::Header,
-		txdata: &lightning::chain::transaction::TransactionData, height: u32,
-	) {
-		self.onchain_wallet.filtered_block_connected(header, txdata, height);
-		self.channel_manager.filtered_block_connected(header, txdata, height);
-		self.chain_monitor.filtered_block_connected(header, txdata, height);
-		self.output_sweeper.filtered_block_connected(header, txdata, height);
-	}
-
-	fn block_connected(&self, block: &bitcoin::Block, height: u32) {
-		self.onchain_wallet.block_connected(block, height);
-		self.channel_manager.block_connected(block, height);
-		self.chain_monitor.block_connected(block, height);
-		self.output_sweeper.block_connected(block, height);
-	}
-
-	fn block_disconnected(&self, header: &bitcoin::block::Header, height: u32) {
-		self.onchain_wallet.block_disconnected(header, height);
-		self.channel_manager.block_disconnected(header, height);
-		self.chain_monitor.block_disconnected(header, height);
-		self.output_sweeper.block_disconnected(header, height);
-	}
-}
diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs
index 9882e652b..9e05dfaee 100644
--- a/src/chain/electrum.rs
+++ b/src/chain/electrum.rs
@@ -5,56 +5,407 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
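+// The Electrum-backed chain source. The underlying `ElectrumRuntimeClient` only exists
+// while the node is running: `Filter` registrations made before `start` are buffered in
+// `ElectrumRuntimeStatus::Stopped` and replayed once the client comes up.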
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex, RwLock};
+use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
+
+use bdk_chain::bdk_core::spk_client::{
+	FullScanRequest as BdkFullScanRequest, FullScanResponse as BdkFullScanResponse,
+	SyncRequest as BdkSyncRequest, SyncResponse as BdkSyncResponse,
+};
+use bdk_electrum::BdkElectrumClient;
+use bdk_wallet::{KeychainKind as BdkKeyChainKind, Update as BdkUpdate};
+use bitcoin::{FeeRate, Network, Script, ScriptBuf, Transaction, Txid};
+use electrum_client::{
+	Batch, Client as ElectrumClient, ConfigBuilder as ElectrumConfigBuilder, ElectrumApi,
+};
+use lightning::chain::{Confirm, Filter, WatchedOutput};
+use lightning::util::ser::Writeable;
+use lightning_transaction_sync::ElectrumSyncClient;
+
+use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus};
 use crate::config::{
-	Config, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS,
-	LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS,
+	Config, ElectrumSyncConfig, BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS,
+	FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS,
 };
 use crate::error::Error;
 use crate::fee_estimator::{
 	apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target,
-	ConfirmationTarget,
+	ConfirmationTarget, OnchainFeeEstimator,
 };
+use crate::io::utils::write_node_metrics;
 use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger};
+use crate::runtime::Runtime;
+use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet};
+use crate::NodeMetrics;
 
-use lightning::chain::{Confirm, Filter, WatchedOutput};
-use lightning::util::ser::Writeable;
-use lightning_transaction_sync::ElectrumSyncClient;
+const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5;
+const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3;
+const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10;
 
-use bdk_chain::bdk_core::spk_client::FullScanRequest as BdkFullScanRequest;
-use bdk_chain::bdk_core::spk_client::FullScanResponse as BdkFullScanResponse;
-use bdk_chain::bdk_core::spk_client::SyncRequest as BdkSyncRequest;
-use bdk_chain::bdk_core::spk_client::SyncResponse as BdkSyncResponse;
-use bdk_wallet::KeychainKind as BdkKeyChainKind;
+pub(super) struct ElectrumChainSource {
+	server_url: String,
+	pub(super) sync_config: ElectrumSyncConfig,
+	electrum_runtime_status: RwLock<ElectrumRuntimeStatus>,
+	onchain_wallet_sync_status: Mutex<WalletSyncStatus>,
+	lightning_wallet_sync_status: Mutex<WalletSyncStatus>,
+	fee_estimator: Arc<OnchainFeeEstimator>,
+	kv_store: Arc<DynStore>,
+	config: Arc<Config>,
+	logger: Arc<Logger>,
+	node_metrics: Arc<RwLock<NodeMetrics>>,
+}
 
-use bdk_electrum::BdkElectrumClient;
+impl ElectrumChainSource {
+	pub(super) fn new(
+		server_url: String, sync_config: ElectrumSyncConfig,
+		fee_estimator: Arc<OnchainFeeEstimator>, kv_store: Arc<DynStore>, config: Arc<Config>,
+		logger: Arc<Logger>, node_metrics: Arc<RwLock<NodeMetrics>>,
+	) -> Self {
+		let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new());
+		let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed);
+		let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed);
+		Self {
+			server_url,
+			sync_config,
+			electrum_runtime_status,
+			onchain_wallet_sync_status,
+			lightning_wallet_sync_status,
+			fee_estimator,
+			kv_store,
+			config,
+			logger: Arc::clone(&logger),
+			node_metrics,
+		}
+	}
 
-use electrum_client::Client as ElectrumClient;
-use electrum_client::ConfigBuilder as ElectrumConfigBuilder;
-use electrum_client::{Batch, ElectrumApi};
+	pub(super) fn start(&self, runtime: Arc<Runtime>) -> Result<(), Error> {
+		self.electrum_runtime_status.write().unwrap().start(
+			self.server_url.clone(),
+			Arc::clone(&runtime),
+			Arc::clone(&self.config),
+			Arc::clone(&self.logger),
+		)
+	}
 
-use bitcoin::{FeeRate, Network, Script, Transaction, Txid};
+	pub(super) fn stop(&self) {
+		self.electrum_runtime_status.write().unwrap().stop();
+	}
 
-use std::collections::HashMap;
-use std::sync::Arc;
-use std::time::{Duration, Instant};
+	pub(crate) async fn sync_onchain_wallet(
+		&self, onchain_wallet: Arc<Wallet>,
+	) -> Result<(), Error> {
+		let receiver_res = {
+			let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap();
+			status_lock.register_or_subscribe_pending_sync()
+		};
+		if let Some(mut sync_receiver) = receiver_res {
+			log_info!(self.logger, "Sync in progress, skipping.");
+			return sync_receiver.recv().await.map_err(|e| {
+				debug_assert!(false, "Failed to receive wallet sync result: {:?}", e);
+				log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e);
+				Error::WalletOperationFailed
+			})?;
+		}
 
-const BDK_ELECTRUM_CLIENT_BATCH_SIZE: usize = 5;
-const ELECTRUM_CLIENT_NUM_RETRIES: u8 = 3;
-const ELECTRUM_CLIENT_TIMEOUT_SECS: u8 = 10;
+		let res = self.sync_onchain_wallet_inner(onchain_wallet).await;
+
+		self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res);
+
+		res
+	}
+
+	async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc<Wallet>) -> Result<(), Error> {
+		let electrum_client: Arc<ElectrumRuntimeClient> =
+			if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() {
+				Arc::clone(client)
+			} else {
+				debug_assert!(
+					false,
+					"We should have started the chain source before syncing the onchain wallet"
+				);
+				return Err(Error::FeerateEstimationUpdateFailed);
+			};
+		// If this is our first sync, do a full scan with the configured gap limit.
+		// Otherwise just do an incremental sync.
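+		// (Roughly: a full scan walks the derived scripts of each keychain until
+		// BDK_CLIENT_STOP_GAP consecutive unused scripts have been seen, while an
+		// incremental sync only revisits scripts, txids, and outpoints the wallet
+		// already tracks.)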
+ let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + let apply_wallet_update = + |update_res: Result, now: Instant| match update_res { + Ok(update) => match onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + self.logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => Err(e), + }; + + let cached_txs = onchain_wallet.get_cached_txs(); + + let res = if incremental_sync { + let incremental_sync_request = onchain_wallet.get_incremental_sync_request(); + let incremental_sync_fut = electrum_client + .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); + + let now = Instant::now(); + let update_res = incremental_sync_fut.await.map(|u| u.into()); + apply_wallet_update(update_res, now) + } else { + let full_scan_request = onchain_wallet.get_full_scan_request(); + let full_scan_fut = + electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); + let now = Instant::now(); + let update_res = full_scan_fut.await.map(|u| u.into()); + apply_wallet_update(update_res, now) + }; + + res + } + + pub(crate) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::TxSyncFailed + })?; + } + + let res = + self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; + + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn sync_lightning_wallet_inner( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + sync_cman as Arc, + sync_cmon as Arc, + sync_sweeper as Arc, + ]; + + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the lightning wallet" + ); + return Err(Error::TxSyncFailed); + }; + + let res = electrum_client.sync_confirmables(confirmables).await; + + if let Ok(_) = res { + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + 
Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + } + + res + } -pub(crate) struct ElectrumRuntimeClient { + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().unwrap().client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before updating fees"); + return Err(Error::FeerateEstimationUpdateFailed); + }; + + let now = Instant::now(); + + let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; + self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + Ok(()) + } + + pub(crate) async fn process_broadcast_package(&self, package: Vec) { + let electrum_client: Arc = + if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before broadcasting"); + return; + }; + + for tx in package { + electrum_client.broadcast(tx).await; + } + } +} + +impl Filter for ElectrumChainSource { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + } + fn register_output(&self, output: lightning::chain::WatchedOutput) { + self.electrum_runtime_status.write().unwrap().register_output(output) + } +} + +enum ElectrumRuntimeStatus { + Started(Arc), + Stopped { + pending_registered_txs: Vec<(Txid, ScriptBuf)>, + pending_registered_outputs: Vec, + }, +} + +impl ElectrumRuntimeStatus { + fn new() -> Self { + let pending_registered_txs = Vec::new(); + let pending_registered_outputs = Vec::new(); + Self::Stopped { pending_registered_txs, pending_registered_outputs } + } + + pub(super) fn start( + &mut self, server_url: String, runtime: Arc, config: Arc, + logger: Arc, + ) -> Result<(), Error> { + match self { + Self::Stopped { pending_registered_txs, pending_registered_outputs } => { + let client = Arc::new(ElectrumRuntimeClient::new( + server_url.clone(), + runtime, + config, + logger, + )?); + + // Apply any pending `Filter` entries + for (txid, script_pubkey) in pending_registered_txs.drain(..) { + client.register_tx(&txid, &script_pubkey); + } + + for output in pending_registered_outputs.drain(..) { + client.register_output(output) + } + + *self = Self::Started(client); + }, + Self::Started(_) => { + debug_assert!(false, "We shouldn't call start if we're already started") + }, + } + Ok(()) + } + + pub(super) fn stop(&mut self) { + *self = Self::new() + } + + fn client(&self) -> Option> { + match self { + Self::Started(client) => Some(Arc::clone(&client)), + Self::Stopped { .. 
} => None, + } + } + + fn register_tx(&mut self, txid: &Txid, script_pubkey: &Script) { + match self { + Self::Started(client) => client.register_tx(txid, script_pubkey), + Self::Stopped { pending_registered_txs, .. } => { + pending_registered_txs.push((*txid, script_pubkey.to_owned())) + }, + } + } + + fn register_output(&mut self, output: lightning::chain::WatchedOutput) { + match self { + Self::Started(client) => client.register_output(output), + Self::Stopped { pending_registered_outputs, .. } => { + pending_registered_outputs.push(output) + }, + } + } +} + +struct ElectrumRuntimeClient { electrum_client: Arc, - bdk_electrum_client: Arc>, + bdk_electrum_client: Arc>>, tx_sync: Arc>>, - runtime: Arc, + runtime: Arc, config: Arc, logger: Arc, } impl ElectrumRuntimeClient { - pub(crate) fn new( - server_url: String, runtime: Arc, config: Arc, - logger: Arc, + fn new( + server_url: String, runtime: Arc, config: Arc, logger: Arc, ) -> Result { let electrum_config = ElectrumConfigBuilder::new() .retry(ELECTRUM_CLIENT_NUM_RETRIES) @@ -67,12 +418,7 @@ impl ElectrumRuntimeClient { Error::ConnectionFailed })?, ); - let electrum_client_2 = - ElectrumClient::from_config(&server_url, electrum_config).map_err(|e| { - log_error!(logger, "Failed to connect to electrum server: {}", e); - Error::ConnectionFailed - })?; - let bdk_electrum_client = Arc::new(BdkElectrumClient::new(electrum_client_2)); + let bdk_electrum_client = Arc::new(BdkElectrumClient::new(Arc::clone(&electrum_client))); let tx_sync = Arc::new( ElectrumSyncClient::new(server_url.clone(), Arc::clone(&logger)).map_err(|e| { log_error!(logger, "Failed to connect to electrum server: {}", e); @@ -82,7 +428,7 @@ impl ElectrumRuntimeClient { Ok(Self { electrum_client, bdk_electrum_client, tx_sync, runtime, config, logger }) } - pub(crate) async fn sync_confirmables( + async fn sync_confirmables( &self, confirmables: Vec>, ) -> Result<(), Error> { let now = Instant::now(); @@ -116,7 +462,7 @@ impl ElectrumRuntimeClient { Ok(res) } - pub(crate) async fn get_full_scan_wallet_update( + async fn get_full_scan_wallet_update( &self, request: BdkFullScanRequest, cached_txs: impl IntoIterator>>, ) -> Result, Error> { @@ -150,7 +496,7 @@ impl ElectrumRuntimeClient { }) } - pub(crate) async fn get_incremental_sync_wallet_update( + async fn get_incremental_sync_wallet_update( &self, request: BdkSyncRequest<(BdkKeyChainKind, u32)>, cached_txs: impl IntoIterator>>, ) -> Result { @@ -179,7 +525,7 @@ impl ElectrumRuntimeClient { }) } - pub(crate) async fn broadcast(&self, tx: Transaction) { + async fn broadcast(&self, tx: Transaction) { let electrum_client = Arc::clone(&self.electrum_client); let txid = tx.compute_txid(); @@ -187,7 +533,6 @@ impl ElectrumRuntimeClient { let spawn_fut = self.runtime.spawn_blocking(move || electrum_client.transaction_broadcast(&tx)); - let timeout_fut = tokio::time::timeout(Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), spawn_fut); @@ -221,7 +566,7 @@ impl ElectrumRuntimeClient { } } - pub(crate) async fn get_fee_rate_cache_update( + async fn get_fee_rate_cache_update( &self, ) -> Result, Error> { let electrum_client = Arc::clone(&self.electrum_client); diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs new file mode 100644 index 000000000..f6f313955 --- /dev/null +++ b/src/chain/esplora.rs @@ -0,0 +1,445 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. 
You may not use this file except in +// accordance with one or both of these licenses. + +use std::collections::HashMap; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use bdk_esplora::EsploraAsyncExt; +use bitcoin::{FeeRate, Network, Script, Transaction, Txid}; +use esplora_client::AsyncClient as EsploraAsyncClient; +use lightning::chain::{Confirm, Filter, WatchedOutput}; +use lightning::util::ser::Writeable; +use lightning_transaction_sync::EsploraSyncClient; + +use super::{periodically_archive_fully_resolved_monitors, WalletSyncStatus}; +use crate::config::{ + Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, + BDK_WALLET_SYNC_TIMEOUT_SECS, DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, + FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, TX_BROADCAST_TIMEOUT_SECS, +}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + OnchainFeeEstimator, +}; +use crate::io::utils::write_node_metrics; +use crate::logger::{log_bytes, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; + +pub(super) struct EsploraChainSource { + pub(super) sync_config: EsploraSyncConfig, + esplora_client: EsploraAsyncClient, + onchain_wallet_sync_status: Mutex, + tx_sync: Arc>>, + lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, +} + +impl EsploraChainSource { + pub(crate) fn new( + server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, + fee_estimator: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + + for (header_name, header_value) in &headers { + client_builder = client_builder.header(header_name, header_value); + } + + let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client.clone(), Arc::clone(&logger))); + + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self { + sync_config, + esplora_client, + onchain_wallet_sync_status, + tx_sync, + lightning_wallet_sync_status, + fee_estimator, + kv_store, + config, + logger, + node_metrics, + } + } + + pub(super) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let res = self.sync_onchain_wallet_inner(onchain_wallet).await; + + self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { + // If this is our first sync, do a full scan with the configured gap limit. 
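+		// (The full scan checks script histories in concurrent batches of
+		// BDK_CLIENT_CONCURRENCY requests and stops after BDK_CLIENT_STOP_GAP
+		// consecutive unused scripts per keychain.)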
+ // Otherwise just do an incremental sync. + let incremental_sync = + self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + macro_rules! get_and_apply_wallet_update { + ($sync_future: expr) => {{ + let now = Instant::now(); + match $sync_future.await { + Ok(res) => match res { + Ok(update) => match onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + self.logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger) + )?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + if let Some(status_code) = he.status() { + log_error!( + self.logger, + "{} of on-chain wallet failed due to HTTP {} error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + status_code, + he, + ); + } else { + log_error!( + self.logger, + "{} of on-chain wallet failed due to HTTP error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + he, + ); + } + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!( + self.logger, + "{} of on-chain wallet failed due to Esplora error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationFailed) + }, + }, + }, + Err(e) => { + log_error!( + self.logger, + "{} of on-chain wallet timed out: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationTimeout) + }, + } + }} + } + + if incremental_sync { + let sync_request = onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { + let full_scan_request = onchain_wallet.get_full_scan_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + self.esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } + } + + pub(super) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + let receiver_res = { + let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(self.logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let res = + self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; + + self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + } + + async fn sync_lightning_wallet_inner( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: 
Arc, + ) -> Result<(), Error> { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + + let timeout_fut = tokio::time::timeout( + Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + self.tx_sync.sync(confirmables), + ); + let now = Instant::now(); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_info!( + self.logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.node_metrics), + )?; + Ok(()) + }, + Err(e) => { + log_error!(self.logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) + }, + }, + Err(e) => { + log_error!(self.logger, "Lightning wallet sync timed out: {}", e); + Err(Error::TxSyncTimeout) + }, + } + } + + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + let now = Instant::now(); + let estimates = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + self.esplora_client.get_fee_estimates(), + ) + .await + .map_err(|e| { + log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })?; + + if estimates.is_empty() && self.config.network == Network::Bitcoin { + // Ensure we fail if we didn't receive any estimates. + log_error!( + self.logger, + "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", + ); + return Err(Error::FeerateEstimationUpdateFailed); + } + + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + for target in confirmation_targets { + let num_blocks = get_num_block_defaults_for_target(target); + + // Convert the retrieved fee rate and fall back to 1 sat/vb if we fail or it + // yields less than that. This is mostly necessary to continue on + // `signet`/`regtest` where we might not get estimates (or bogus values). + let converted_estimate_sat_vb = + esplora_client::convert_fee_rate(num_blocks, estimates.clone()) + .map_or(1.0, |converted| converted.max(1.0)); + + let fee_rate = FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. 
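+			// (For reference, the `* 250.0` above converts sat/vB to sat/kwu: one
+			// virtual byte corresponds to four weight units, so e.g. an estimate of
+			// 10 sat/vB maps to 2_500 sat/kwu.)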
+ let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + self.logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + self.fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + + log_info!( + self.logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = self.node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + )?; + } + + Ok(()) + } + + pub(crate) async fn process_broadcast_package(&self, package: Vec) { + for tx in &package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + self.esplora_client.broadcast(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_trace!(self.logger, "Successfully broadcast transaction {}", txid); + }, + Err(e) => match e { + esplora_client::Error::HttpResponse { status, message } => { + if status == 400 { + // Log 400 at lesser level, as this often just means bitcoind already knows the + // transaction. + // FIXME: We can further differentiate here based on the error + // message which will be available with rust-esplora-client 0.7 and + // later. + log_trace!( + self.logger, + "Failed to broadcast due to HTTP connection error: {}", + message + ); + } else { + log_error!( + self.logger, + "Failed to broadcast due to HTTP connection error: {} - {}", + status, + message + ); + } + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + _ => { + log_error!( + self.logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + }, + Err(e) => { + log_error!( + self.logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + self.logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } +} + +impl Filter for EsploraChainSource { + fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { + self.tx_sync.register_tx(txid, script_pubkey); + } + fn register_output(&self, output: WatchedOutput) { + self.tx_sync.register_output(output); + } +} diff --git a/src/chain/mod.rs b/src/chain/mod.rs index de651c61c..6fc336e68 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -5,58 +5,33 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
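+// The backend-specific implementations live in the `bitcoind`, `electrum`, and `esplora`
+// modules, while `ChainSource` below wraps them behind a single interface.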
-mod bitcoind_rpc; +mod bitcoind; mod electrum; -use crate::chain::bitcoind_rpc::{ - BitcoindRpcClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, -}; -use crate::chain::electrum::ElectrumRuntimeClient; +mod esplora; + +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; +use std::time::Duration; + +use bitcoin::{Script, Txid}; +use lightning::chain::{BestBlock, Filter}; +use lightning_block_sync::gossip::UtxoSource; + +use crate::chain::bitcoind::BitcoindChainSource; +use crate::chain::electrum::ElectrumChainSource; +use crate::chain::esplora::EsploraChainSource; use crate::config::{ - BackgroundSyncConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, - BDK_CLIENT_STOP_GAP, BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, - LDK_WALLET_SYNC_TIMEOUT_SECS, RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, - TX_BROADCAST_TIMEOUT_SECS, WALLET_SYNC_INTERVAL_MINIMUM_SECS, -}; -use crate::fee_estimator::{ - apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, - ConfirmationTarget, OnchainFeeEstimator, + BackgroundSyncConfig, BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, WALLET_SYNC_INTERVAL_MINIMUM_SECS, }; +use crate::fee_estimator::OnchainFeeEstimator; use crate::io::utils::write_node_metrics; -use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::logger::{log_debug, log_info, log_trace, LdkLogger, Logger}; +use crate::runtime::Runtime; use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; -use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::{Confirm, Filter, Listen, WatchedOutput}; -use lightning::util::ser::Writeable; - -use lightning_transaction_sync::EsploraSyncClient; - -use lightning_block_sync::gossip::UtxoSource; -use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; -use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; -use lightning_block_sync::SpvClient; - -use bdk_esplora::EsploraAsyncExt; -use bdk_wallet::Update as BdkUpdate; - -use esplora_client::AsyncClient as EsploraAsyncClient; - -use bitcoin::{FeeRate, Network, Script, ScriptBuf, Txid}; - -use std::collections::HashMap; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; - -// The default Esplora server we're using. -pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; - -// The default Esplora client timeout we're using. 
-pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; - -const CHAIN_POLLING_INTERVAL_SECS: u64 = 2; - pub(crate) enum WalletSyncStatus { Completed, InProgress { subscribers: tokio::sync::broadcast::Sender> }, @@ -110,223 +85,107 @@ impl WalletSyncStatus { } } -pub(crate) enum ElectrumRuntimeStatus { - Started(Arc), - Stopped { - pending_registered_txs: Vec<(Txid, ScriptBuf)>, - pending_registered_outputs: Vec, - }, -} - -impl ElectrumRuntimeStatus { - pub(crate) fn new() -> Self { - let pending_registered_txs = Vec::new(); - let pending_registered_outputs = Vec::new(); - Self::Stopped { pending_registered_txs, pending_registered_outputs } - } - - pub(crate) fn start( - &mut self, server_url: String, runtime: Arc, config: Arc, - logger: Arc, - ) -> Result<(), Error> { - match self { - Self::Stopped { pending_registered_txs, pending_registered_outputs } => { - let client = Arc::new(ElectrumRuntimeClient::new( - server_url.clone(), - runtime, - config, - logger, - )?); - - // Apply any pending `Filter` entries - for (txid, script_pubkey) in pending_registered_txs.drain(..) { - client.register_tx(&txid, &script_pubkey); - } - - for output in pending_registered_outputs.drain(..) { - client.register_output(output) - } - - *self = Self::Started(client); - }, - Self::Started(_) => { - debug_assert!(false, "We shouldn't call start if we're already started") - }, - } - Ok(()) - } - - pub(crate) fn stop(&mut self) { - *self = Self::new() - } - - pub(crate) fn client(&self) -> Option> { - match self { - Self::Started(client) => Some(Arc::clone(&client)), - Self::Stopped { .. } => None, - } - } - - fn register_tx(&mut self, txid: &Txid, script_pubkey: &Script) { - match self { - Self::Started(client) => client.register_tx(txid, script_pubkey), - Self::Stopped { pending_registered_txs, .. } => { - pending_registered_txs.push((*txid, script_pubkey.to_owned())) - }, - } - } - - fn register_output(&mut self, output: lightning::chain::WatchedOutput) { - match self { - Self::Started(client) => client.register_output(output), - Self::Stopped { pending_registered_outputs, .. 
} => { - pending_registered_outputs.push(output) - }, - } - } +pub(crate) struct ChainSource { + kind: ChainSourceKind, + tx_broadcaster: Arc, + logger: Arc, } -pub(crate) enum ChainSource { - Esplora { - sync_config: EsploraSyncConfig, - esplora_client: EsploraAsyncClient, - onchain_wallet: Arc, - onchain_wallet_sync_status: Mutex, - tx_sync: Arc>>, - lightning_wallet_sync_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, - }, - Electrum { - server_url: String, - sync_config: ElectrumSyncConfig, - electrum_runtime_status: RwLock, - onchain_wallet: Arc, - onchain_wallet_sync_status: Mutex, - lightning_wallet_sync_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, - }, - BitcoindRpc { - bitcoind_rpc_client: Arc, - header_cache: tokio::sync::Mutex, - latest_chain_tip: RwLock>, - onchain_wallet: Arc, - wallet_polling_status: Mutex, - fee_estimator: Arc, - tx_broadcaster: Arc, - kv_store: Arc, - config: Arc, - logger: Arc, - node_metrics: Arc>, - }, +enum ChainSourceKind { + Esplora(EsploraChainSource), + Electrum(ElectrumChainSource), + Bitcoind(BitcoindChainSource), } impl ChainSource { pub(crate) fn new_esplora( - server_url: String, sync_config: EsploraSyncConfig, onchain_wallet: Arc, + server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { - // FIXME / TODO: We introduced this to make `bdk_esplora` work separately without updating - // `lightning-transaction-sync`. We should revert this as part of of the upgrade to LDK 0.2. - let mut client_builder_0_11 = esplora_client_0_11::Builder::new(&server_url); - client_builder_0_11 = client_builder_0_11.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client_0_11 = client_builder_0_11.build_async().unwrap(); - let tx_sync = - Arc::new(EsploraSyncClient::from_client(esplora_client_0_11, Arc::clone(&logger))); - - let mut client_builder = esplora_client::Builder::new(&server_url); - client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - let esplora_client = client_builder.build_async().unwrap(); - - let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self::Esplora { + ) -> (Self, Option) { + let esplora_chain_source = EsploraChainSource::new( + server_url, + headers, sync_config, - esplora_client, - onchain_wallet, - onchain_wallet_sync_status, - tx_sync, - lightning_wallet_sync_status, fee_estimator, - tx_broadcaster, kv_store, config, - logger, + Arc::clone(&logger), node_metrics, - } + ); + let kind = ChainSourceKind::Esplora(esplora_chain_source); + (Self { kind, tx_broadcaster, logger }, None) } pub(crate) fn new_electrum( - server_url: String, sync_config: ElectrumSyncConfig, onchain_wallet: Arc, + server_url: String, sync_config: ElectrumSyncConfig, fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { - let electrum_runtime_status = RwLock::new(ElectrumRuntimeStatus::new()); - let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self::Electrum { + ) -> (Self, Option) { + let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, - 
electrum_runtime_status, - onchain_wallet, - onchain_wallet_sync_status, - lightning_wallet_sync_status, fee_estimator, - tx_broadcaster, kv_store, config, - logger, + Arc::clone(&logger), node_metrics, - } + ); + let kind = ChainSourceKind::Electrum(electrum_chain_source); + (Self { kind, tx_broadcaster, logger }, None) } - pub(crate) fn new_bitcoind_rpc( - host: String, port: u16, rpc_user: String, rpc_password: String, - onchain_wallet: Arc, fee_estimator: Arc, - tx_broadcaster: Arc, kv_store: Arc, config: Arc, + pub(crate) async fn new_bitcoind_rpc( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, + ) -> (Self, Option) { + let bitcoind_chain_source = BitcoindChainSource::new_rpc( + rpc_host, + rpc_port, + rpc_user, + rpc_password, + fee_estimator, + kv_store, + config, + Arc::clone(&logger), + node_metrics, + ); + let best_block = bitcoind_chain_source.poll_best_block().await.ok(); + let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); + (Self { kind, tx_broadcaster, logger }, best_block) + } + + pub(crate) async fn new_bitcoind_rest( + rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, logger: Arc, node_metrics: Arc>, - ) -> Self { - let bitcoind_rpc_client = - Arc::new(BitcoindRpcClient::new(host, port, rpc_user, rpc_password)); - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); - let latest_chain_tip = RwLock::new(None); - let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); - Self::BitcoindRpc { - bitcoind_rpc_client, - header_cache, - latest_chain_tip, - onchain_wallet, - wallet_polling_status, + ) -> (Self, Option) { + let bitcoind_chain_source = BitcoindChainSource::new_rest( + rpc_host, + rpc_port, + rpc_user, + rpc_password, fee_estimator, - tx_broadcaster, kv_store, config, - logger, + rest_client_config, + Arc::clone(&logger), node_metrics, - } + ); + let best_block = bitcoind_chain_source.poll_best_block().await.ok(); + let kind = ChainSourceKind::Bitcoind(bitcoind_chain_source); + (Self { kind, tx_broadcaster, logger }, best_block) } - pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { - match self { - Self::Electrum { server_url, electrum_runtime_status, config, logger, .. } => { - electrum_runtime_status.write().unwrap().start( - server_url.clone(), - Arc::clone(&runtime), - Arc::clone(&config), - Arc::clone(&logger), - )?; + pub(crate) fn start(&self, runtime: Arc) -> Result<(), Error> { + match &self.kind { + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.start(runtime)? }, _ => { // Nothing to do for other chain sources. @@ -336,10 +195,8 @@ impl ChainSource { } pub(crate) fn stop(&self) { - match self { - Self::Electrum { electrum_runtime_status, .. } => { - electrum_runtime_status.write().unwrap().stop(); - }, + match &self.kind { + ChainSourceKind::Electrum(electrum_chain_source) => electrum_chain_source.stop(), _ => { // Nothing to do for other chain sources. }, @@ -347,219 +204,93 @@ impl ChainSource { } pub(crate) fn as_utxo_source(&self) -> Option> { - match self { - Self::BitcoindRpc { bitcoind_rpc_client, .. 
} => Some(bitcoind_rpc_client.rpc_client()), + match &self.kind { + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + Some(bitcoind_chain_source.as_utxo_source()) + }, _ => None, } } + pub(crate) fn is_transaction_based(&self) -> bool { + match &self.kind { + ChainSourceKind::Esplora(_) => true, + ChainSourceKind::Electrum { .. } => true, + ChainSourceKind::Bitcoind { .. } => false, + } + } + pub(crate) async fn continuously_sync_wallets( - &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, + &self, stop_sync_receiver: tokio::sync::watch::Receiver<()>, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) { - match self { - Self::Esplora { sync_config, logger, .. } => { - if let Some(_background_sync_config) = sync_config.background_sync_config.as_ref() { - // Alby: background sync is disabled - sanity check - /*self.start_tx_based_sync_loop( + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + if let Some(background_sync_config) = + esplora_chain_source.sync_config.background_sync_config.as_ref() + { + self.start_tx_based_sync_loop( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, background_sync_config, - Arc::clone(&logger), + Arc::clone(&self.logger), ) - .await*/ - log_error!( - logger, - "Alby LDK-node default background syncing should be disabled. This must not happen!", - ); + .await } else { // Background syncing is disabled - // Alby: expected background syncing to be disabled - /*log_info!( - logger, + log_info!( + self.logger, "Background syncing is disabled. Manual syncing required for onchain wallet, lightning wallet, and fee rate updates.", - );*/ + ); return; } }, - Self::Electrum { sync_config, logger, .. } => { - if let Some(background_sync_config) = sync_config.background_sync_config.as_ref() { + ChainSourceKind::Electrum(electrum_chain_source) => { + if let Some(background_sync_config) = + electrum_chain_source.sync_config.background_sync_config.as_ref() + { self.start_tx_based_sync_loop( stop_sync_receiver, + onchain_wallet, channel_manager, chain_monitor, output_sweeper, background_sync_config, - Arc::clone(&logger), + Arc::clone(&self.logger), ) .await } else { // Background syncing is disabled log_info!( - logger, + self.logger, "Background syncing is disabled. Manual syncing required for onchain wallet, lightning wallet, and fee rate updates.", ); return; } }, - Self::BitcoindRpc { - bitcoind_rpc_client, - header_cache, - latest_chain_tip, - onchain_wallet, - wallet_polling_status, - kv_store, - config, - logger, - node_metrics, - .. - } => { - // First register for the wallet polling status to make sure `Node::sync_wallets` calls - // wait on the result before proceeding. - { - let mut status_lock = wallet_polling_status.lock().unwrap(); - if status_lock.register_or_subscribe_pending_sync().is_some() { - debug_assert!(false, "Sync already in progress. This should never happen."); - } - } - - log_info!( - logger, - "Starting initial synchronization of chain listeners. 
This might take a while..", - ); - - loop { - let channel_manager_best_block_hash = - channel_manager.current_best_block().block_hash; - let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; - let onchain_wallet_best_block_hash = - onchain_wallet.current_best_block().block_hash; - - let mut chain_listeners = vec![ - ( - onchain_wallet_best_block_hash, - &**onchain_wallet as &(dyn Listen + Send + Sync), - ), - ( - channel_manager_best_block_hash, - &*channel_manager as &(dyn Listen + Send + Sync), - ), - (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), - ]; - - // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s - // before giving them to `ChainMonitor` it the first place. However, this isn't - // trivial as we load them on initialization (in the `Builder`) and only gain - // network access during `start`. For now, we just make sure we get the worst known - // block hash and sychronize them via `ChainMonitor`. - if let Some(worst_channel_monitor_block_hash) = chain_monitor - .list_monitors() - .iter() - .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) - .map(|m| m.current_best_block()) - .min_by_key(|b| b.height) - .map(|b| b.block_hash) - { - chain_listeners.push(( - worst_channel_monitor_block_hash, - &*chain_monitor as &(dyn Listen + Send + Sync), - )); - } - - let mut locked_header_cache = header_cache.lock().await; - let now = SystemTime::now(); - match synchronize_listeners( - bitcoind_rpc_client.as_ref(), - config.network, - &mut *locked_header_cache, - chain_listeners.clone(), + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source + .continuously_sync_wallets( + stop_sync_receiver, + onchain_wallet, + channel_manager, + chain_monitor, + output_sweeper, ) .await - { - Ok(chain_tip) => { - { - log_info!( - logger, - "Finished synchronizing listeners in {}ms", - now.elapsed().unwrap().as_millis() - ); - *latest_chain_tip.write().unwrap() = Some(chain_tip); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .unwrap_or_else(|e| { - log_error!(logger, "Failed to persist node metrics: {}", e); - }); - } - break; - }, - - Err(e) => { - log_error!(logger, "Failed to synchronize chain listeners: {:?}", e); - tokio::time::sleep(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)) - .await; - }, - } - } - - // Now propagate the initial result to unblock waiting subscribers. - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); - - let mut chain_polling_interval = - tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); - chain_polling_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - let mut fee_rate_update_interval = - tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); - // When starting up, we just blocked on updating, so skip the first tick. - fee_rate_update_interval.reset(); - fee_rate_update_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - - log_info!(logger, "Starting continuous polling for chain updates."); - - // Start the polling loop. - loop { - tokio::select! 
{ - _ = stop_sync_receiver.changed() => { - log_trace!( - logger, - "Stopping polling for new chain data.", - ); - return; - } - _ = chain_polling_interval.tick() => { - let _ = self.poll_and_update_listeners(Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper)).await; - } - _ = fee_rate_update_interval.tick() => { - let _ = self.update_fee_rate_estimates().await; - } - } - } }, } } async fn start_tx_based_sync_loop( &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, - channel_manager: Arc, chain_monitor: Arc, - output_sweeper: Arc, background_sync_config: &BackgroundSyncConfig, - logger: Arc, + onchain_wallet: Arc, channel_manager: Arc, + chain_monitor: Arc, output_sweeper: Arc, + background_sync_config: &BackgroundSyncConfig, logger: Arc, ) { // Setup syncing intervals let onchain_wallet_sync_interval_secs = background_sync_config @@ -598,7 +329,7 @@ impl ChainSource { return; } _ = onchain_wallet_sync_interval.tick() => { - let _ = self.sync_onchain_wallet().await; + let _ = self.sync_onchain_wallet(Arc::clone(&onchain_wallet)).await; } _ = fee_rate_update_interval.tick() => { let _ = self.update_fee_rate_estimates().await; @@ -616,217 +347,19 @@ impl ChainSource { // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, // etc.) - pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { - match self { - Self::Esplora { - esplora_client, - onchain_wallet, - onchain_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. - } => { - let receiver_res = { - let mut status_lock = onchain_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let res = { - // If this is our first sync, do a full scan with the configured gap limit. - // Otherwise just do an incremental sync. - let incremental_sync = - node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - - log_info!(logger, "Starting onchain wallet sync"); - - macro_rules! 
get_and_apply_wallet_update { - ($sync_future: expr) => {{ - let now = Instant::now(); - match $sync_future.await { - Ok(res) => match res { - Ok(update) => match onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - logger, - "{} of on-chain wallet finished in {}ms.", - if incremental_sync { "Incremental sync" } else { "Sync" }, - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), Arc::clone(&logger))?; - } - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => match *e { - esplora_client::Error::Reqwest(he) => { - log_error!( - logger, - "{} of on-chain wallet failed due to HTTP connection error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - he - ); - Err(Error::WalletOperationFailed) - }, - _ => { - log_error!( - logger, - "{} of on-chain wallet failed due to Esplora error: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - e - ); - Err(Error::WalletOperationFailed) - }, - }, - }, - Err(e) => { - log_error!( - logger, - "{} of on-chain wallet timed out: {}", - if incremental_sync { "Incremental sync" } else { "Sync" }, - e - ); - Err(Error::WalletOperationTimeout) - }, - } - }} - } - - if incremental_sync { - let sync_request = onchain_wallet.get_incremental_sync_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } else { - let full_scan_request = onchain_wallet.get_full_scan_request(); - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - esplora_client.full_scan( - full_scan_request, - BDK_CLIENT_STOP_GAP, - BDK_CLIENT_CONCURRENCY, - ), - ); - get_and_apply_wallet_update!(wallet_sync_timeout_fut) - } - }; - - onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res + pub(crate) async fn sync_onchain_wallet( + &self, onchain_wallet: Arc, + ) -> Result<(), Error> { + log_info!(self.logger, "Starting onchain wallet sync"); + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.sync_onchain_wallet(onchain_wallet).await }, - Self::Electrum { - electrum_runtime_status, - onchain_wallet, - onchain_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. - } => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the onchain wallet" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; - let receiver_res = { - let mut status_lock = onchain_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - // If this is our first sync, do a full scan with the configured gap limit. 
- // Otherwise just do an incremental sync. - let incremental_sync = - node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); - - let apply_wallet_update = - |update_res: Result, now: Instant| match update_res { - Ok(update) => match onchain_wallet.apply_update(update) { - Ok(()) => { - log_info!( - logger, - "{} of on-chain wallet finished in {}ms.", - if incremental_sync { "Incremental sync" } else { "Sync" }, - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } - Ok(()) - }, - Err(e) => Err(e), - }, - Err(e) => Err(e), - }; - - let cached_txs = onchain_wallet.get_cached_txs(); - - let res = if incremental_sync { - let incremental_sync_request = onchain_wallet.get_incremental_sync_request(); - let incremental_sync_fut = electrum_client - .get_incremental_sync_wallet_update(incremental_sync_request, cached_txs); - - let now = Instant::now(); - let update_res = incremental_sync_fut.await.map(|u| u.into()); - apply_wallet_update(update_res, now) - } else { - let full_scan_request = onchain_wallet.get_full_scan_request(); - let full_scan_fut = - electrum_client.get_full_scan_wallet_update(full_scan_request, cached_txs); - let now = Instant::now(); - let update_res = full_scan_fut.await.map(|u| u.into()); - apply_wallet_update(update_res, now) - }; - - onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.sync_onchain_wallet(onchain_wallet).await }, - Self::BitcoindRpc { .. } => { - // In BitcoindRpc mode we sync lightning and onchain wallet in one go by via + ChainSourceKind::Bitcoind { .. } => { + // In BitcoindRpc mode we sync lightning and onchain wallet in one go via // `ChainPoller`. So nothing to do here. unreachable!("Onchain wallet will be synced via chain polling") }, @@ -839,169 +372,20 @@ impl ChainSource { &self, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - match self { - Self::Esplora { - tx_sync, - lightning_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. 
- } => { - let sync_cman = Arc::clone(&channel_manager); - let sync_cmon = Arc::clone(&chain_monitor); - let sync_sweeper = Arc::clone(&output_sweeper); - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - - log_debug!(logger, "Acquiring lightning wallet sync lock"); - let receiver_res = { - let mut status_lock = lightning_wallet_sync_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - let res = { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), - tx_sync.sync(confirmables), - ); - let now = Instant::now(); - log_info!(logger, "Starting lightning wallet sync"); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_info!( - logger, - "Sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - { - log_debug!(logger, "Acquiring node metrics lock"); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - log_debug!(logger, "Writing node metrics"); - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - log_debug!(logger, "Updated node metrics"); - } - - periodically_archive_fully_resolved_monitors( - Arc::clone(&channel_manager), - Arc::clone(&chain_monitor), - Arc::clone(&kv_store), - Arc::clone(&logger), - Arc::clone(&node_metrics), - )?; - Ok(()) - }, - Err(e) => { - log_error!(logger, "Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, - }, - Err(e) => { - log_error!(logger, "Lightning wallet sync timed out: {}", e); - Err(Error::TxSyncTimeout) - }, - } - }; - - lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); - - res + log_info!(self.logger, "Starting lightning wallet sync"); + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source + .sync_lightning_wallet(channel_manager, chain_monitor, output_sweeper) + .await }, - Self::Electrum { - electrum_runtime_status, - lightning_wallet_sync_status, - kv_store, - logger, - node_metrics, - .. 
- } => {
- let electrum_client: Arc = if let Some(client) =
- electrum_runtime_status.read().unwrap().client().as_ref()
- {
- Arc::clone(client)
- } else {
- debug_assert!(
- false,
- "We should have started the chain source before syncing the lightning wallet"
- );
- return Err(Error::TxSyncFailed);
- };
-
- let sync_cman = Arc::clone(&channel_manager);
- let sync_cmon = Arc::clone(&chain_monitor);
- let sync_sweeper = Arc::clone(&output_sweeper);
- let confirmables = vec![
- sync_cman as Arc,
- sync_cmon as Arc,
- sync_sweeper as Arc,
- ];
-
- let receiver_res = {
- let mut status_lock = lightning_wallet_sync_status.lock().unwrap();
- status_lock.register_or_subscribe_pending_sync()
- };
- if let Some(mut sync_receiver) = receiver_res {
- log_info!(logger, "Sync in progress, skipping.");
- return sync_receiver.recv().await.map_err(|e| {
- debug_assert!(false, "Failed to receive wallet sync result: {:?}", e);
- log_error!(logger, "Failed to receive wallet sync result: {:?}", e);
- Error::TxSyncFailed
- })?;
- }
-
- let res = electrum_client.sync_confirmables(confirmables).await;
-
- if let Ok(_) = res {
- let unix_time_secs_opt =
- SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs());
- {
- let mut locked_node_metrics = node_metrics.write().unwrap();
- locked_node_metrics.latest_lightning_wallet_sync_timestamp =
- unix_time_secs_opt;
- write_node_metrics(
- &*locked_node_metrics,
- Arc::clone(&kv_store),
- Arc::clone(&logger),
- )?;
- }
-
- periodically_archive_fully_resolved_monitors(
- Arc::clone(&channel_manager),
- Arc::clone(&chain_monitor),
- Arc::clone(&kv_store),
- Arc::clone(&logger),
- Arc::clone(&node_metrics),
- )?;
- }
-
- lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res);
-
- res
+ ChainSourceKind::Electrum(electrum_chain_source) => {
+ electrum_chain_source
+ .sync_lightning_wallet(channel_manager, chain_monitor, output_sweeper)
+ .await
 },
- Self::BitcoindRpc { .. } => {
- // In BitcoindRpc mode we sync lightning and onchain wallet in one go by via
+ ChainSourceKind::Bitcoind { .. } => {
+ // In BitcoindRpc mode we sync lightning and onchain wallet in one go via
 // `ChainPoller`. So nothing to do here.
 unreachable!("Lightning wallet will be synced via chain polling")
 },
@@ -1009,600 +393,101 @@ impl ChainSource {
 }

 pub(crate) async fn poll_and_update_listeners(
- &self, channel_manager: Arc, chain_monitor: Arc,
- output_sweeper: Arc,
+ &self, onchain_wallet: Arc, channel_manager: Arc,
+ chain_monitor: Arc, output_sweeper: Arc,
 ) -> Result<(), Error> {
- match self {
- Self::Esplora { .. } => {
+ match &self.kind {
+ ChainSourceKind::Esplora { .. } => {
 // In Esplora mode we sync lightning and onchain wallets via
 // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here.
 unreachable!("Listeners will be synced via transaction-based syncing")
 },
- Self::Electrum { .. } => {
+ ChainSourceKind::Electrum { .. } => {
 // In Electrum mode we sync lightning and onchain wallets via
 // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here.
 unreachable!("Listeners will be synced via transaction-based syncing")
 },
- Self::BitcoindRpc {
- bitcoind_rpc_client,
- header_cache,
- latest_chain_tip,
- onchain_wallet,
- wallet_polling_status,
- kv_store,
- config,
- logger,
- node_metrics,
- ..
- } => { - let receiver_res = { - let mut status_lock = wallet_polling_status.lock().unwrap(); - status_lock.register_or_subscribe_pending_sync() - }; - - if let Some(mut sync_receiver) = receiver_res { - log_info!(logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); - log_error!(logger, "Failed to receive wallet polling result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let latest_chain_tip_opt = latest_chain_tip.read().unwrap().clone(); - let chain_tip = if let Some(tip) = latest_chain_tip_opt { - tip - } else { - match validate_best_block_header(bitcoind_rpc_client.as_ref()).await { - Ok(tip) => { - *latest_chain_tip.write().unwrap() = Some(tip); - tip - }, - Err(e) => { - log_error!(logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - wallet_polling_status - .lock() - .unwrap() - .propagate_result_to_subscribers(res); - return res; - }, - } - }; - - let mut locked_header_cache = header_cache.lock().await; - let chain_poller = - ChainPoller::new(Arc::clone(&bitcoind_rpc_client), config.network); - let chain_listener = ChainListener { - onchain_wallet: Arc::clone(&onchain_wallet), - channel_manager: Arc::clone(&channel_manager), - chain_monitor, - output_sweeper, - }; - let mut spv_client = SpvClient::new( - chain_tip, - chain_poller, - &mut *locked_header_cache, - &chain_listener, - ); - - let now = SystemTime::now(); - match spv_client.poll_best_tip().await { - Ok((ChainTip::Better(tip), true)) => { - log_trace!( - logger, - "Finished polling best tip in {}ms", - now.elapsed().unwrap().as_millis() - ); - *latest_chain_tip.write().unwrap() = Some(tip); - }, - Ok(_) => {}, - Err(e) => { - log_error!(logger, "Failed to poll for chain data: {:?}", e); - let res = Err(Error::TxSyncFailed); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - - let cur_height = channel_manager.current_best_block().height; - - let now = SystemTime::now(); - let unconfirmed_txids = onchain_wallet.get_unconfirmed_txids(); - match bitcoind_rpc_client - .get_updated_mempool_transactions(cur_height, unconfirmed_txids) + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source + .poll_and_update_listeners( + onchain_wallet, + channel_manager, + chain_monitor, + output_sweeper, + ) .await - { - Ok((unconfirmed_txs, evicted_txids)) => { - log_trace!( - logger, - "Finished polling mempool of size {} and {} evicted transactions in {}ms", - unconfirmed_txs.len(), - evicted_txids.len(), - now.elapsed().unwrap().as_millis() - ); - onchain_wallet - .apply_mempool_txs(unconfirmed_txs, evicted_txids) - .unwrap_or_else(|e| { - log_error!(logger, "Failed to apply mempool transactions: {:?}", e); - }); - }, - Err(e) => { - log_error!(logger, "Failed to poll for mempool transactions: {:?}", e); - let res = Err(Error::TxSyncFailed); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - - let write_res = write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - ); - match write_res { - 
Ok(()) => (), - Err(e) => { - log_error!(logger, "Failed to persist node metrics: {}", e); - let res = Err(Error::PersistenceFailed); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - return res; - }, - } - - let res = Ok(()); - wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); - res }, } } pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { - match self { - Self::Esplora { - esplora_client, - fee_estimator, - config, - kv_store, - logger, - node_metrics, - .. - } => { - let now = Instant::now(); - log_info!(logger, "Starting fee estimates sync"); - let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - esplora_client.get_fee_estimates(), - ) - .await - .map_err(|e| { - log_error!(logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); - Error::FeerateEstimationUpdateFailed - })?; - - if estimates.is_empty() && config.network == Network::Bitcoin { - // Ensure we fail if we didn't receive any estimates. - log_error!( - logger, - "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", - ); - return Err(Error::FeerateEstimationUpdateFailed); - } - - let confirmation_targets = get_all_conf_targets(); - - let mut new_fee_rate_cache = HashMap::with_capacity(10); - for target in confirmation_targets { - let num_blocks = get_num_block_defaults_for_target(target); - - // Convert the retrieved fee rate and fall back to 1 sat/vb if we fail or it - // yields less than that. This is mostly necessary to continue on - // `signet`/`regtest` where we might not get estimates (or bogus values). - let converted_estimate_sat_vb = - esplora_client::convert_fee_rate(num_blocks, estimates.clone()) - .map_or(1.0, |converted| converted.max(1.0)); - - let fee_rate = - FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. - let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - - new_fee_rate_cache.insert(target, adjusted_fee_rate); - - log_trace!( - logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } - - fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - - log_info!( - logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - log_debug!(logger, "Acquiring node metrics lock"); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - log_debug!(logger, "Writing node metrics"); - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - log_debug!(logger, "Updated node metrics"); - } - - Ok(()) + log_info!(self.logger, "Starting fee estimates sync"); + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.update_fee_rate_estimates().await }, - Self::Electrum { - electrum_runtime_status, - fee_estimator, - kv_store, - logger, - node_metrics, - .. 
- } => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before updating fees" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; - - let now = Instant::now(); - - let new_fee_rate_cache = electrum_client.get_fee_rate_cache_update().await?; - fee_estimator.set_fee_rate_cache(new_fee_rate_cache); - - log_info!( - logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } - - Ok(()) + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.update_fee_rate_estimates().await }, - Self::BitcoindRpc { - bitcoind_rpc_client, - fee_estimator, - config, - kv_store, - logger, - node_metrics, - .. - } => { - macro_rules! get_fee_rate_update { - ($estimation_fut: expr) => {{ - let update_res = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - $estimation_fut, - ) - .await - .map_err(|e| { - log_error!(logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })?; - update_res - }}; - } - let confirmation_targets = get_all_conf_targets(); - - let mut new_fee_rate_cache = HashMap::with_capacity(10); - let now = Instant::now(); - for target in confirmation_targets { - let fee_rate_update_res = match target { - ConfirmationTarget::Lightning( - LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, - ) => { - let estimation_fut = bitcoind_rpc_client.get_mempool_minimum_fee_rate(); - get_fee_rate_update!(estimation_fut) - }, - ConfirmationTarget::Lightning( - LdkConfirmationTarget::MaximumFeeEstimate, - ) => { - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = bitcoind_rpc_client - .get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - ConfirmationTarget::Lightning( - LdkConfirmationTarget::UrgentOnChainSweep, - ) => { - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Conservative; - let estimation_fut = bitcoind_rpc_client - .get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - _ => { - // Otherwise, we default to economical block-target estimate. - let num_blocks = get_num_block_defaults_for_target(target); - let estimation_mode = FeeRateEstimationMode::Economical; - let estimation_fut = bitcoind_rpc_client - .get_fee_estimate_for_target(num_blocks, estimation_mode); - get_fee_rate_update!(estimation_fut) - }, - }; - - let fee_rate = match (fee_rate_update_res, config.network) { - (Ok(rate), _) => rate, - (Err(e), Network::Bitcoin) => { - // Strictly fail on mainnet. - log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); - return Err(Error::FeerateEstimationUpdateFailed); - }, - (Err(e), n) if n == Network::Regtest || n == Network::Signet => { - // On regtest/signet we just fall back to the usual 1 sat/vb == 250 - // sat/kwu default. 
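// ---- Editor's note: the "1 sat/vb == 250 sat/kwu" fallback mentioned in the
// comment above follows from 1 vbyte == 4 weight units, so
// sat/kwu = sat/vB * 1000 / 4 = sat/vB * 250. A minimal sketch of the
// conversion used throughout this file (the helper name is ours):
fn sat_per_vb_to_fee_rate(sat_per_vb: f64) -> bitcoin::FeeRate {
	// Clamp to the 1 sat/vB floor before converting to sats per 1000 weight units.
	bitcoin::FeeRate::from_sat_per_kwu((sat_per_vb.max(1.0) * 250.0) as u64)
}
// E.g., sat_per_vb_to_fee_rate(0.5).to_sat_per_kwu() == 250.
// ----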
- log_error!( - logger, - "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", - e, - ); - FeeRate::from_sat_per_kwu(250) - }, - (Err(e), _) => { - // On testnet `estimatesmartfee` can be unreliable so we just skip in - // case of a failure, which will have us falling back to defaults. - log_error!( - logger, - "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", - e, - ); - return Ok(()); - }, - }; - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. - let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); - - new_fee_rate_cache.insert(target, adjusted_fee_rate); - - log_trace!( - logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.to_sat_per_kwu(), - ); - } - - if fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { - // We only log if the values changed, as it might be very spammy otherwise. - log_info!( - logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - } - - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - Arc::clone(&kv_store), - Arc::clone(&logger), - )?; - } - - Ok(()) + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source.update_fee_rate_estimates().await }, } } - pub(crate) async fn process_broadcast_queue(&self) { - match self { - Self::Esplora { esplora_client, tx_broadcaster, logger, .. } => { - let mut receiver = tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - esplora_client.broadcast(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!( - logger, - "Successfully broadcast transaction {}", - txid - ); - }, - Err(e) => match e { - esplora_client::Error::HttpResponse { status, message } => { - if status == 400 { - // Log 400 at lesser level, as this often just means bitcoind already knows the - // transaction. - // FIXME: We can further differentiate here based on the error - // message which will be available with rust-esplora-client 0.7 and - // later. - log_trace!( - logger, - "Failed to broadcast due to HTTP connection error: {}", - message - ); - } else { - log_error!( - logger, - "Failed to broadcast due to HTTP connection error: {} - {}", - status, message - ); - } - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - _ => { - log_error!( - logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - }, - Err(e) => { - log_error!( - logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } - } - } - }, - Self::Electrum { electrum_runtime_status, tx_broadcaster, .. 
} => { - let electrum_client: Arc = if let Some(client) = - electrum_runtime_status.read().unwrap().client().as_ref() - { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before broadcasting" + pub(crate) async fn continuously_process_broadcast_queue( + &self, mut stop_tx_bcast_receiver: tokio::sync::watch::Receiver<()>, + ) { + let mut receiver = self.tx_broadcaster.get_broadcast_queue().await; + loop { + let tx_bcast_logger = Arc::clone(&self.logger); + tokio::select! { + _ = stop_tx_bcast_receiver.changed() => { + log_debug!( + tx_bcast_logger, + "Stopping broadcasting transactions.", ); return; - }; - - let mut receiver = tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in next_package { - electrum_client.broadcast(tx).await; - } } - }, - Self::BitcoindRpc { bitcoind_rpc_client, tx_broadcaster, logger, .. } => { - // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 - // features, we should eventually switch to use `submitpackage` via the - // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual - // transactions. - let mut receiver = tx_broadcaster.get_broadcast_queue().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let txid = tx.compute_txid(); - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - bitcoind_rpc_client.broadcast_transaction(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(id) => { - debug_assert_eq!(id, txid); - log_trace!( - logger, - "Successfully broadcast transaction {}", - txid - ); - }, - Err(e) => { - log_error!( - logger, - "Failed to broadcast transaction {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - Err(e) => { - log_error!( - logger, - "Failed to broadcast transaction due to timeout {}: {}", - txid, - e - ); - log_trace!( - logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } + Some(next_package) = receiver.recv() => { + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.process_broadcast_package(next_package).await + }, + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.process_broadcast_package(next_package).await + }, + ChainSourceKind::Bitcoind(bitcoind_chain_source) => { + bitcoind_chain_source.process_broadcast_package(next_package).await + }, } } - }, + } } } } impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - match self { - Self::Esplora { tx_sync, .. } => tx_sync.register_tx(txid, script_pubkey), - Self::Electrum { electrum_runtime_status, .. } => { - electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + match &self.kind { + ChainSourceKind::Esplora(esplora_chain_source) => { + esplora_chain_source.register_tx(txid, script_pubkey) + }, + ChainSourceKind::Electrum(electrum_chain_source) => { + electrum_chain_source.register_tx(txid, script_pubkey) }, - Self::BitcoindRpc { .. } => (), + ChainSourceKind::Bitcoind { .. } => (), } } fn register_output(&self, output: lightning::chain::WatchedOutput) { - match self { - Self::Esplora { tx_sync, .. } => tx_sync.register_output(output), - Self::Electrum { electrum_runtime_status, .. 
 } => {
- electrum_runtime_status.write().unwrap().register_output(output)
+ match &self.kind {
+ ChainSourceKind::Esplora(esplora_chain_source) => {
+ esplora_chain_source.register_output(output)
+ },
+ ChainSourceKind::Electrum(electrum_chain_source) => {
+ electrum_chain_source.register_output(output)
 },
- Self::BitcoindRpc { .. } => (),
+ ChainSourceKind::Bitcoind { .. } => (),
 }
 }
}
diff --git a/src/config.rs b/src/config.rs
index 2f77c9480..c2240a673 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -7,20 +7,19 @@
 //! Objects for configuring the node.

-use crate::logger::LogLevel;
-use crate::payment::SendingParameters;
-
-use lightning::ln::msgs::SocketAddress;
-use lightning::routing::gossip::NodeAlias;
-use lightning::util::config::ChannelConfig as LdkChannelConfig;
-use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure;
-use lightning::util::config::UserConfig;
+use std::fmt;
+use std::time::Duration;

 use bitcoin::secp256k1::PublicKey;
 use bitcoin::Network;
+use lightning::ln::msgs::SocketAddress;
+use lightning::routing::gossip::NodeAlias;
+use lightning::routing::router::RouteParametersConfig;
+use lightning::util::config::{
+ ChannelConfig as LdkChannelConfig, MaxDustHTLCExposure as LdkMaxDustHTLCExposure, UserConfig,
+};

-use std::fmt;
-use std::time::Duration;
+use crate::logger::LogLevel;

 // Config defaults
 const DEFAULT_NETWORK: Network = Network::Bitcoin;
@@ -39,6 +38,12 @@ pub const DEFAULT_LOG_FILENAME: &'static str = "ldk_node.log";
 /// The default storage directory.
 pub const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node";

+// The default Esplora server we're using.
+pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api";
+
+// The default Esplora client timeout we're using.
+pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10;
+
 // The 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold
 // number of derivation indexes after which BDK stops looking for new scripts belonging to the wallet.
 // Alby: this is only used in the first ever sync. Afterward, it only checks ~4 external and internal addresses (until more addresses are generated)
@@ -60,6 +65,9 @@ pub(crate) const PEER_RECONNECTION_INTERVAL: Duration = Duration::from_secs(60);
 // The time in-between RGS sync attempts.
 pub(crate) const RGS_SYNC_INTERVAL: Duration = Duration::from_secs(60 * 60);

+// The time in-between external scores sync attempts.
+pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL: Duration = Duration::from_secs(60 * 60);
+
 // The time in-between node announcement broadcast attempts.
 pub(crate) const NODE_ANN_BCAST_INTERVAL: Duration = Duration::from_secs(60 * 60);

@@ -70,7 +78,7 @@ pub(crate) const WALLET_SYNC_INTERVAL_MINIMUM_SECS: u64 = 10;
 pub(crate) const BDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 40; //20; // Alby: originally 90

 // The timeout after which we abort a wallet syncing operation.
-pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 20; //10; // Alby: originally 90
+pub(crate) const LDK_WALLET_SYNC_TIMEOUT_SECS: u64 = 40; //10; // Alby: originally 90

 // The timeout after which we give up waiting on LDK's event handler to exit on shutdown.
 pub(crate) const LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS: u64 = 60; // 30;
@@ -90,6 +98,9 @@ pub(crate) const RGS_SYNC_TIMEOUT_SECS: u64 = 5;
 /// The length in bytes of our wallets' keys seed.
 pub const WALLET_KEYS_SEED_LEN: usize = 64;
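// ---- Editor's note: a minimal sketch of how the new Esplora defaults above
// could be wired into a client. This assumes rust-esplora-client's `Builder`,
// whose `timeout` is in seconds; it is illustrative, not code from this diff:
fn default_esplora_client() -> Result<esplora_client::AsyncClient, esplora_client::Error> {
	esplora_client::Builder::new(DEFAULT_ESPLORA_SERVER_URL)
		.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS)
		.build_async()
}
// ----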
+// The timeout after which we abort an external scores sync operation.
+pub(crate) const EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS: u64 = 5;
+
 #[derive(Debug, Clone)]
 /// Represents the configuration of a [`Node`] instance.
 ///
@@ -110,9 +121,9 @@ pub const WALLET_KEYS_SEED_LEN: usize = 64;
 /// | `probing_liquidity_limit_multiplier` | 3 |
 /// | `log_level` | Debug |
 /// | `anchor_channels_config` | Some(..) |
-/// | `sending_parameters` | None |
+/// | `route_parameters` | None |
 ///
-/// See [`AnchorChannelsConfig`] and [`SendingParameters`] for more information regarding their
+/// See [`AnchorChannelsConfig`] and [`RouteParametersConfig`] for more information regarding their
 /// respective default values.
 ///
 /// [`Node`]: crate::Node
@@ -169,12 +180,12 @@ pub struct Config {
 pub anchor_channels_config: Option,
 /// Configuration options for payment routing and pathfinding.
 ///
- /// Setting the `SendingParameters` provides flexibility to customize how payments are routed,
+ /// Setting the [`RouteParametersConfig`] provides flexibility to customize how payments are routed,
 /// including setting limits on routing fees, CLTV expiry, and channel utilization.
 ///
 /// **Note:** If unset, default parameters will be used, and you will be able to override the
 /// parameters on a per-payment basis in the corresponding method calls.
- pub sending_parameters: Option,
+ pub route_parameters: Option,
 /// Alby: Transient network graph.
 ///
 /// If set to `true`, the graph is not persisted in the database and is only kept in memory.
@@ -192,7 +203,7 @@ impl Default for Config {
 trusted_peers_0conf: Vec::new(),
 probing_liquidity_limit_multiplier: DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER,
 anchor_channels_config: Some(AnchorChannelsConfig::default()),
- sending_parameters: None,
+ route_parameters: None,
 node_alias: None,
 transient_network_graph: false,
 }
@@ -322,6 +333,7 @@ pub(crate) fn default_user_config(config: &Config) -> UserConfig {
 user_config.manually_accept_inbound_channels = true;
 user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx =
 config.anchor_channels_config.is_some();
+ user_config.reject_inbound_splices = false;

 if may_announce_channel(config).is_err() {
 user_config.accept_forwards_to_priv_channels = false;
@@ -411,6 +423,15 @@ impl Default for ElectrumSyncConfig {
 }
 }

+/// Configuration for syncing with a Bitcoin Core backend via REST.
+#[derive(Debug, Clone)]
+pub struct BitcoindRestClientConfig {
+ /// Host URL.
+ pub rest_host: String,
+ /// Host port.
+ pub rest_port: u16,
+}
+
 /// Options which apply on a per-channel basis and may change at runtime or based on negotiation
 /// with our counterparty.
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
@@ -527,15 +548,24 @@ impl From for LdkMaxDustHTLCExposure {
 }
 }

+#[derive(Debug, Clone, Copy)]
+/// The role of the node in an asynchronous payments context.
+///
+/// See for more information about the async payments protocol.
+pub enum AsyncPaymentsRole {
+ /// Node acts as a client in an async payments context. This means that if possible, it will instruct its peers to hold
+ /// HTLCs for it, so that it can go offline.
+ Client,
+ /// Node acts as a server in an async payments context. This means that it will hold async payments HTLCs and onion
+ /// messages for its peers.
+ Server, +} + #[cfg(test)] mod tests { use std::str::FromStr; - use super::may_announce_channel; - use super::AnnounceError; - use super::Config; - use super::NodeAlias; - use super::SocketAddress; + use super::{may_announce_channel, AnnounceError, Config, NodeAlias, SocketAddress}; #[test] fn node_announce_channel() { diff --git a/src/connection.rs b/src/connection.rs index c4cde717a..e3a25f357 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -5,20 +5,19 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::logger::{log_error, log_info, LdkLogger}; -use crate::types::PeerManager; -use crate::Error; - -use lightning::ln::msgs::SocketAddress; - -use bitcoin::secp256k1::PublicKey; - use std::collections::hash_map::{self, HashMap}; use std::net::ToSocketAddrs; use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::time::Duration; +use bitcoin::secp256k1::PublicKey; +use lightning::ln::msgs::SocketAddress; + +use crate::logger::{log_error, log_info, LdkLogger}; +use crate::types::PeerManager; +use crate::Error; + pub(crate) struct ConnectionManager where L::Target: LdkLogger, diff --git a/src/data_store.rs b/src/data_store.rs index 78e3e7870..87bd831c9 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -5,16 +5,16 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::logger::{log_error, LdkLogger}; -use crate::types::DynStore; -use crate::Error; +use std::collections::{hash_map, HashMap}; +use std::ops::Deref; +use std::sync::{Arc, Mutex}; +use lightning::util::persist::KVStoreSync; use lightning::util::ser::{Readable, Writeable}; -use std::collections::hash_map; -use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, Mutex}; +use crate::logger::{log_error, LdkLogger}; +use crate::types::DynStore; +use crate::Error; pub(crate) trait StorableObject: Clone + Readable + Writeable { type Id: StorableObjectId; @@ -98,19 +98,24 @@ where let removed = self.objects.lock().unwrap().remove(id).is_some(); if removed { let store_key = id.encode_to_hex_str(); - self.kv_store - .remove(&self.primary_namespace, &self.secondary_namespace, &store_key, false) - .map_err(|e| { - log_error!( - self.logger, - "Removing object data for key {}/{}/{} failed due to: {}", - &self.primary_namespace, - &self.secondary_namespace, - store_key, - e - ); - Error::PersistenceFailed - })?; + KVStoreSync::remove( + &*self.kv_store, + &self.primary_namespace, + &self.secondary_namespace, + &store_key, + false, + ) + .map_err(|e| { + log_error!( + self.logger, + "Removing object data for key {}/{}/{} failed due to: {}", + &self.primary_namespace, + &self.secondary_namespace, + store_key, + e + ); + Error::PersistenceFailed + })?; } Ok(()) } @@ -142,19 +147,24 @@ where fn persist(&self, object: &SO) -> Result<(), Error> { let store_key = object.id().encode_to_hex_str(); let data = object.encode(); - self.kv_store - .write(&self.primary_namespace, &self.secondary_namespace, &store_key, &data) - .map_err(|e| { - log_error!( - self.logger, - "Write for key {}/{}/{} failed due to: {}", - &self.primary_namespace, - &self.secondary_namespace, - store_key, - e - ); - Error::PersistenceFailed - })?; + KVStoreSync::write( + &*self.kv_store, + &self.primary_namespace, + &self.secondary_namespace, + &store_key, + data, + ) + .map_err(|e| { + log_error!( + self.logger, + "Write for key {}/{}/{} 
failed due to: {}", + &self.primary_namespace, + &self.secondary_namespace, + store_key, + e + ); + Error::PersistenceFailed + })?; Ok(()) } } @@ -162,11 +172,11 @@ where #[cfg(test)] mod tests { use lightning::impl_writeable_tlv_based; - use lightning::util::test_utils::{TestLogger, TestStore}; - - use crate::hex_utils; + use lightning::util::test_utils::TestLogger; use super::*; + use crate::hex_utils; + use crate::io::test_utils::InMemoryStore; #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] struct TestObjectId { @@ -225,7 +235,7 @@ mod tests { #[test] fn data_is_persisted() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let primary_namespace = "datastore_test_primary".to_string(); let secondary_namespace = "datastore_test_secondary".to_string(); @@ -243,13 +253,15 @@ mod tests { let store_key = id.encode_to_hex_str(); // Check we start empty. - assert!(store.read(&primary_namespace, &secondary_namespace, &store_key).is_err()); + assert!(KVStoreSync::read(&*store, &primary_namespace, &secondary_namespace, &store_key) + .is_err()); // Check we successfully store an object and return `false` let object = TestObject { id, data: [23u8; 3] }; assert_eq!(Ok(false), data_store.insert(object.clone())); assert_eq!(Some(object), data_store.get(&id)); - assert!(store.read(&primary_namespace, &secondary_namespace, &store_key).is_ok()); + assert!(KVStoreSync::read(&*store, &primary_namespace, &secondary_namespace, &store_key) + .is_ok()); // Test re-insertion returns `true` let mut override_object = object.clone(); diff --git a/src/error.rs b/src/error.rs index 59162f0ec..c8dbb2013 100644 --- a/src/error.rs +++ b/src/error.rs @@ -5,14 +5,15 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::fmt; + use bdk_chain::bitcoin::psbt::ExtractTxError as BdkExtractTxError; use bdk_chain::local_chain::CannotConnectError as BdkChainConnectionError; use bdk_chain::tx_graph::CalculateFeeError as BdkChainCalculateFeeError; use bdk_wallet::error::CreateTxError as BdkCreateTxError; +#[allow(deprecated)] use bdk_wallet::signer::SignerError as BdkSignerError; -use std::fmt; - #[derive(Copy, Clone, Debug, PartialEq, Eq)] /// An error that possibly needs to be handled by the user. pub enum Error { @@ -42,6 +43,8 @@ pub enum Error { ChannelCreationFailed, /// A channel could not be closed. ChannelClosingFailed, + /// A channel could not be spliced. + ChannelSplicingFailed, /// A channel configuration could not be updated. ChannelConfigUpdateFailed, /// Persistence failed. @@ -122,6 +125,10 @@ pub enum Error { LiquiditySourceUnavailable, /// The given operation failed due to the LSP's required opening fee being too high. LiquidityFeeTooHigh, + /// The given blinded paths are invalid. + InvalidBlindedPaths, + /// Asynchronous payment services are disabled. 
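// ---- Editor's note (on the `KVStoreSync::write(&*self.kv_store, ...)` and
// `KVStoreSync::remove(...)` rewrites in the data_store.rs hunks above): once a
// store type implements both LDK's async `KVStore` and the sync `KVStoreSync`,
// plain method calls like `self.kv_store.write(...)` become ambiguous, so the
// call sites use fully qualified syntax. A self-contained sketch of that
// disambiguation (trait and type names are ours, not LDK's):
trait AsyncStore { fn write(&self, key: &str) -> &'static str; }
trait SyncStore { fn write(&self, key: &str) -> &'static str; }

struct Store;
impl AsyncStore for Store { fn write(&self, _key: &str) -> &'static str { "async" } }
impl SyncStore for Store { fn write(&self, _key: &str) -> &'static str { "sync" } }

fn persist(store: &Store) -> &'static str {
	// `store.write("key")` would fail to compile with both traits in scope;
	// fully qualified syntax names the trait explicitly.
	SyncStore::write(store, "key")
}
// ----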
+ AsyncPaymentServicesDisabled, } impl fmt::Display for Error { @@ -142,6 +149,7 @@ impl fmt::Display for Error { Self::ProbeSendingFailed => write!(f, "Failed to send the given payment probe."), Self::ChannelCreationFailed => write!(f, "Failed to create channel."), Self::ChannelClosingFailed => write!(f, "Failed to close channel."), + Self::ChannelSplicingFailed => write!(f, "Failed to splice channel."), Self::ChannelConfigUpdateFailed => write!(f, "Failed to update channel config."), Self::PersistenceFailed => write!(f, "Failed to persist data."), Self::FeerateEstimationUpdateFailed => { @@ -196,12 +204,17 @@ impl fmt::Display for Error { Self::LiquidityFeeTooHigh => { write!(f, "The given operation failed due to the LSP's required opening fee being too high.") }, + Self::InvalidBlindedPaths => write!(f, "The given blinded paths are invalid."), + Self::AsyncPaymentServicesDisabled => { + write!(f, "Asynchronous payment services are disabled.") + }, } } } impl std::error::Error for Error {} +#[allow(deprecated)] impl From for Error { fn from(_: BdkSignerError) -> Self { Self::OnchainTxSigningFailed diff --git a/src/event.rs b/src/event.rs index 6253c786f..d33c73d2e 100644 --- a/src/event.rs +++ b/src/event.rs @@ -5,57 +5,56 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use crate::types::{CustomTlvRecord, DynStore, PaymentStore, Sweeper, Wallet}; +use core::future::Future; +use core::task::{Poll, Waker}; +use std::collections::VecDeque; +use std::ops::Deref; +use std::sync::{Arc, Mutex}; -use crate::{ - hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, - TlvEntry, UserChannelId, +use bitcoin::blockdata::locktime::absolute::LockTime; +use bitcoin::secp256k1::PublicKey; +use bitcoin::{Amount, OutPoint}; +use lightning::events::bump_transaction::BumpTransactionEvent; +use lightning::events::{ + ClosureReason, Event as LdkEvent, PaymentFailureReason, PaymentPurpose, ReplayEvent, }; +use lightning::impl_writeable_tlv_based_enum; +use lightning::ln::channelmanager::PaymentId; +use lightning::ln::types::ChannelId; +use lightning::routing::gossip::NodeId; +use lightning::util::config::{ + ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, +}; +use lightning::util::errors::APIError; +use lightning::util::persist::KVStore; +use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use lightning_liquidity::lsps2::utils::compute_opening_fee; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; +use rand::{rng, Rng}; use crate::config::{may_announce_channel, Config}; use crate::connection::ConnectionManager; use crate::data_store::DataStoreUpdateResult; use crate::fee_estimator::ConfirmationTarget; +use crate::io::{ + EVENT_QUEUE_PERSISTENCE_KEY, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, +}; use crate::liquidity::LiquiditySource; -use crate::logger::Logger; - +use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; +use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; +use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; use crate::payment::store::{ PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; - -use crate::io::{ - EVENT_QUEUE_PERSISTENCE_KEY, EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, +use crate::runtime::Runtime; +use 
crate::types::{ + CustomTlvRecord, DynStore, OnionMessenger, PaymentStore, Sweeper, TlvEntry, Wallet, +}; +use crate::{ + hex_utils, BumpTransactionEventHandler, ChannelManager, Error, Graph, PeerInfo, PeerStore, + UserChannelId, }; -use crate::logger::{log_debug, log_error, log_info, LdkLogger}; - -use lightning::events::bump_transaction::BumpTransactionEvent; -use lightning::events::{ClosureReason, PaymentPurpose, ReplayEvent}; -use lightning::events::{Event as LdkEvent, PaymentFailureReason}; -use lightning::impl_writeable_tlv_based_enum; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::types::ChannelId; -use lightning::routing::gossip::NodeId; -use lightning::util::errors::APIError; -use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; - -use lightning_types::payment::{PaymentHash, PaymentPreimage}; - -use lightning_liquidity::lsps2::utils::compute_opening_fee; - -use bitcoin::blockdata::locktime::absolute::LockTime; -use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, OutPoint}; - -use rand::{thread_rng, Rng}; - -use core::future::Future; -use core::task::{Poll, Waker}; -use std::collections::VecDeque; -use std::ops::Deref; -use std::str::FromStr; -use std::sync::{Arc, Condvar, Mutex, RwLock}; -use std::time::Duration; /// An event emitted by [`Node`], which should be handled by the user. /// @@ -202,6 +201,10 @@ pub enum Event { funding_txo: OutPoint, }, /// A channel is ready to be used. + /// + /// This event is emitted when: + /// - A new channel has been established and is ready for use + /// - An existing channel has been spliced and is ready with the new funding output ChannelReady { /// The `channel_id` of the channel. channel_id: ChannelId, @@ -211,6 +214,14 @@ pub enum Event { /// /// This will be `None` for events serialized by LDK Node v0.1.0 and prior. counterparty_node_id: Option, + /// The outpoint of the channel's funding transaction. + /// + /// This represents the channel's current funding output, which may change when the + /// channel is spliced. For spliced channels, this will contain the new funding output + /// from the confirmed splice transaction. + /// + /// This will be `None` for events serialized by LDK Node v0.6.0 and prior. + funding_txo: Option, }, /// A channel has been closed. ChannelClosed { @@ -225,6 +236,28 @@ pub enum Event { /// This will be `None` for events serialized by LDK Node v0.2.1 and prior. reason: Option, }, + /// A channel splice is pending confirmation on-chain. + SplicePending { + /// The `channel_id` of the channel. + channel_id: ChannelId, + /// The `user_channel_id` of the channel. + user_channel_id: UserChannelId, + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The outpoint of the channel's splice funding transaction. + new_funding_txo: OutPoint, + }, + /// A channel splice has failed. + SpliceFailed { + /// The `channel_id` of the channel. + channel_id: ChannelId, + /// The `user_channel_id` of the channel. + user_channel_id: UserChannelId, + /// The `node_id` of the channel counterparty. + counterparty_node_id: PublicKey, + /// The outpoint of the channel's splice funding transaction, if one was created. 
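// ---- Editor's note (on the TLV numbering in the serialization macro below):
// LDK's TLV convention treats even type numbers as required and odd ones as
// optional for readers, so fields added to an existing variant (like
// `funding_txo` at type 3 in `ChannelReady`) take odd numbers and older
// serializations simply decode them as `None`. A self-contained sketch using
// LDK's struct-level macro:
use lightning::impl_writeable_tlv_based;

struct ExampleRecord {
	id: u64,
	// Added in a later version; the odd TLV type keeps old data readable.
	extra: Option<u64>,
}

impl_writeable_tlv_based!(ExampleRecord, {
	(0, id, required),
	(1, extra, option),
});
// ----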
+ abandoned_funding_txo: Option, + }, } impl_writeable_tlv_based_enum!(Event, @@ -249,6 +282,7 @@ impl_writeable_tlv_based_enum!(Event, (0, channel_id, required), (1, counterparty_node_id, option), (2, user_channel_id, required), + (3, funding_txo, option), }, (4, ChannelPending) => { (0, channel_id, required), @@ -281,7 +315,19 @@ impl_writeable_tlv_based_enum!(Event, (10, skimmed_fee_msat, option), (12, claim_from_onchain_tx, required), (14, outbound_amount_forwarded_msat, option), - } + }, + (8, SplicePending) => { + (1, channel_id, required), + (3, counterparty_node_id, required), + (5, user_channel_id, required), + (7, new_funding_txo, required), + }, + (9, SpliceFailed) => { + (1, channel_id, required), + (3, counterparty_node_id, required), + (5, user_channel_id, required), + (7, abandoned_funding_txo, option), + }, ); pub struct EventQueue @@ -290,7 +336,6 @@ where { queue: Arc>>, waker: Arc>>, - notifier: Condvar, kv_store: Arc, logger: L, } @@ -302,18 +347,17 @@ where pub(crate) fn new(kv_store: Arc, logger: L) -> Self { let queue = Arc::new(Mutex::new(VecDeque::new())); let waker = Arc::new(Mutex::new(None)); - let notifier = Condvar::new(); - Self { queue, waker, notifier, kv_store, logger } + Self { queue, waker, kv_store, logger } } - pub(crate) fn add_event(&self, event: Event) -> Result<(), Error> { - { + pub(crate) async fn add_event(&self, event: Event) -> Result<(), Error> { + let data = { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.push_back(event); - self.persist_queue(&locked_queue)?; - } + EventQueueSerWrapper(&locked_queue).encode() + }; - self.notifier.notify_one(); + self.persist_queue(data).await?; if let Some(waker) = self.waker.lock().unwrap().take() { waker.wake(); @@ -330,19 +374,14 @@ where EventFuture { event_queue: Arc::clone(&self.queue), waker: Arc::clone(&self.waker) }.await } - pub(crate) fn wait_next_event(&self) -> Event { - let locked_queue = - self.notifier.wait_while(self.queue.lock().unwrap(), |queue| queue.is_empty()).unwrap(); - locked_queue.front().unwrap().clone() - } - - pub(crate) fn event_handled(&self) -> Result<(), Error> { - { + pub(crate) async fn event_handled(&self) -> Result<(), Error> { + let data = { let mut locked_queue = self.queue.lock().unwrap(); locked_queue.pop_front(); - self.persist_queue(&locked_queue)?; - } - self.notifier.notify_one(); + EventQueueSerWrapper(&locked_queue).encode() + }; + + self.persist_queue(data).await?; if let Some(waker) = self.waker.lock().unwrap().take() { waker.wake(); @@ -350,26 +389,26 @@ where Ok(()) } - fn persist_queue(&self, locked_queue: &VecDeque) -> Result<(), Error> { - let data = EventQueueSerWrapper(locked_queue).encode(); - self.kv_store - .write( + async fn persist_queue(&self, encoded_queue: Vec) -> Result<(), Error> { + KVStore::write( + &*self.kv_store, + EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_KEY, + encoded_queue, + ) + .await + .map_err(|e| { + log_error!( + self.logger, + "Write for key {}/{}/{} failed due to: {}", EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, EVENT_QUEUE_PERSISTENCE_KEY, - &data, - ) - .map_err(|e| { - log_error!( - self.logger, - "Write for key {}/{}/{} failed due to: {}", - EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_KEY, - e - ); - Error::PersistenceFailed - })?; + e + ); + Error::PersistenceFailed + })?; Ok(()) } } @@ -386,8 +425,7 @@ where let 
read_queue: EventQueueDeserWrapper = Readable::read(reader)?; let queue = Arc::new(Mutex::new(read_queue.0)); let waker = Arc::new(Mutex::new(None)); - let notifier = Condvar::new(); - Ok(Self { queue, waker, notifier, kv_store, logger }) + Ok(Self { queue, waker, kv_store, logger }) } } @@ -452,9 +490,12 @@ where liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>, - runtime: Arc>>>, + runtime: Arc, logger: L, config: Arc, + static_invoice_store: Option, + onion_messenger: Arc, + om_mailbox: Option>, } impl EventHandler @@ -468,7 +509,9 @@ where output_sweeper: Arc, network_graph: Arc, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>, - runtime: Arc>>>, logger: L, config: Arc, + static_invoice_store: Option, onion_messenger: Arc, + om_mailbox: Option>, runtime: Arc, logger: L, + config: Arc, ) -> Self { Self { event_queue, @@ -484,6 +527,9 @@ where logger, runtime, config, + static_invoice_store, + onion_messenger, + om_mailbox, } } @@ -494,7 +540,7 @@ where counterparty_node_id, channel_value_satoshis, output_script, - .. + user_channel_id, } => { // Construct the raw transaction with the output that is paid the amount of the // channel. @@ -513,16 +559,44 @@ where locktime, ) { Ok(final_tx) => { - // Give the funding transaction back to LDK for opening the channel. - match self.channel_manager.funding_transaction_generated( - temporary_channel_id, - counterparty_node_id, - final_tx, - ) { + let needs_manual_broadcast = + self.liquidity_source.as_ref().map_or(false, |ls| { + ls.as_ref().lsps2_channel_needs_manual_broadcast( + counterparty_node_id, + user_channel_id, + ) + }); + + let result = if needs_manual_broadcast { + self.liquidity_source.as_ref().map(|ls| { + ls.lsps2_store_funding_transaction( + user_channel_id, + counterparty_node_id, + final_tx.clone(), + ); + }); + self.channel_manager.funding_transaction_generated_manual_broadcast( + temporary_channel_id, + counterparty_node_id, + final_tx, + ) + } else { + self.channel_manager.funding_transaction_generated( + temporary_channel_id, + counterparty_node_id, + final_tx, + ) + }; + + match result { Ok(()) => {}, Err(APIError::APIMisuseError { err }) => { - log_error!(self.logger, "Panicking due to APIMisuseError: {}", err); - panic!("APIMisuseError: {}", err); + log_error!( + self.logger, + "Encountered APIMisuseError, this should never happen: {}", + err + ); + debug_assert!(false, "APIMisuseError: {}", err); }, Err(APIError::ChannelUnavailable { err }) => { log_error!( @@ -543,34 +617,33 @@ where Err(err) => { log_error!(self.logger, "Failed to create funding transaction: {}", err); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Failed to create funding transaction".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to force close channel after funding generation failed: {:?}", e); - panic!( + debug_assert!(false, "Failed to force close channel after funding generation failed" ); }); }, } }, - LdkEvent::FundingTxBroadcastSafe { .. } => { - debug_assert!(false, "We currently only support safe funding, so this event should never be emitted."); + LdkEvent::FundingTxBroadcastSafe { user_channel_id, counterparty_node_id, .. 
} => { + self.liquidity_source.as_ref().map(|ls| { + ls.lsps2_funding_tx_broadcast_safe(user_channel_id, counterparty_node_id); + }); }, LdkEvent::PaymentClaimable { payment_hash, purpose, amount_msat, - receiver_node_id: _, - via_channel_id: _, - via_user_channel_id: _, claim_deadline, onion_fields, counterparty_skimmed_fee_msat, - payment_id: _, + .. } => { let payment_id = PaymentId(payment_hash.0); if let Some(info) = self.payment_store.get(&payment_id) { @@ -684,7 +757,8 @@ where // the payment has been registered via `_for_hash` variants and needs to be manually claimed via // user interaction. match info.kind { - PaymentKind::Bolt11 { preimage, .. } => { + PaymentKind::Bolt11 { preimage, .. } + | PaymentKind::Bolt11Jit { preimage, .. } => { if purpose.preimage().is_none() { debug_assert!( preimage.is_none(), @@ -703,7 +777,7 @@ where claim_deadline, custom_records, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!( @@ -955,7 +1029,7 @@ where .map(|cf| cf.custom_tlvs().into_iter().map(|tlv| tlv.into()).collect()) .unwrap_or_default(), }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1016,7 +1090,7 @@ where fee_paid_msat, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1047,7 +1121,7 @@ where let event = Event::PaymentFailed { payment_id: Some(payment_id), payment_hash, reason }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => return Ok(()), Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1060,29 +1134,17 @@ where LdkEvent::PaymentPathFailed { .. } => {}, LdkEvent::ProbeSuccessful { .. } => {}, LdkEvent::ProbeFailed { .. } => {}, - LdkEvent::HTLCHandlingFailed { failed_next_destination, .. } => { + LdkEvent::HTLCHandlingFailed { failure_type, .. 
} => { if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_htlc_handling_failed(failed_next_destination); - } - }, - LdkEvent::PendingHTLCsForwardable { time_forwardable } => { - let forwarding_channel_manager = self.channel_manager.clone(); - let min = time_forwardable.as_millis() as u64; - - let runtime_lock = self.runtime.read().unwrap(); - debug_assert!(runtime_lock.is_some()); - - if let Some(runtime) = runtime_lock.as_ref() { - runtime.spawn(async move { - let millis_to_sleep = thread_rng().gen_range(min..min * 5) as u64; - tokio::time::sleep(Duration::from_millis(millis_to_sleep)).await; - - forwarding_channel_manager.process_pending_htlc_forwards(); - }); + liquidity_source.handle_htlc_handling_failed(failure_type).await; } }, LdkEvent::SpendableOutputs { outputs, channel_id } => { - match self.output_sweeper.track_spendable_outputs(outputs, channel_id, true, None) { + match self + .output_sweeper + .track_spendable_outputs(outputs, channel_id, true, None) + .await + { Ok(_) => return Ok(()), Err(_) => { log_error!(self.logger, "Failed to track spendable outputs"); @@ -1104,7 +1166,7 @@ where log_error!(self.logger, "Rejecting inbound announced channel from peer {} due to missing configuration: {}", counterparty_node_id, err); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Channel request rejected".to_string(), @@ -1148,7 +1210,7 @@ where required_amount_sats, ); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Channel request rejected".to_string(), @@ -1165,7 +1227,7 @@ where counterparty_node_id, ); self.channel_manager - .force_close_without_broadcasting_txn( + .force_close_broadcasting_latest_txn( &temporary_channel_id, &counterparty_node_id, "Channel request rejected".to_string(), @@ -1177,7 +1239,7 @@ where } } - // fix for LND nodes which support both 0-conf and non-0conf + /*// fix for LND nodes which support both 0-conf and non-0conf // remove when https://github.com/lightningnetwork/lnd/pull/8796 is in LND let mut lnd_0conf_non0conf_fix: Vec = Vec::new(); // Olympus @@ -1219,18 +1281,47 @@ where let user_channel_id: u128 = rand::thread_rng().gen::(); let allow_0conf = (channel_type.requires_zero_conf() || !lnd_0conf_non0conf_fix.contains(&counterparty_node_id)) - && self.config.trusted_peers_0conf.contains(&counterparty_node_id); + && self.config.trusted_peers_0conf.contains(&counterparty_node_id);*/ + let user_channel_id: u128 = rng().random(); + let allow_0conf = self.config.trusted_peers_0conf.contains(&counterparty_node_id); + let mut channel_override_config = None; + if let Some((lsp_node_id, _)) = self + .liquidity_source + .as_ref() + .and_then(|ls| ls.as_ref().get_lsps2_lsp_details()) + { + if lsp_node_id == counterparty_node_id { + // When we're an LSPS2 client, allow claiming underpaying HTLCs as the LSP will skim off some fee. We'll + // check that they don't take too much before claiming. + // + // We also set maximum allowed inbound HTLC value in flight + // to 100%. We should eventually be able to set this on a per-channel basis, but for + // now we just bump the default for all channels. 
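+						//
+						// For illustration (editorial sketch, numbers assumed): with a JIT
+						// invoice of 10_000_000 msat and an agreed LSP opening fee of
+						// 1_000_000 msat, the LSP forwards only ~9_000_000 msat.
+						// `accept_underpaying_htlcs` lets us claim that underpaying HTLC;
+						// the `PaymentClaimable` handling above then verifies
+						// `counterparty_skimmed_fee_msat` against the stored `LSPFeeLimits`
+						// before actually claiming.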
+ channel_override_config = Some(ChannelConfigOverrides { + handshake_overrides: Some(ChannelHandshakeConfigUpdate { + max_inbound_htlc_value_in_flight_percent_of_channel: Some(100), + ..Default::default() + }), + update_overrides: Some(ChannelConfigUpdate { + accept_underpaying_htlcs: Some(true), + ..Default::default() + }), + }); + } + } let res = if allow_0conf { self.channel_manager.accept_inbound_channel_from_trusted_peer_0conf( &temporary_channel_id, &counterparty_node_id, user_channel_id, + channel_override_config, ) } else { self.channel_manager.accept_inbound_channel( &temporary_channel_id, &counterparty_node_id, user_channel_id, + channel_override_config, ) }; @@ -1271,40 +1362,46 @@ where claim_from_onchain_tx, outbound_amount_forwarded_msat, } => { - let read_only_network_graph = self.network_graph.read_only(); - let nodes = read_only_network_graph.nodes(); - let channels = self.channel_manager.list_channels(); - - let node_str = |channel_id: &Option| { - channel_id - .and_then(|channel_id| channels.iter().find(|c| c.channel_id == channel_id)) - .and_then(|channel| { - nodes.get(&NodeId::from_pubkey(&channel.counterparty.node_id)) - }) - .map_or("private_node".to_string(), |node| { - node.announcement_info - .as_ref() - .map_or("unnamed node".to_string(), |ann| { - format!("node {}", ann.alias()) - }) - }) - }; - let channel_str = |channel_id: &Option| { - channel_id - .map(|channel_id| format!(" with channel {}", channel_id)) - .unwrap_or_default() - }; - let from_prev_str = format!( - " from {}{}", - node_str(&prev_channel_id), - channel_str(&prev_channel_id) - ); - let to_next_str = - format!(" to {}{}", node_str(&next_channel_id), channel_str(&next_channel_id)); + { + let read_only_network_graph = self.network_graph.read_only(); + let nodes = read_only_network_graph.nodes(); + let channels = self.channel_manager.list_channels(); + + let node_str = |channel_id: &Option| { + channel_id + .and_then(|channel_id| { + channels.iter().find(|c| c.channel_id == channel_id) + }) + .and_then(|channel| { + nodes.get(&NodeId::from_pubkey(&channel.counterparty.node_id)) + }) + .map_or("private_node".to_string(), |node| { + node.announcement_info + .as_ref() + .map_or("unnamed node".to_string(), |ann| { + format!("node {}", ann.alias()) + }) + }) + }; + let channel_str = |channel_id: &Option| { + channel_id + .map(|channel_id| format!(" with channel {}", channel_id)) + .unwrap_or_default() + }; + let from_prev_str = format!( + " from {}{}", + node_str(&prev_channel_id), + channel_str(&prev_channel_id) + ); + let to_next_str = format!( + " to {}{}", + node_str(&next_channel_id), + channel_str(&next_channel_id) + ); - let fee_earned = total_fee_earned_msat.unwrap_or(0); - if claim_from_onchain_tx { - log_info!( + let fee_earned = total_fee_earned_msat.unwrap_or(0); + if claim_from_onchain_tx { + log_info!( self.logger, "Forwarded payment{}{} of {}msat, earning {}msat in fees from claiming onchain.", from_prev_str, @@ -1312,19 +1409,23 @@ where outbound_amount_forwarded_msat.unwrap_or(0), fee_earned, ); - } else { - log_info!( - self.logger, - "Forwarded payment{}{} of {}msat, earning {}msat in fees.", - from_prev_str, - to_next_str, - outbound_amount_forwarded_msat.unwrap_or(0), - fee_earned, - ); + } else { + log_info!( + self.logger, + "Forwarded payment{}{} of {}msat, earning {}msat in fees.", + from_prev_str, + to_next_str, + outbound_amount_forwarded_msat.unwrap_or(0), + fee_earned, + ); + } } if let Some(liquidity_source) = self.liquidity_source.as_ref() { - 
liquidity_source.handle_payment_forwarded(next_channel_id); + let skimmed_fee_msat = skimmed_fee_msat.unwrap_or(0); + liquidity_source + .handle_payment_forwarded(next_channel_id, skimmed_fee_msat) + .await; } let event = Event::PaymentForwarded { @@ -1339,7 +1440,7 @@ where claim_from_onchain_tx, outbound_amount_forwarded_msat, }; - self.event_queue.add_event(event).map_err(|e| { + self.event_queue.add_event(event).await.map_err(|e| { log_error!(self.logger, "Failed to push to event queue: {}", e); ReplayEvent() })?; @@ -1366,7 +1467,7 @@ where counterparty_node_id, funding_txo, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1407,29 +1508,42 @@ where } }, LdkEvent::ChannelReady { - channel_id, user_channel_id, counterparty_node_id, .. + channel_id, + user_channel_id, + counterparty_node_id, + funding_txo, + .. } => { - log_info!( - self.logger, - "Channel {} with counterparty {} ready to be used.", - channel_id, - counterparty_node_id, - ); + if let Some(funding_txo) = funding_txo { + log_info!( + self.logger, + "Channel {} with counterparty {} ready to be used with funding_txo {}", + channel_id, + counterparty_node_id, + funding_txo, + ); + } else { + log_info!( + self.logger, + "Channel {} with counterparty {} ready to be used", + channel_id, + counterparty_node_id, + ); + } if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_channel_ready( - user_channel_id, - &channel_id, - &counterparty_node_id, - ); + liquidity_source + .handle_channel_ready(user_channel_id, &channel_id, &counterparty_node_id) + .await; } let event = Event::ChannelReady { channel_id, user_channel_id: UserChannelId(user_channel_id), counterparty_node_id: Some(counterparty_node_id), + funding_txo, }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1459,7 +1573,7 @@ where reason: Some(reason), }; - match self.event_queue.add_event(event) { + match self.event_queue.add_event(event).await { Ok(_) => {}, Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); @@ -1476,43 +1590,41 @@ where .. } => { if let Some(liquidity_source) = self.liquidity_source.as_ref() { - liquidity_source.handle_htlc_intercepted( - requested_next_hop_scid, - intercept_id, - expected_outbound_amount_msat, - payment_hash, - ); + liquidity_source + .handle_htlc_intercepted( + requested_next_hop_scid, + intercept_id, + expected_outbound_amount_msat, + payment_hash, + ) + .await; } }, LdkEvent::InvoiceReceived { .. 
} => { debug_assert!(false, "We currently don't handle BOLT12 invoices manually, so this event should never be emitted."); }, LdkEvent::ConnectionNeeded { node_id, addresses } => { - let runtime_lock = self.runtime.read().unwrap(); - debug_assert!(runtime_lock.is_some()); - - if let Some(runtime) = runtime_lock.as_ref() { - let spawn_logger = self.logger.clone(); - let spawn_cm = Arc::clone(&self.connection_manager); - runtime.spawn(async move { - for addr in &addresses { - match spawn_cm.connect_peer_if_necessary(node_id, addr.clone()).await { - Ok(()) => { - return; - }, - Err(e) => { - log_error!( - spawn_logger, - "Failed to establish connection to peer {}@{}: {}", - node_id, - addr, - e - ); - }, - } + let spawn_logger = self.logger.clone(); + let spawn_cm = Arc::clone(&self.connection_manager); + let future = async move { + for addr in &addresses { + match spawn_cm.connect_peer_if_necessary(node_id, addr.clone()).await { + Ok(()) => { + return; + }, + Err(e) => { + log_error!( + spawn_logger, + "Failed to establish connection to peer {}@{}: {}", + node_id, + addr, + e + ); + }, } - }); - } + } + }; + self.runtime.spawn_cancellable_background_task(future); }, LdkEvent::BumpTransaction(bte) => { match bte { @@ -1540,13 +1652,210 @@ where BumpTransactionEvent::HTLCResolution { .. } => {}, } - self.bump_tx_event_handler.handle_event(&bte); + self.bump_tx_event_handler.handle_event(&bte).await; + }, + LdkEvent::OnionMessageIntercepted { peer_node_id, message } => { + if let Some(om_mailbox) = self.om_mailbox.as_ref() { + om_mailbox.onion_message_intercepted(peer_node_id, message); + } else { + log_trace!( + self.logger, + "Onion message intercepted, but no onion message mailbox available" + ); + } + }, + LdkEvent::OnionMessagePeerConnected { peer_node_id } => { + if let Some(om_mailbox) = self.om_mailbox.as_ref() { + let messages = om_mailbox.onion_message_peer_connected(peer_node_id); + + for message in messages { + if let Err(e) = + self.onion_messenger.forward_onion_message(message, &peer_node_id) + { + log_trace!( + self.logger, + "Failed to forward onion message to peer {}: {:?}", + peer_node_id, + e + ); + } + } + } }, - LdkEvent::OnionMessageIntercepted { .. } => { - debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + + LdkEvent::PersistStaticInvoice { + invoice, + invoice_request_path, + invoice_slot, + recipient_id, + invoice_persisted_path, + } => { + if let Some(store) = self.static_invoice_store.as_ref() { + match store + .handle_persist_static_invoice( + invoice, + invoice_request_path, + invoice_slot, + recipient_id, + ) + .await + { + Ok(_) => { + self.channel_manager.static_invoice_persisted(invoice_persisted_path); + }, + Err(e) => { + log_error!(self.logger, "Failed to persist static invoice: {}", e); + return Err(ReplayEvent()); + }, + }; + } }, - LdkEvent::OnionMessagePeerConnected { .. 
} => { - debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + LdkEvent::StaticInvoiceRequested { + recipient_id, + invoice_slot, + reply_path, + invoice_request, + } => { + if let Some(store) = self.static_invoice_store.as_ref() { + let invoice = + store.handle_static_invoice_requested(&recipient_id, invoice_slot).await; + + match invoice { + Ok(Some((invoice, invoice_request_path))) => { + if let Err(e) = self.channel_manager.respond_to_static_invoice_request( + invoice, + reply_path, + invoice_request, + invoice_request_path, + ) { + log_error!(self.logger, "Failed to send static invoice: {:?}", e); + } + }, + Ok(None) => { + log_trace!( + self.logger, + "No static invoice found for recipient {} and slot {}", + hex_utils::to_string(&recipient_id), + invoice_slot + ); + }, + Err(e) => { + log_error!(self.logger, "Failed to retrieve static invoice: {}", e); + return Err(ReplayEvent()); + }, + } + } + }, + // TODO(splicing): Revisit error handling once splicing API is settled in LDK 0.3 + LdkEvent::FundingTransactionReadyForSigning { + channel_id, + counterparty_node_id, + unsigned_transaction, + .. + } => match self.wallet.sign_owned_inputs(unsigned_transaction) { + Ok(partially_signed_tx) => { + match self.channel_manager.funding_transaction_signed( + &channel_id, + &counterparty_node_id, + partially_signed_tx, + ) { + Ok(()) => { + log_info!( + self.logger, + "Signed funding transaction for channel {} with counterparty {}", + channel_id, + counterparty_node_id + ); + }, + Err(e) => { + // TODO(splicing): Abort splice once supported in LDK 0.3 + debug_assert!(false, "Failed signing funding transaction: {:?}", e); + log_error!(self.logger, "Failed signing funding transaction: {:?}", e); + }, + } + }, + Err(()) => log_error!(self.logger, "Failed signing funding transaction"), + }, + LdkEvent::SplicePending { + channel_id, + user_channel_id, + counterparty_node_id, + new_funding_txo, + .. + } => { + log_info!( + self.logger, + "Channel {} with counterparty {} pending splice with funding_txo {}", + channel_id, + counterparty_node_id, + new_funding_txo, + ); + + let event = Event::SplicePending { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + new_funding_txo, + }; + + match self.event_queue.add_event(event).await { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to push to event queue: {}", e); + return Err(ReplayEvent()); + }, + }; + }, + LdkEvent::SpliceFailed { + channel_id, + user_channel_id, + counterparty_node_id, + abandoned_funding_txo, + contributed_outputs, + .. 
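+				// Editorial note on the recovery below: based on BDK's `cancel_tx`
+				// semantics (an assumption of this sketch), wrapping the contributed
+				// outputs in a dummy transaction lets the wallet mark their scripts as
+				// unused again, so the change addresses reserved for the failed splice
+				// return to the address pool.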
+ } => { + if let Some(funding_txo) = abandoned_funding_txo { + log_info!( + self.logger, + "Channel {} with counterparty {} failed splice with funding_txo {}", + channel_id, + counterparty_node_id, + funding_txo, + ); + } else { + log_info!( + self.logger, + "Channel {} with counterparty {} failed splice", + channel_id, + counterparty_node_id, + ); + } + + let tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: contributed_outputs, + }; + if let Err(e) = self.wallet.cancel_tx(&tx) { + log_error!(self.logger, "Failed reclaiming unused addresses: {}", e); + return Err(ReplayEvent()); + } + + let event = Event::SpliceFailed { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + abandoned_funding_txo, + }; + + match self.event_queue.add_event(event).await { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to push to event queue: {}", e); + return Err(ReplayEvent()); + }, + }; }, } Ok(()) @@ -1555,14 +1864,17 @@ where #[cfg(test)] mod tests { - use super::*; - use lightning::util::test_utils::{TestLogger, TestStore}; use std::sync::atomic::{AtomicU16, Ordering}; use std::time::Duration; + use lightning::util::test_utils::TestLogger; + + use super::*; + use crate::io::test_utils::InMemoryStore; + #[tokio::test] async fn event_queue_persistence() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger))); assert_eq!(event_queue.next_event(), None); @@ -1571,35 +1883,36 @@ mod tests { channel_id: ChannelId([23u8; 32]), user_channel_id: UserChannelId(2323), counterparty_node_id: None, + funding_txo: None, }; - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); // Check we get the expected event and that it is returned until we mark it handled. for _ in 0..5 { - assert_eq!(event_queue.wait_next_event(), expected_event); assert_eq!(event_queue.next_event_async().await, expected_event); assert_eq!(event_queue.next_event(), Some(expected_event.clone())); } // Check we can read back what we persisted. 
- let persisted_bytes = store - .read( - EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, - EVENT_QUEUE_PERSISTENCE_KEY, - ) - .unwrap(); + let persisted_bytes = KVStore::read( + &*store, + EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE, + EVENT_QUEUE_PERSISTENCE_KEY, + ) + .await + .unwrap(); let deser_event_queue = EventQueue::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); - assert_eq!(deser_event_queue.wait_next_event(), expected_event); + assert_eq!(deser_event_queue.next_event_async().await, expected_event); - event_queue.event_handled().unwrap(); + event_queue.event_handled().await.unwrap(); assert_eq!(event_queue.next_event(), None); } #[tokio::test] async fn event_queue_concurrency() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let event_queue = Arc::new(EventQueue::new(Arc::clone(&store), Arc::clone(&logger))); assert_eq!(event_queue.next_event(), None); @@ -1608,6 +1921,7 @@ mod tests { channel_id: ChannelId([23u8; 32]), user_channel_id: UserChannelId(2323), counterparty_node_id: None, + funding_txo: None, }; // Check `next_event_async` won't return if the queue is empty and always rather timeout. @@ -1627,28 +1941,28 @@ mod tests { let mut delayed_enqueue = false; for _ in 0..25 { - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); enqueued_events.fetch_add(1, Ordering::SeqCst); } loop { tokio::select! { _ = tokio::time::sleep(Duration::from_millis(10)), if !delayed_enqueue => { - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); enqueued_events.fetch_add(1, Ordering::SeqCst); delayed_enqueue = true; } e = event_queue.next_event_async() => { assert_eq!(e, expected_event); - event_queue.event_handled().unwrap(); + event_queue.event_handled().await.unwrap(); received_events.fetch_add(1, Ordering::SeqCst); - event_queue.add_event(expected_event.clone()).unwrap(); + event_queue.add_event(expected_event.clone()).await.unwrap(); enqueued_events.fetch_add(1, Ordering::SeqCst); } e = event_queue.next_event_async() => { assert_eq!(e, expected_event); - event_queue.event_handled().unwrap(); + event_queue.event_handled().await.unwrap(); received_events.fetch_add(1, Ordering::SeqCst); } } @@ -1660,32 +1974,5 @@ mod tests { } } assert_eq!(event_queue.next_event(), None); - - // Check we operate correctly, even when mixing and matching blocking and async API calls. - let (tx, mut rx) = tokio::sync::watch::channel(()); - let thread_queue = Arc::clone(&event_queue); - let thread_event = expected_event.clone(); - std::thread::spawn(move || { - let e = thread_queue.wait_next_event(); - assert_eq!(e, thread_event); - thread_queue.event_handled().unwrap(); - tx.send(()).unwrap(); - }); - - let thread_queue = Arc::clone(&event_queue); - let thread_event = expected_event.clone(); - std::thread::spawn(move || { - // Sleep a bit before we enqueue the events everybody is waiting for. 
- std::thread::sleep(Duration::from_millis(20));
- thread_queue.add_event(thread_event.clone()).unwrap();
- thread_queue.add_event(thread_event.clone()).unwrap();
- });
-
- let e = event_queue.next_event_async().await;
- assert_eq!(e, expected_event.clone());
- event_queue.event_handled().unwrap();
-
- rx.changed().await.unwrap();
- assert_eq!(event_queue.next_event(), None);
}
}
diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs
index f8ddcd5fd..b787ecd33 100644
--- a/src/fee_estimator.rs
+++ b/src/fee_estimator.rs
@@ -5,15 +5,15 @@
// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
// accordance with one or both of these licenses.

-use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget;
-use lightning::chain::chaininterface::FeeEstimator as LdkFeeEstimator;
-use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
-
-use bitcoin::FeeRate;
-
use std::collections::HashMap;
use std::sync::RwLock;

+use bitcoin::FeeRate;
+use lightning::chain::chaininterface::{
+ ConfirmationTarget as LdkConfirmationTarget, FeeEstimator as LdkFeeEstimator,
+ FEERATE_FLOOR_SATS_PER_KW,
+};
+
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub(crate) enum ConfirmationTarget {
/// The default target for onchain payments.
diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs
new file mode 100644
index 000000000..32464d044
--- /dev/null
+++ b/src/ffi/mod.rs
@@ -0,0 +1,47 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+#[cfg(feature = "uniffi")]
+mod types;
+
+#[cfg(feature = "uniffi")]
+pub use types::*;
+
+#[cfg(feature = "uniffi")]
+pub fn maybe_deref<T, R>(wrapped_type: &std::sync::Arc<T>) -> &R
+where
+ T: AsRef<R>,
+{
+ wrapped_type.as_ref().as_ref()
+}
+
+#[cfg(feature = "uniffi")]
+pub fn maybe_try_convert_enum<T, R>(wrapped_type: &T) -> Result<R, crate::error::Error>
+where
+ for<'a> R: TryFrom<&'a T, Error = crate::error::Error>,
+{
+ R::try_from(wrapped_type)
+}
+
+#[cfg(feature = "uniffi")]
+pub fn maybe_wrap<T>(ldk_type: impl Into<T>) -> std::sync::Arc<T> {
+ std::sync::Arc::new(ldk_type.into())
+}
+
+#[cfg(not(feature = "uniffi"))]
+pub fn maybe_deref<T>(value: &T) -> &T {
+ value
+}
+
+#[cfg(not(feature = "uniffi"))]
+pub fn maybe_try_convert_enum<T>(value: &T) -> Result<&T, crate::error::Error> {
+ Ok(value)
+}
+
+#[cfg(not(feature = "uniffi"))]
+pub fn maybe_wrap<T>(value: T) -> T {
+ value
+}
diff --git a/src/ffi/types.rs b/src/ffi/types.rs
new file mode 100644
index 000000000..3c88a665f
--- /dev/null
+++ b/src/ffi/types.rs
@@ -0,0 +1,1668 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+// Importing these items ensures they are accessible in the uniffi bindings
+// without introducing unused import warnings in lib.rs.
+//
+// Make sure to add any re-exported items that need to be used in uniffi below.
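+//
+// Illustrative sketch (editorial; not part of this file): the helpers in
+// `ffi/mod.rs` let shared code compile with and without the `uniffi` feature.
+// With the feature enabled, a call site such as
+//
+//     let offer: std::sync::Arc<Offer> = maybe_wrap(ldk_offer);
+//     let inner: &LdkOffer = maybe_deref(&offer);
+//
+// wraps the LDK type in the `Arc`-based FFI wrapper, while without the feature
+// both helpers are identity functions and the native LDK types flow through
+// unchanged.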
+ +use std::convert::TryInto; +use std::ops::Deref; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +pub use bip39::Mnemonic; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; +use bitcoin::secp256k1::PublicKey; +pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Txid}; +pub use lightning::chain::channelmonitor::BalanceSource; +pub use lightning::events::{ClosureReason, PaymentFailureReason}; +use lightning::ln::channelmanager::PaymentId; +pub use lightning::ln::types::ChannelId; +use lightning::offers::invoice::Bolt12Invoice as LdkBolt12Invoice; +pub use lightning::offers::offer::OfferId; +use lightning::offers::offer::{Amount as LdkAmount, Offer as LdkOffer}; +use lightning::offers::refund::Refund as LdkRefund; +pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; +pub use lightning::routing::router::RouteParametersConfig; +use lightning::util::ser::Writeable; +use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; +pub use lightning_invoice::{Description, SignedRawBolt11Invoice}; +pub use lightning_liquidity::lsps0::ser::LSPSDateTime; +pub use lightning_liquidity::lsps1::msgs::{ + LSPS1ChannelInfo, LSPS1OrderId, LSPS1OrderParams, LSPS1PaymentState, +}; +pub use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; +pub use lightning_types::string::UntrustedString; +pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; + +use crate::builder::sanitize_alias; +pub use crate::config::{ + default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, + EsploraSyncConfig, MaxDustHTLCExposure, +}; +use crate::error::Error; +pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; +pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig}; +pub use crate::logger::{LogLevel, LogRecord, LogWriter}; +pub use crate::payment::store::{ + ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, +}; +pub use crate::payment::QrPaymentResult; +use crate::{hex_utils, SocketAddress, UniffiCustomTypeConverter, UserChannelId}; + +impl UniffiCustomTypeConverter for PublicKey { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Ok(key) = PublicKey::from_str(&val) { + return Ok(key); + } + + Err(Error::InvalidPublicKey.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for NodeId { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Ok(key) = NodeId::from_str(&val) { + return Ok(key); + } + + Err(Error::InvalidNodeId.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for Address { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Ok(addr) = Address::from_str(&val) { + return Ok(addr.assume_checked()); + } + + Err(Error::InvalidAddress.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OfferAmount { + Bitcoin { amount_msats: u64 }, + Currency { iso4217_code: String, amount: u64 }, +} + +impl From for OfferAmount { + fn from(ldk_amount: LdkAmount) -> Self { + match ldk_amount { + LdkAmount::Bitcoin { amount_msats } => OfferAmount::Bitcoin { amount_msats }, + LdkAmount::Currency { iso4217_code, amount } => { + OfferAmount::Currency { 
iso4217_code: iso4217_code.as_str().to_owned(), amount }
+ },
+ }
+ }
+}
+
+/// An `Offer` is a potentially long-lived proposal for payment of a good or service.
+///
+/// An offer is a precursor to an [`InvoiceRequest`]. A merchant publishes an offer from which a
+/// customer may request a [`Bolt12Invoice`] for a specific quantity and using an amount sufficient
+/// to cover that quantity (i.e., at least `quantity * amount`). See [`Offer::amount`].
+///
+/// Offers may be denominated in currencies other than bitcoin but are ultimately paid using the
+/// latter.
+///
+/// Through the use of [`BlindedMessagePath`]s, offers provide recipient privacy.
+///
+/// [`InvoiceRequest`]: lightning::offers::invoice_request::InvoiceRequest
+/// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice
+/// [`Offer::amount`]: lightning::offers::offer::Offer::amount
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Offer {
+ pub(crate) inner: LdkOffer,
+}
+
+impl Offer {
+ pub fn from_str(offer_str: &str) -> Result<Self, Error> {
+ offer_str.parse()
+ }
+
+ /// Returns the id of the offer.
+ pub fn id(&self) -> OfferId {
+ OfferId(self.inner.id().0)
+ }
+
+ /// Whether the offer has expired.
+ pub fn is_expired(&self) -> bool {
+ self.inner.is_expired()
+ }
+
+ /// A complete description of the purpose of the payment.
+ ///
+ /// Intended to be displayed to the user but with the caveat that it has not been verified in any way.
+ pub fn offer_description(&self) -> Option<String> {
+ self.inner.description().map(|printable| printable.to_string())
+ }
+
+ /// The issuer of the offer, possibly beginning with `user@domain` or `domain`.
+ ///
+ /// Intended to be displayed to the user but with the caveat that it has not been verified in any way.
+ pub fn issuer(&self) -> Option<String> {
+ self.inner.issuer().map(|printable| printable.to_string())
+ }
+
+ /// The minimum amount required for a successful payment of a single item.
+ pub fn amount(&self) -> Option<OfferAmount> {
+ self.inner.amount().map(|amount| amount.into())
+ }
+
+ /// Returns whether the given quantity is valid for the offer.
+ pub fn is_valid_quantity(&self, quantity: u64) -> bool {
+ self.inner.is_valid_quantity(quantity)
+ }
+
+ /// Returns whether a quantity is expected in an [`InvoiceRequest`] for the offer.
+ ///
+ /// [`InvoiceRequest`]: lightning::offers::invoice_request::InvoiceRequest
+ pub fn expects_quantity(&self) -> bool {
+ self.inner.expects_quantity()
+ }
+
+ /// Returns whether the given chain is supported by the offer.
+ pub fn supports_chain(&self, chain: Network) -> bool {
+ self.inner.supports_chain(chain.chain_hash())
+ }
+
+ /// The chains that may be used when paying a requested invoice (e.g., bitcoin mainnet).
+ ///
+ /// Payments must be denominated in units of the minimal lightning-payable unit (e.g., msats)
+ /// for the selected chain.
+ pub fn chains(&self) -> Vec<Network> {
+ self.inner.chains().into_iter().filter_map(Network::from_chain_hash).collect()
+ }
+
+ /// Opaque bytes set by the originator.
+ ///
+ /// Useful for authentication and validating fields since it is reflected in `invoice_request`
+ /// messages along with all the other fields from the `offer`.
+ pub fn metadata(&self) -> Option<Vec<u8>> {
+ self.inner.metadata().cloned()
+ }
+
+ /// Seconds since the Unix epoch when an invoice should no longer be requested.
+ ///
+ /// If `None`, the offer does not expire.
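+ // Illustrative usage (editorial sketch; `lno1...` stands in for a real
+ // encoded offer string):
+ //
+ //     let offer = Offer::from_str("lno1...")?;
+ //     if let Some(OfferAmount::Bitcoin { amount_msats }) = offer.amount() {
+ //         assert!(amount_msats > 0);
+ //     }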
+ pub fn absolute_expiry_seconds(&self) -> Option { + self.inner.absolute_expiry().map(|duration| duration.as_secs()) + } + + /// The public key corresponding to the key used by the recipient to sign invoices. + /// - If [`Offer::paths`] is empty, MUST be `Some` and contain the recipient's node id for + /// sending an [`InvoiceRequest`]. + /// - If [`Offer::paths`] is not empty, MAY be `Some` and contain a transient id. + /// - If `None`, the signing pubkey will be the final blinded node id from the + /// [`BlindedMessagePath`] in [`Offer::paths`] used to send the [`InvoiceRequest`]. + /// + /// See also [`Bolt12Invoice::signing_pubkey`]. + /// + /// [`InvoiceRequest`]: lightning::offers::invoice_request::InvoiceRequest + /// [`Bolt12Invoice::signing_pubkey`]: lightning::offers::invoice::Bolt12Invoice::signing_pubkey + pub fn issuer_signing_pubkey(&self) -> Option { + self.inner.issuer_signing_pubkey() + } +} + +impl std::str::FromStr for Offer { + type Err = Error; + + fn from_str(offer_str: &str) -> Result { + offer_str + .parse::() + .map(|offer| Offer { inner: offer }) + .map_err(|_| Error::InvalidOffer) + } +} + +impl From for Offer { + fn from(offer: LdkOffer) -> Self { + Offer { inner: offer } + } +} + +impl Deref for Offer { + type Target = LdkOffer; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for Offer { + fn as_ref(&self) -> &LdkOffer { + self.deref() + } +} + +impl std::fmt::Display for Offer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.inner) + } +} + +/// A `Refund` is a request to send an [`Bolt12Invoice`] without a preceding [`Offer`]. +/// +/// Typically, after an invoice is paid, the recipient may publish a refund allowing the sender to +/// recoup their funds. A refund may be used more generally as an "offer for money", such as with a +/// bitcoin ATM. +/// +/// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice +/// [`Offer`]: lightning::offers::offer::Offer +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Refund { + pub(crate) inner: LdkRefund, +} + +impl Refund { + pub fn from_str(refund_str: &str) -> Result { + refund_str.parse() + } + + /// A complete description of the purpose of the refund. + /// + /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. + pub fn refund_description(&self) -> String { + self.inner.description().to_string() + } + + /// Seconds since the Unix epoch when an invoice should no longer be sent. + /// + /// If `None`, the refund does not expire. + pub fn absolute_expiry_seconds(&self) -> Option { + self.inner.absolute_expiry().map(|duration| duration.as_secs()) + } + + /// Whether the refund has expired. + pub fn is_expired(&self) -> bool { + self.inner.is_expired() + } + + /// The issuer of the refund, possibly beginning with `user@domain` or `domain`. + /// + /// Intended to be displayed to the user but with the caveat that it has not been verified in any way. + pub fn issuer(&self) -> Option { + self.inner.issuer().map(|printable| printable.to_string()) + } + + /// An unpredictable series of bytes, typically containing information about the derivation of + /// [`payer_signing_pubkey`]. + /// + /// [`payer_signing_pubkey`]: Self::payer_signing_pubkey + pub fn payer_metadata(&self) -> Vec { + self.inner.payer_metadata().to_vec() + } + + /// A chain that the refund is valid for. 
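+ // Illustrative usage (editorial sketch; `lnr1...` stands in for a real
+ // encoded refund string):
+ //
+ //     let refund = Refund::from_str("lnr1...")?;
+ //     if !refund.is_expired() {
+ //         let _msats_to_send = refund.amount_msats();
+ //     }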
+ pub fn chain(&self) -> Option<Network> {
+ Network::try_from(self.inner.chain()).ok()
+ }
+
+ /// The amount to refund in msats (i.e., the minimum lightning-payable unit for [`chain`]).
+ ///
+ /// [`chain`]: Self::chain
+ pub fn amount_msats(&self) -> u64 {
+ self.inner.amount_msats()
+ }
+
+ /// The quantity of an item that the refund is for.
+ pub fn quantity(&self) -> Option<u64> {
+ self.inner.quantity()
+ }
+
+ /// A public node id to send to in the case where there are no [`paths`].
+ ///
+ /// Otherwise, a possibly transient pubkey.
+ ///
+ /// [`paths`]: lightning::offers::refund::Refund::paths
+ pub fn payer_signing_pubkey(&self) -> PublicKey {
+ self.inner.payer_signing_pubkey()
+ }
+
+ /// Payer provided note to include in the invoice.
+ pub fn payer_note(&self) -> Option<String> {
+ self.inner.payer_note().map(|printable| printable.to_string())
+ }
+}
+
+impl std::str::FromStr for Refund {
+ type Err = Error;
+
+ fn from_str(refund_str: &str) -> Result<Self, Self::Err> {
+ refund_str
+ .parse::<LdkRefund>()
+ .map(|refund| Refund { inner: refund })
+ .map_err(|_| Error::InvalidRefund)
+ }
+}
+
+impl From<LdkRefund> for Refund {
+ fn from(refund: LdkRefund) -> Self {
+ Refund { inner: refund }
+ }
+}
+
+impl Deref for Refund {
+ type Target = LdkRefund;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl AsRef<LdkRefund> for Refund {
+ fn as_ref(&self) -> &LdkRefund {
+ self.deref()
+ }
+}
+
+impl std::fmt::Display for Refund {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.inner)
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Bolt12Invoice {
+ pub(crate) inner: LdkBolt12Invoice,
+}
+
+impl Bolt12Invoice {
+ pub fn from_str(invoice_str: &str) -> Result<Self, Error> {
+ invoice_str.parse()
+ }
+
+ /// SHA256 hash of the payment preimage that will be given in return for paying the invoice.
+ pub fn payment_hash(&self) -> PaymentHash {
+ PaymentHash(self.inner.payment_hash().0)
+ }
+
+ /// The minimum amount required for a successful payment of the invoice.
+ pub fn amount_msats(&self) -> u64 {
+ self.inner.amount_msats()
+ }
+
+ /// The minimum amount required for a successful payment of a single item.
+ ///
+ /// From [`Offer::amount`]; `None` if the invoice was created in response to a [`Refund`] or if
+ /// the [`Offer`] did not set it.
+ ///
+ /// [`Offer`]: lightning::offers::offer::Offer
+ /// [`Offer::amount`]: lightning::offers::offer::Offer::amount
+ /// [`Refund`]: lightning::offers::refund::Refund
+ pub fn amount(&self) -> Option<OfferAmount> {
+ self.inner.amount().map(|amount| amount.into())
+ }
+
+ /// A typically transient public key corresponding to the key used to sign the invoice.
+ ///
+ /// If the invoice was created in response to an [`Offer`], then this will be:
+ /// - [`Offer::issuer_signing_pubkey`] if it's `Some`, otherwise
+ /// - the final blinded node id from a [`BlindedMessagePath`] in [`Offer::paths`] if `None`.
+ ///
+ /// If the invoice was created in response to a [`Refund`], then it is a valid pubkey chosen by
+ /// the recipient.
+ ///
+ /// [`Offer`]: lightning::offers::offer::Offer
+ /// [`Offer::issuer_signing_pubkey`]: lightning::offers::offer::Offer::issuer_signing_pubkey
+ /// [`Offer::paths`]: lightning::offers::offer::Offer::paths
+ /// [`Refund`]: lightning::offers::refund::Refund
+ pub fn signing_pubkey(&self) -> PublicKey {
+ self.inner.signing_pubkey()
+ }
+
+ /// Duration since the Unix epoch when the invoice was created.
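+ // For illustration: the absolute expiry of a BOLT12 invoice follows from
+ // `created_at() + relative_expiry()`. E.g., an invoice created at Unix time
+ // 1_700_000_000 with a relative expiry of 7_200 seconds should no longer be
+ // paid once the current time exceeds 1_700_007_200.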
+ pub fn created_at(&self) -> u64 {
+ self.inner.created_at().as_secs()
+ }
+
+ /// Seconds since the Unix epoch when an invoice should no longer be requested.
+ ///
+ /// From [`Offer::absolute_expiry`] or [`Refund::absolute_expiry`].
+ ///
+ /// [`Offer::absolute_expiry`]: lightning::offers::offer::Offer::absolute_expiry
+ pub fn absolute_expiry_seconds(&self) -> Option<u64> {
+ self.inner.absolute_expiry().map(|duration| duration.as_secs())
+ }
+
+ /// Seconds since [`Self::created_at`] when the invoice has expired and therefore should no
+ /// longer be paid.
+ pub fn relative_expiry(&self) -> u64 {
+ self.inner.relative_expiry().as_secs()
+ }
+
+ /// Whether the invoice has expired.
+ pub fn is_expired(&self) -> bool {
+ self.inner.is_expired()
+ }
+
+ /// A complete description of the purpose of the originating offer or refund.
+ ///
+ /// From [`Offer::description`] or [`Refund::description`].
+ ///
+ /// [`Offer::description`]: lightning::offers::offer::Offer::description
+ /// [`Refund::description`]: lightning::offers::refund::Refund::description
+ pub fn invoice_description(&self) -> Option<String> {
+ self.inner.description().map(|printable| printable.to_string())
+ }
+
+ /// The issuer of the offer or refund.
+ ///
+ /// From [`Offer::issuer`] or [`Refund::issuer`].
+ ///
+ /// [`Offer::issuer`]: lightning::offers::offer::Offer::issuer
+ /// [`Refund::issuer`]: lightning::offers::refund::Refund::issuer
+ pub fn issuer(&self) -> Option<String> {
+ self.inner.issuer().map(|printable| printable.to_string())
+ }
+
+ /// A payer-provided note reflected back in the invoice.
+ ///
+ /// From [`InvoiceRequest::payer_note`] or [`Refund::payer_note`].
+ ///
+ /// [`Refund::payer_note`]: lightning::offers::refund::Refund::payer_note
+ pub fn payer_note(&self) -> Option<String> {
+ self.inner.payer_note().map(|note| note.to_string())
+ }
+
+ /// Opaque bytes set by the originating [`Offer`].
+ ///
+ /// From [`Offer::metadata`]; `None` if the invoice was created in response to a [`Refund`] or
+ /// if the [`Offer`] did not set it.
+ ///
+ /// [`Offer`]: lightning::offers::offer::Offer
+ /// [`Offer::metadata`]: lightning::offers::offer::Offer::metadata
+ /// [`Refund`]: lightning::offers::refund::Refund
+ pub fn metadata(&self) -> Option<Vec<u8>> {
+ self.inner.metadata().cloned()
+ }
+
+ /// The quantity of items requested or refunded for.
+ ///
+ /// From [`InvoiceRequest::quantity`] or [`Refund::quantity`].
+ ///
+ /// [`Refund::quantity`]: lightning::offers::refund::Refund::quantity
+ pub fn quantity(&self) -> Option<u64> {
+ self.inner.quantity()
+ }
+
+ /// Hash that was used for signing the invoice.
+ pub fn signable_hash(&self) -> Vec<u8> {
+ self.inner.signable_hash().to_vec()
+ }
+
+ /// A possibly transient pubkey used to sign the invoice request or to send an invoice for a
+ /// refund in case there are no [`message_paths`].
+ ///
+ /// [`message_paths`]: lightning::offers::invoice::Bolt12Invoice
+ pub fn payer_signing_pubkey(&self) -> PublicKey {
+ self.inner.payer_signing_pubkey()
+ }
+
+ /// The public key used by the recipient to sign invoices.
+ ///
+ /// From [`Offer::issuer_signing_pubkey`] and may be `None`; also `None` if the invoice was
+ /// created in response to a [`Refund`].
+ ///
+ /// [`Offer::issuer_signing_pubkey`]: lightning::offers::offer::Offer::issuer_signing_pubkey
+ /// [`Refund`]: lightning::offers::refund::Refund
+ pub fn issuer_signing_pubkey(&self) -> Option<PublicKey> {
+ self.inner.issuer_signing_pubkey()
+ }
+
+ /// The chain that must be used when paying the invoice; selected from [`offer_chains`] if the
+ /// invoice originated from an offer.
+ ///
+ /// From [`InvoiceRequest::chain`] or [`Refund::chain`].
+ ///
+ /// [`offer_chains`]: lightning::offers::invoice::Bolt12Invoice::offer_chains
+ /// [`InvoiceRequest::chain`]: lightning::offers::invoice_request::InvoiceRequest::chain
+ /// [`Refund::chain`]: lightning::offers::refund::Refund::chain
+ pub fn chain(&self) -> Vec<u8> {
+ self.inner.chain().to_bytes().to_vec()
+ }
+
+ /// The chains that may be used when paying a requested invoice.
+ ///
+ /// From [`Offer::chains`]; `None` if the invoice was created in response to a [`Refund`].
+ ///
+ /// [`Offer::chains`]: lightning::offers::offer::Offer::chains
+ /// [`Refund`]: lightning::offers::refund::Refund
+ pub fn offer_chains(&self) -> Option<Vec<Vec<u8>>> {
+ self.inner
+ .offer_chains()
+ .map(|chains| chains.iter().map(|chain| chain.to_bytes().to_vec()).collect())
+ }
+
+ /// Fallback addresses for paying the invoice on-chain, in order of most-preferred to
+ /// least-preferred.
+ pub fn fallback_addresses(&self) -> Vec<Address>
{ + self.inner.fallbacks() + } + + /// Writes `self` out to a `Vec`. + pub fn encode(&self) -> Vec { + self.inner.encode() + } +} + +impl std::str::FromStr for Bolt12Invoice { + type Err = Error; + + fn from_str(invoice_str: &str) -> Result { + if let Some(bytes_vec) = hex_utils::to_vec(invoice_str) { + if let Ok(invoice) = LdkBolt12Invoice::try_from(bytes_vec) { + return Ok(Bolt12Invoice { inner: invoice }); + } + } + Err(Error::InvalidInvoice) + } +} + +impl From for Bolt12Invoice { + fn from(invoice: LdkBolt12Invoice) -> Self { + Bolt12Invoice { inner: invoice } + } +} + +impl Deref for Bolt12Invoice { + type Target = LdkBolt12Invoice; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl AsRef for Bolt12Invoice { + fn as_ref(&self) -> &LdkBolt12Invoice { + self.deref() + } +} + +impl UniffiCustomTypeConverter for OfferId { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Some(bytes_vec) = hex_utils::to_vec(&val) { + let bytes_res = bytes_vec.try_into(); + if let Ok(bytes) = bytes_res { + return Ok(OfferId(bytes)); + } + } + Err(Error::InvalidOfferId.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + hex_utils::to_string(&obj.0) + } +} + +impl UniffiCustomTypeConverter for PaymentId { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Some(bytes_vec) = hex_utils::to_vec(&val) { + let bytes_res = bytes_vec.try_into(); + if let Ok(bytes) = bytes_res { + return Ok(PaymentId(bytes)); + } + } + Err(Error::InvalidPaymentId.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + hex_utils::to_string(&obj.0) + } +} + +impl UniffiCustomTypeConverter for PaymentHash { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Ok(hash) = Sha256::from_str(&val) { + Ok(PaymentHash(hash.to_byte_array())) + } else { + Err(Error::InvalidPaymentHash.into()) + } + } + + fn from_custom(obj: Self) -> Self::Builtin { + Sha256::from_slice(&obj.0).unwrap().to_string() + } +} + +impl UniffiCustomTypeConverter for PaymentPreimage { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Some(bytes_vec) = hex_utils::to_vec(&val) { + let bytes_res = bytes_vec.try_into(); + if let Ok(bytes) = bytes_res { + return Ok(PaymentPreimage(bytes)); + } + } + Err(Error::InvalidPaymentPreimage.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + hex_utils::to_string(&obj.0) + } +} + +impl UniffiCustomTypeConverter for PaymentSecret { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Some(bytes_vec) = hex_utils::to_vec(&val) { + let bytes_res = bytes_vec.try_into(); + if let Ok(bytes) = bytes_res { + return Ok(PaymentSecret(bytes)); + } + } + Err(Error::InvalidPaymentSecret.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + hex_utils::to_string(&obj.0) + } +} + +impl UniffiCustomTypeConverter for ChannelId { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + if let Some(hex_vec) = hex_utils::to_vec(&val) { + if hex_vec.len() == 32 { + let mut channel_id = [0u8; 32]; + channel_id.copy_from_slice(&hex_vec[..]); + return Ok(Self(channel_id)); + } + } + Err(Error::InvalidChannelId.into()) + } + + fn from_custom(obj: Self) -> Self::Builtin { + hex_utils::to_string(&obj.0) + } +} + +impl UniffiCustomTypeConverter for UserChannelId { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + 
Ok(UserChannelId(u128::from_str(&val).map_err(|_| Error::InvalidChannelId)?)) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.0.to_string() + } +} + +impl UniffiCustomTypeConverter for Txid { + type Builtin = String; + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(Txid::from_str(&val)?) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for BlockHash { + type Builtin = String; + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(BlockHash::from_str(&val)?) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for Mnemonic { + type Builtin = String; + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(Mnemonic::from_str(&val).map_err(|_| Error::InvalidSecretKey)?) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for SocketAddress { + type Builtin = String; + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(SocketAddress::from_str(&val).map_err(|_| Error::InvalidSocketAddress)?) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for UntrustedString { + type Builtin = String; + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(UntrustedString(val)) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +impl UniffiCustomTypeConverter for NodeAlias { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(sanitize_alias(&val).map_err(|_| Error::InvalidNodeAlias)?) + } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} + +/// Represents the description of an invoice which has to be either a directly included string or +/// a hash of a description provided out of band. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum Bolt11InvoiceDescription { + /// Contains a full description. + Direct { + /// Description of what the invoice is for + description: String, + }, + /// Contains a hash. 
+ Hash { + /// Hash of the description of what the invoice is for + hash: String, + }, +} + +impl TryFrom<&Bolt11InvoiceDescription> for lightning_invoice::Bolt11InvoiceDescription { + type Error = Error; + + fn try_from(value: &Bolt11InvoiceDescription) -> Result { + match value { + Bolt11InvoiceDescription::Direct { description } => { + Description::new(description.clone()) + .map(lightning_invoice::Bolt11InvoiceDescription::Direct) + .map_err(|_| Error::InvoiceCreationFailed) + }, + Bolt11InvoiceDescription::Hash { hash } => Sha256::from_str(&hash) + .map(lightning_invoice::Sha256) + .map(lightning_invoice::Bolt11InvoiceDescription::Hash) + .map_err(|_| Error::InvoiceCreationFailed), + } + } +} + +impl From for Bolt11InvoiceDescription { + fn from(value: lightning_invoice::Bolt11InvoiceDescription) -> Self { + match value { + lightning_invoice::Bolt11InvoiceDescription::Direct(description) => { + Bolt11InvoiceDescription::Direct { description: description.to_string() } + }, + lightning_invoice::Bolt11InvoiceDescription::Hash(hash) => { + Bolt11InvoiceDescription::Hash { hash: hex_utils::to_string(hash.0.as_ref()) } + }, + } + } +} + +impl<'a> From> for Bolt11InvoiceDescription { + fn from(value: Bolt11InvoiceDescriptionRef<'a>) -> Self { + match value { + lightning_invoice::Bolt11InvoiceDescriptionRef::Direct(description) => { + Bolt11InvoiceDescription::Direct { description: description.to_string() } + }, + lightning_invoice::Bolt11InvoiceDescriptionRef::Hash(hash) => { + Bolt11InvoiceDescription::Hash { hash: hex_utils::to_string(hash.0.as_ref()) } + }, + } + } +} + +/// Enum representing the crypto currencies (or networks) supported by this library +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum Currency { + /// Bitcoin mainnet + Bitcoin, + + /// Bitcoin testnet + BitcoinTestnet, + + /// Bitcoin regtest + Regtest, + + /// Bitcoin simnet + Simnet, + + /// Bitcoin signet + Signet, +} + +impl From for Currency { + fn from(currency: lightning_invoice::Currency) -> Self { + match currency { + lightning_invoice::Currency::Bitcoin => Currency::Bitcoin, + lightning_invoice::Currency::BitcoinTestnet => Currency::BitcoinTestnet, + lightning_invoice::Currency::Regtest => Currency::Regtest, + lightning_invoice::Currency::Simnet => Currency::Simnet, + lightning_invoice::Currency::Signet => Currency::Signet, + } + } +} + +/// A channel descriptor for a hop along a payment path. +/// +/// While this generally comes from BOLT 11's `r` field, this struct includes more fields than are +/// available in BOLT 11. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RouteHintHop { + /// The node_id of the non-target end of the route + pub src_node_id: PublicKey, + /// The short_channel_id of this channel + pub short_channel_id: u64, + /// The fees which must be paid to use this channel + pub fees: RoutingFees, + /// The difference in CLTV values between this node and the next node. + pub cltv_expiry_delta: u16, + /// The minimum value, in msat, which must be relayed to the next hop. + pub htlc_minimum_msat: Option, + /// The maximum value in msat available for routing with a single HTLC. 
+ pub htlc_maximum_msat: Option, +} + +impl From for RouteHintHop { + fn from(hop: lightning::routing::router::RouteHintHop) -> Self { + Self { + src_node_id: hop.src_node_id, + short_channel_id: hop.short_channel_id, + cltv_expiry_delta: hop.cltv_expiry_delta, + htlc_minimum_msat: hop.htlc_minimum_msat, + htlc_maximum_msat: hop.htlc_maximum_msat, + fees: hop.fees, + } + } +} + +/// Represents a syntactically and semantically correct lightning BOLT11 invoice. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Bolt11Invoice { + pub(crate) inner: LdkBolt11Invoice, +} + +impl Bolt11Invoice { + pub fn from_str(invoice_str: &str) -> Result { + invoice_str.parse() + } + + /// The hash of the [`RawBolt11Invoice`] that was signed. + /// + /// [`RawBolt11Invoice`]: lightning_invoice::RawBolt11Invoice + pub fn signable_hash(&self) -> Vec { + self.inner.signable_hash().to_vec() + } + + /// Returns the hash to which we will receive the preimage on completion of the payment + pub fn payment_hash(&self) -> PaymentHash { + PaymentHash(self.inner.payment_hash().to_byte_array()) + } + + /// Get the payment secret if one was included in the invoice + pub fn payment_secret(&self) -> PaymentSecret { + PaymentSecret(self.inner.payment_secret().0) + } + + /// Returns the amount if specified in the invoice as millisatoshis. + pub fn amount_milli_satoshis(&self) -> Option { + self.inner.amount_milli_satoshis() + } + + /// Returns the invoice's expiry time (in seconds), if present, otherwise [`DEFAULT_EXPIRY_TIME`]. + /// + /// [`DEFAULT_EXPIRY_TIME`]: lightning_invoice::DEFAULT_EXPIRY_TIME + pub fn expiry_time_seconds(&self) -> u64 { + self.inner.expiry_time().as_secs() + } + + /// Returns the `Bolt11Invoice`'s timestamp as seconds since the Unix epoch + pub fn seconds_since_epoch(&self) -> u64 { + self.inner.duration_since_epoch().as_secs() + } + + /// Returns the seconds remaining until the invoice expires. + pub fn seconds_until_expiry(&self) -> u64 { + self.inner.duration_until_expiry().as_secs() + } + + /// Returns whether the invoice has expired. + pub fn is_expired(&self) -> bool { + self.inner.is_expired() + } + + /// Returns whether the expiry time would pass at the given point in time. + /// `at_time_seconds` is the timestamp as seconds since the Unix epoch. + pub fn would_expire(&self, at_time_seconds: u64) -> bool { + self.inner.would_expire(Duration::from_secs(at_time_seconds)) + } + + /// Return the description or a hash of it for longer ones + pub fn invoice_description(&self) -> Bolt11InvoiceDescription { + self.inner.description().into() + } + + /// Returns the invoice's `min_final_cltv_expiry_delta` time, if present, otherwise + /// [`DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA`]. + /// + /// [`DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA`]: lightning_invoice::DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA + pub fn min_final_cltv_expiry_delta(&self) -> u64 { + self.inner.min_final_cltv_expiry_delta() + } + + /// Returns the network for which the invoice was issued + pub fn network(&self) -> Network { + self.inner.network() + } + + /// Returns the currency for which the invoice was issued + pub fn currency(&self) -> Currency { + self.inner.currency().into() + } + + /// Returns a list of all fallback addresses as [`Address`]es + pub fn fallback_addresses(&self) -> Vec
{
+ self.inner.fallback_addresses()
+ }
+
+ /// Returns a list of all routes included in the invoice as the underlying hints
+ pub fn route_hints(&self) -> Vec<Vec<RouteHintHop>> {
+ self.inner
+ .route_hints()
+ .iter()
+ .map(|route| route.0.iter().map(|hop| RouteHintHop::from(hop.clone())).collect())
+ .collect()
+ }
+
+ /// Recover the payee's public key (only to be used if none was included in the invoice)
+ pub fn recover_payee_pub_key(&self) -> PublicKey {
+ self.inner.recover_payee_pub_key()
+ }
+}
+
+impl std::str::FromStr for Bolt11Invoice {
+ type Err = Error;
+
+ fn from_str(invoice_str: &str) -> Result<Self, Self::Err> {
+ match invoice_str.parse::<SignedRawBolt11Invoice>() {
+ Ok(signed) => match LdkBolt11Invoice::from_signed(signed) {
+ Ok(invoice) => Ok(Bolt11Invoice { inner: invoice }),
+ Err(_) => Err(Error::InvalidInvoice),
+ },
+ Err(_) => Err(Error::InvalidInvoice),
+ }
+ }
+}
+
+impl From<LdkBolt11Invoice> for Bolt11Invoice {
+ fn from(invoice: LdkBolt11Invoice) -> Self {
+ Bolt11Invoice { inner: invoice }
+ }
+}
+
+impl Deref for Bolt11Invoice {
+ type Target = LdkBolt11Invoice;
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+}
+
+impl AsRef<LdkBolt11Invoice> for Bolt11Invoice {
+ fn as_ref(&self) -> &LdkBolt11Invoice {
+ self.deref()
+ }
+}
+
+impl std::fmt::Display for Bolt11Invoice {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.inner)
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct LSPS1PaymentInfo {
+ /// A Lightning payment using BOLT 11.
+ pub bolt11: Option<LSPS1Bolt11PaymentInfo>,
+ /// An onchain payment.
+ pub onchain: Option<LSPS1OnchainPaymentInfo>,
+}
+
+#[cfg(feature = "uniffi")]
+impl From<lightning_liquidity::lsps1::msgs::LSPS1PaymentInfo> for LSPS1PaymentInfo {
+ fn from(value: lightning_liquidity::lsps1::msgs::LSPS1PaymentInfo) -> Self {
+ LSPS1PaymentInfo {
+ bolt11: value.bolt11.map(|b| b.into()),
+ onchain: value.onchain.map(|o| o.into()),
+ }
+ }
+}
+
+/// An onchain payment.
+#[cfg(feature = "uniffi")]
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct LSPS1OnchainPaymentInfo {
+ /// Indicates the current state of the payment.
+ pub state: lightning_liquidity::lsps1::msgs::LSPS1PaymentState,
+ /// The datetime when the payment option expires.
+ pub expires_at: LSPSDateTime,
+ /// The total fee the LSP will charge to open this channel in satoshi.
+ pub fee_total_sat: u64,
+ /// The amount the client needs to pay to have the requested channel opened.
+ pub order_total_sat: u64,
+ /// An on-chain address the client can send [`Self::order_total_sat`] to in order to have the
+ /// channel opened.
+ pub address: bitcoin::Address,
+ /// The minimum number of block confirmations that are required for the on-chain payment to be
+ /// considered confirmed.
+ pub min_onchain_payment_confirmations: Option<u16>,
+ /// The minimum fee rate for the on-chain payment in case the client wants the channel to be
+ /// opened before the payment is confirmed on-chain (0conf).
+ pub min_fee_for_0conf: Arc<FeeRate>,
+ /// The address where the LSP will send the funds if the order fails.
+ pub refund_onchain_address: Option<Address>,
+}
+
+#[cfg(feature = "uniffi")]
+impl From<lightning_liquidity::lsps1::msgs::LSPS1OnchainPaymentInfo> for LSPS1OnchainPaymentInfo {
+ fn from(value: lightning_liquidity::lsps1::msgs::LSPS1OnchainPaymentInfo) -> Self {
+ Self {
+ state: value.state,
+ expires_at: value.expires_at,
+ fee_total_sat: value.fee_total_sat,
+ order_total_sat: value.order_total_sat,
+ address: value.address,
+ min_onchain_payment_confirmations: value.min_onchain_payment_confirmations,
+ min_fee_for_0conf: Arc::new(value.min_fee_for_0conf),
+ refund_onchain_address: value.refund_onchain_address,
+ }
+ }
+}
+
+/// A Lightning payment using BOLT 11.
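+//
+// Illustrative client flow (editorial sketch; `order` is a hypothetical LSPS1
+// order response): while `state` is still the "expect payment" state, the
+// client pays `invoice` (covering `order_total_sat`, i.e. the requested
+// balance plus `fee_total_sat`) before `expires_at` to have the LSP open the
+// channel.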
+
+/// A Lightning payment using BOLT 11.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct LSPS1Bolt11PaymentInfo {
+    /// Indicates the current state of the payment.
+    pub state: LSPS1PaymentState,
+    /// The datetime when the payment option expires.
+    pub expires_at: LSPSDateTime,
+    /// The total fee the LSP will charge to open this channel in satoshi.
+    pub fee_total_sat: u64,
+    /// The amount the client needs to pay to have the requested channel opened.
+    pub order_total_sat: u64,
+    /// A BOLT11 invoice the client can pay to have the channel opened.
+    pub invoice: Arc<Bolt11Invoice>,
+}
+
+impl From<lightning_liquidity::lsps1::msgs::LSPS1Bolt11PaymentInfo> for LSPS1Bolt11PaymentInfo {
+    fn from(info: lightning_liquidity::lsps1::msgs::LSPS1Bolt11PaymentInfo) -> Self {
+        Self {
+            state: info.state,
+            expires_at: info.expires_at,
+            fee_total_sat: info.fee_total_sat,
+            order_total_sat: info.order_total_sat,
+            invoice: Arc::new(info.invoice.into()),
+        }
+    }
+}
+
+impl UniffiCustomTypeConverter for LSPS1OrderId {
+    type Builtin = String;
+
+    fn into_custom(val: Self::Builtin) -> uniffi::Result<Self> {
+        Ok(Self(val))
+    }
+
+    fn from_custom(obj: Self) -> Self::Builtin {
+        obj.0
+    }
+}
+
+impl UniffiCustomTypeConverter for LSPSDateTime {
+    type Builtin = String;
+
+    fn into_custom(val: Self::Builtin) -> uniffi::Result<Self> {
+        Ok(LSPSDateTime::from_str(&val).map_err(|_| Error::InvalidDateTime)?)
+    }
+
+    fn from_custom(obj: Self) -> Self::Builtin {
+        obj.to_rfc3339()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::num::NonZeroU64;
+    use std::time::{SystemTime, UNIX_EPOCH};
+
+    use lightning::offers::offer::{OfferBuilder, Quantity};
+    use lightning::offers::refund::RefundBuilder;
+
+    use super::*;
+
+    fn create_test_bolt11_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) {
+        let invoice_string = "lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa";
+        let ldk_invoice: LdkBolt11Invoice = invoice_string.parse().unwrap();
+        let wrapped_invoice = Bolt11Invoice::from(ldk_invoice.clone());
+        (ldk_invoice, wrapped_invoice)
+    }
+
+    fn create_test_offer() -> (LdkOffer, Offer) {
+        let pubkey = bitcoin::secp256k1::PublicKey::from_str(
+            "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619",
+        )
+        .unwrap();
+
+        let expiry =
+            (SystemTime::now() + Duration::from_secs(3600)).duration_since(UNIX_EPOCH).unwrap();
+
+        let quantity = NonZeroU64::new(10_000).unwrap();
+
+        let builder = OfferBuilder::new(pubkey)
+            .description("Test offer description".to_string())
+            .amount_msats(100_000)
+            .issuer("Offer issuer".to_string())
+            .absolute_expiry(expiry)
+            .chain(Network::Bitcoin)
+            .supported_quantity(Quantity::Bounded(quantity))
+            .metadata(vec![
+                0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0xba, 0xbe, 0x12, 0x34, 0x56, 0x78, 0x90, 0xab,
+                0xcd, 0xef,
+            ])
+            .unwrap();
+
+        let ldk_offer = builder.build().unwrap();
+        let wrapped_offer = Offer::from(ldk_offer.clone());
+
+        (ldk_offer, wrapped_offer)
+    }
+
+    fn create_test_refund() -> (LdkRefund, Refund) {
+        let payer_key = bitcoin::secp256k1::PublicKey::from_str(
+            "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619",
+        )
+        .unwrap();
+
+        let expiry =
+            (SystemTime::now() + Duration::from_secs(3600)).duration_since(UNIX_EPOCH).unwrap();
+
+        let builder = RefundBuilder::new("Test refund".to_string().into(), payer_key, 100_000)
+            .unwrap()
+            .description("Test refund 
description".to_string()) + .absolute_expiry(expiry) + .quantity(3) + .issuer("test_issuer".to_string()); + + let ldk_refund = builder.build().unwrap(); + let wrapped_refund = Refund::from(ldk_refund.clone()); + + (ldk_refund, wrapped_refund) + } + + fn create_test_bolt12_invoice() -> (LdkBolt12Invoice, Bolt12Invoice) { + let invoice_hex = "0020a5b7104b95f17442d6638143ded62b02c2fda98cdf35841713fd0f44b59286560a000e04682cb028502006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f520227105601015821034b4f0765a115caeff6787a8fb2d976c02467a36aea32901539d76473817937c65904546573745a9c00000068000001000003e75203dee4b3e5d48650caf1faadda53ac6e0dc3f509cc5e9c46defb8aeeec14010348e84fab39b226b1e0696cb6fb40bdb293952c184cf02007fa6e983cd311d189004e7bd75ff9ef069642f2abfa5916099e5a16144e1a6d9b4f246624d3b57d2895d5d2e46fe8661e49717d1663ad2c07b023738370a3e44a960f683040b1862fe36e22347c2dbe429c51af377bdbe01ca0e103f295d1678c68b628957a53a820afcc25763cc67b38aca82067bdf52dc68c061a02575d91c01beca64cc09735c395e91d034841d3e61b58948da631192ce556b85b01028e2284ead4ce184981f4d0f387f8d47295d4fa1dab6a6ae3a417550ac1c8b1aa007b38c926212fbf23154c6ff707621d6eedafc4298b133111d90934bb9d5a2103f0c8e4a3f3daa992334aad300677f23b4285db2ee5caf0a0ecc39c6596c3c4e42318040bec46add3626501f6e422be9c791adc81ea5c83ff0bfa91b7d42bcac0ed128a640fe970da584cff80fd5c12a8ea9b546a2d63515343a933daa21c0000000000000000001800000000000000011d24b2dfac5200000000a404682ca218a820a4a878fb352e63673c05eb07e53563fc8022ff039ad4c66e65848a7cde7ee780aa022710ae03020000b02103800fd75bf6b1e7c5f3fab33a372f6599730e0fae7a30fa4e5c8fbc69c3a87981f0403c9a40e6c9d08e12b0a155101d23a170b4f5b38051b0a0a09a794ce49e820f65d50c8fad7518200d3a28331aa5c668a8f7d70206aaf8bea2e8f05f0904b6e033"; + + let invoice_bytes = hex_utils::to_vec(invoice_hex).expect("Valid hex string"); + + let ldk_invoice = + LdkBolt12Invoice::try_from(invoice_bytes).expect("Valid Bolt12Invoice bytes"); + + let wrapped_invoice = Bolt12Invoice { inner: ldk_invoice.clone() }; + + (ldk_invoice, wrapped_invoice) + } + + #[test] + fn test_invoice_description_conversion() { + let hash = "09d08d4865e8af9266f6cc7c0ae23a1d6bf868207cf8f7c5979b9f6ed850dfb0".to_string(); + let description = Bolt11InvoiceDescription::Hash { hash }; + let converted_description = + lightning_invoice::Bolt11InvoiceDescription::try_from(&description).unwrap(); + let reconverted_description: Bolt11InvoiceDescription = converted_description.into(); + assert_eq!(description, reconverted_description); + } + + #[test] + fn test_bolt11_invoice_basic_properties() { + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); + + assert_eq!( + ldk_invoice.payment_hash().to_string(), + wrapped_invoice.payment_hash().to_string() + ); + assert_eq!(ldk_invoice.amount_milli_satoshis(), wrapped_invoice.amount_milli_satoshis()); + + assert_eq!( + ldk_invoice.min_final_cltv_expiry_delta(), + wrapped_invoice.min_final_cltv_expiry_delta() + ); + assert_eq!( + ldk_invoice.payment_secret().0.to_vec(), + wrapped_invoice.payment_secret().0.to_vec() + ); + + assert_eq!(ldk_invoice.network(), wrapped_invoice.network()); + assert_eq!( + format!("{:?}", ldk_invoice.currency()), + format!("{:?}", wrapped_invoice.currency()) + ); + } + + #[test] + fn test_bolt11_invoice_time_related_fields() { + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); + + assert_eq!(ldk_invoice.expiry_time().as_secs(), wrapped_invoice.expiry_time_seconds()); + assert_eq!( + ldk_invoice.duration_until_expiry().as_secs(), + wrapped_invoice.seconds_until_expiry() + ); + 
assert_eq!( + ldk_invoice.duration_since_epoch().as_secs(), + wrapped_invoice.seconds_since_epoch() + ); + + let future_time = Duration::from_secs(wrapped_invoice.seconds_since_epoch() + 10000); + assert!(!ldk_invoice.would_expire(future_time)); + assert!(!wrapped_invoice.would_expire(future_time.as_secs())); + } + + #[test] + fn test_bolt11_invoice_description() { + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); + + let ldk_description = ldk_invoice.description(); + let wrapped_description = wrapped_invoice.invoice_description(); + + match (ldk_description, &wrapped_description) { + ( + lightning_invoice::Bolt11InvoiceDescriptionRef::Direct(ldk_description), + Bolt11InvoiceDescription::Direct { description }, + ) => { + assert_eq!(ldk_description.to_string(), *description) + }, + ( + lightning_invoice::Bolt11InvoiceDescriptionRef::Hash(ldk_hash), + Bolt11InvoiceDescription::Hash { hash }, + ) => { + assert_eq!(hex_utils::to_string(ldk_hash.0.as_ref()), *hash) + }, + _ => panic!("Description types don't match"), + } + } + + #[test] + fn test_bolt11_invoice_route_hints() { + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); + + let wrapped_route_hints = wrapped_invoice.route_hints(); + let ldk_route_hints = ldk_invoice.route_hints(); + assert_eq!(ldk_route_hints.len(), wrapped_route_hints.len()); + + let ldk_hop = &ldk_route_hints[0].0[0]; + let wrapped_hop = &wrapped_route_hints[0][0]; + assert_eq!(ldk_hop.src_node_id, wrapped_hop.src_node_id); + assert_eq!(ldk_hop.short_channel_id, wrapped_hop.short_channel_id); + assert_eq!(ldk_hop.cltv_expiry_delta, wrapped_hop.cltv_expiry_delta); + assert_eq!(ldk_hop.htlc_minimum_msat, wrapped_hop.htlc_minimum_msat); + assert_eq!(ldk_hop.htlc_maximum_msat, wrapped_hop.htlc_maximum_msat); + assert_eq!(ldk_hop.fees.base_msat, wrapped_hop.fees.base_msat); + assert_eq!(ldk_hop.fees.proportional_millionths, wrapped_hop.fees.proportional_millionths); + } + + #[test] + fn test_bolt11_invoice_roundtrip() { + let (ldk_invoice, wrapped_invoice) = create_test_bolt11_invoice(); + + let invoice_str = wrapped_invoice.to_string(); + let parsed_invoice: LdkBolt11Invoice = invoice_str.parse().unwrap(); + assert_eq!( + ldk_invoice.payment_hash().to_byte_array().to_vec(), + parsed_invoice.payment_hash().to_byte_array().to_vec() + ); + } + + #[test] + fn test_offer() { + let (ldk_offer, wrapped_offer) = create_test_offer(); + match (ldk_offer.description(), wrapped_offer.offer_description()) { + (Some(ldk_desc), Some(wrapped_desc)) => { + assert_eq!(ldk_desc.to_string(), wrapped_desc); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had a description but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had a description but LDK offer did not!"); + }, + } + + match (ldk_offer.amount(), wrapped_offer.amount()) { + (Some(ldk_amount), Some(wrapped_amount)) => { + let ldk_amount: OfferAmount = ldk_amount.into(); + assert_eq!(ldk_amount, wrapped_amount); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK offer had an amount but wrapped offer did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped offer had an amount but LDK offer did not!"); + }, + } + + match (ldk_offer.issuer(), wrapped_offer.issuer()) { + (Some(ldk_issuer), Some(wrapped_issuer)) => { + assert_eq!(ldk_issuer.to_string(), wrapped_issuer); + }, + (None, None) 
=> {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK offer had an issuer but wrapped offer did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped offer had an issuer but LDK offer did not!");
+            },
+        }
+
+        assert_eq!(ldk_offer.is_expired(), wrapped_offer.is_expired());
+        assert_eq!(ldk_offer.id(), wrapped_offer.id());
+        assert_eq!(ldk_offer.is_valid_quantity(10_000), wrapped_offer.is_valid_quantity(10_000));
+        assert_eq!(ldk_offer.expects_quantity(), wrapped_offer.expects_quantity());
+        assert_eq!(
+            ldk_offer.supports_chain(Network::Bitcoin.chain_hash()),
+            wrapped_offer.supports_chain(Network::Bitcoin)
+        );
+        assert_eq!(
+            ldk_offer.chains(),
+            wrapped_offer.chains().iter().map(|c| c.chain_hash()).collect::<Vec<_>>()
+        );
+        match (ldk_offer.metadata(), wrapped_offer.metadata()) {
+            (Some(ldk_metadata), Some(wrapped_metadata)) => {
+                assert_eq!(ldk_metadata.clone(), wrapped_metadata);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK offer had metadata but wrapped offer did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped offer had metadata but LDK offer did not!");
+            },
+        }
+
+        match (ldk_offer.absolute_expiry(), wrapped_offer.absolute_expiry_seconds()) {
+            (Some(ldk_expiry), Some(wrapped_expiry)) => {
+                assert_eq!(ldk_expiry.as_secs(), wrapped_expiry);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK offer had an absolute expiry but wrapped offer did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped offer had an absolute expiry but LDK offer did not!");
+            },
+        }
+
+        match (ldk_offer.issuer_signing_pubkey(), wrapped_offer.issuer_signing_pubkey()) {
+            (Some(ldk_expiry_signing_pubkey), Some(wrapped_issuer_signing_pubkey)) => {
+                assert_eq!(ldk_expiry_signing_pubkey, wrapped_issuer_signing_pubkey);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK offer had an issuer signing pubkey but wrapped offer did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped offer had an issuer signing pubkey but LDK offer did not!");
+            },
+        }
+    }
+
+    #[test]
+    fn test_refund_roundtrip() {
+        let (ldk_refund, _) = create_test_refund();
+
+        let refund_str = ldk_refund.to_string();
+
+        let parsed_refund = Refund::from_str(&refund_str);
+        assert!(parsed_refund.is_ok(), "Failed to parse refund from string!");
+
+        let invalid_result = Refund::from_str("invalid_refund_string");
+        assert!(invalid_result.is_err());
+        assert!(matches!(invalid_result.err().unwrap(), Error::InvalidRefund));
+    }
+
+    #[test]
+    fn test_refund_properties() {
+        let (ldk_refund, wrapped_refund) = create_test_refund();
+
+        assert_eq!(ldk_refund.description().to_string(), wrapped_refund.refund_description());
+        assert_eq!(ldk_refund.amount_msats(), wrapped_refund.amount_msats());
+        assert_eq!(ldk_refund.is_expired(), wrapped_refund.is_expired());
+
+        match (ldk_refund.absolute_expiry(), wrapped_refund.absolute_expiry_seconds()) {
+            (Some(ldk_expiry), Some(wrapped_expiry)) => {
+                assert_eq!(ldk_expiry.as_secs(), wrapped_expiry);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK refund had an expiry but wrapped refund did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped refund had an expiry but LDK refund did not!");
+            },
+        }
+
+        match (ldk_refund.quantity(), wrapped_refund.quantity()) {
+            (Some(ldk_quantity), Some(wrapped_quantity)) => {
+                assert_eq!(ldk_quantity, wrapped_quantity);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK refund had a quantity but wrapped refund did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped refund had a quantity but LDK refund did not!");
+            },
+        }
+
+        match (ldk_refund.issuer(), wrapped_refund.issuer()) {
+            (Some(ldk_issuer), Some(wrapped_issuer)) => {
+                assert_eq!(ldk_issuer.to_string(), wrapped_issuer);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK refund had an issuer but wrapped refund did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped refund had an issuer but LDK refund did not!");
+            },
+        }
+
+        assert_eq!(ldk_refund.payer_metadata().to_vec(), wrapped_refund.payer_metadata());
+        assert_eq!(ldk_refund.payer_signing_pubkey(), wrapped_refund.payer_signing_pubkey());
+
+        if let Ok(network) = Network::try_from(ldk_refund.chain()) {
+            assert_eq!(wrapped_refund.chain(), Some(network));
+        }
+
+        assert_eq!(ldk_refund.payer_note().map(|p| p.to_string()), wrapped_refund.payer_note());
+    }
+
+    #[test]
+    fn test_bolt12_invoice_properties() {
+        let (ldk_invoice, wrapped_invoice) = create_test_bolt12_invoice();
+
+        assert_eq!(
+            ldk_invoice.payment_hash().0.to_vec(),
+            wrapped_invoice.payment_hash().0.to_vec()
+        );
+        assert_eq!(ldk_invoice.amount_msats(), wrapped_invoice.amount_msats());
+        assert_eq!(ldk_invoice.is_expired(), wrapped_invoice.is_expired());
+
+        assert_eq!(ldk_invoice.signing_pubkey(), wrapped_invoice.signing_pubkey());
+
+        assert_eq!(ldk_invoice.created_at().as_secs(), wrapped_invoice.created_at());
+
+        match (ldk_invoice.absolute_expiry(), wrapped_invoice.absolute_expiry_seconds()) {
+            (Some(ldk_expiry), Some(wrapped_expiry)) => {
+                assert_eq!(ldk_expiry.as_secs(), wrapped_expiry);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK invoice had an absolute expiry but wrapped invoice did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped invoice had an absolute expiry but LDK invoice did not!");
+            },
+        }
+
+        assert_eq!(ldk_invoice.relative_expiry().as_secs(), wrapped_invoice.relative_expiry());
+
+        match (ldk_invoice.description(), wrapped_invoice.invoice_description()) {
+            (Some(ldk_desc), Some(wrapped_desc)) => {
+                assert_eq!(ldk_desc.to_string(), wrapped_desc);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK invoice had a description but wrapped invoice did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped invoice had a description but LDK invoice did not!");
+            },
+        }
+
+        match (ldk_invoice.issuer(), wrapped_invoice.issuer()) {
+            (Some(ldk_issuer), Some(wrapped_issuer)) => {
+                assert_eq!(ldk_issuer.to_string(), wrapped_issuer);
+            },
+            (None, None) => {
+                // Both fields are missing which is expected behaviour when converting
+            },
+            (Some(_), None) => {
+                panic!("LDK invoice had an issuer but wrapped invoice did not!");
+            },
+            (None, Some(_)) => {
+                panic!("Wrapped invoice had an issuer but LDK invoice did not!");
+            },
+        }
+
+        match (ldk_invoice.payer_note(), wrapped_invoice.payer_note()) {
+            (Some(ldk_note), Some(wrapped_note)) => {
+                assert_eq!(ldk_note.to_string(), wrapped_note);
+            },
(None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had a payer note but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had a payer note but LDK invoice did not!"); + }, + } + + match (ldk_invoice.metadata(), wrapped_invoice.metadata()) { + (Some(ldk_metadata), Some(wrapped_metadata)) => { + assert_eq!(ldk_metadata.as_slice(), wrapped_metadata.as_slice()); + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had metadata but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had metadata but LDK invoice did not!"); + }, + } + + assert_eq!(ldk_invoice.quantity(), wrapped_invoice.quantity()); + + assert_eq!(ldk_invoice.chain().to_bytes().to_vec(), wrapped_invoice.chain()); + + match (ldk_invoice.offer_chains(), wrapped_invoice.offer_chains()) { + (Some(ldk_chains), Some(wrapped_chains)) => { + assert_eq!(ldk_chains.len(), wrapped_chains.len()); + for (i, ldk_chain) in ldk_chains.iter().enumerate() { + assert_eq!(ldk_chain.to_bytes().to_vec(), wrapped_chains[i]); + } + }, + (None, None) => { + // Both fields are missing which is expected behaviour when converting + }, + (Some(_), None) => { + panic!("LDK invoice had offer chains but wrapped invoice did not!"); + }, + (None, Some(_)) => { + panic!("Wrapped invoice had offer chains but LDK invoice did not!"); + }, + } + + let ldk_fallbacks = ldk_invoice.fallbacks(); + let wrapped_fallbacks = wrapped_invoice.fallback_addresses(); + assert_eq!(ldk_fallbacks.len(), wrapped_fallbacks.len()); + for (i, ldk_fallback) in ldk_fallbacks.iter().enumerate() { + assert_eq!(*ldk_fallback, wrapped_fallbacks[i]); + } + + assert_eq!(ldk_invoice.encode(), wrapped_invoice.encode()); + + assert_eq!(ldk_invoice.signable_hash().to_vec(), wrapped_invoice.signable_hash()); + } +} diff --git a/src/gossip.rs b/src/gossip.rs index a8a6e3831..01aff4742 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -5,23 +5,24 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
+use std::future::Future;
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::sync::Arc;
+use std::time::Duration;
+
+use lightning::util::native_async::FutureSpawner;
+use lightning_block_sync::gossip::GossipVerifier;
+
 use crate::chain::ChainSource;
 use crate::config::RGS_SYNC_TIMEOUT_SECS;
-use crate::logger::{log_error, log_trace, LdkLogger, Logger};
+use crate::logger::{log_trace, LdkLogger, Logger};
+use crate::runtime::Runtime;
 use crate::types::{GossipSync, Graph, P2PGossipSync, PeerManager, RapidGossipSync, UtxoLookup};
 use crate::Error;
 
-use lightning_block_sync::gossip::{FutureSpawner, GossipVerifier};
-
-use std::future::Future;
-use std::sync::atomic::{AtomicU32, Ordering};
-use std::sync::{Arc, RwLock};
-use std::time::Duration;
-
 pub(crate) enum GossipSource {
     P2PNetwork {
         gossip_sync: Arc<P2PGossipSync>,
-        logger: Arc<Logger>,
     },
     RapidGossipSync {
         gossip_sync: Arc<RapidGossipSync>,
@@ -38,7 +39,7 @@ impl GossipSource {
             None::<Arc<UtxoLookup>>,
             Arc::clone(&logger),
         ));
-        Self::P2PNetwork { gossip_sync, logger }
+        Self::P2PNetwork { gossip_sync }
     }
 
     pub fn new_rgs(
@@ -63,12 +64,12 @@ impl GossipSource {
     pub(crate) fn set_gossip_verifier(
         &self, chain_source: Arc<ChainSource>, peer_manager: Arc<PeerManager>,
-        runtime: Arc<RwLock<Option<Arc<tokio::runtime::Runtime>>>>,
+        runtime: Arc<Runtime>,
     ) {
         match self {
-            Self::P2PNetwork { gossip_sync, logger } => {
+            Self::P2PNetwork { gossip_sync } => {
                 if let Some(utxo_source) = chain_source.as_utxo_source() {
-                    let spawner = RuntimeSpawner::new(Arc::clone(&runtime), Arc::clone(&logger));
+                    let spawner = RuntimeSpawner::new(Arc::clone(&runtime));
                     let gossip_verifier = Arc::new(GossipVerifier::new(
                         utxo_source,
                         spawner,
@@ -133,28 +134,17 @@ impl GossipSource {
 }
 
 pub(crate) struct RuntimeSpawner {
-    runtime: Arc<RwLock<Option<Arc<tokio::runtime::Runtime>>>>,
-    logger: Arc<Logger>,
+    runtime: Arc<Runtime>,
 }
 
 impl RuntimeSpawner {
-    pub(crate) fn new(
-        runtime: Arc<RwLock<Option<Arc<tokio::runtime::Runtime>>>>, logger: Arc<Logger>,
-    ) -> Self {
-        Self { runtime, logger }
+    pub(crate) fn new(runtime: Arc<Runtime>) -> Self {
+        Self { runtime }
     }
 }
 
 impl FutureSpawner for RuntimeSpawner {
     fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
-        let rt_lock = self.runtime.read().unwrap();
-        if rt_lock.is_none() {
-            log_error!(self.logger, "Tried spawing a future while the runtime wasn't available. This should never happen.");
-            debug_assert!(false, "Tried spawing a future while the runtime wasn't available. This should never happen.");
-            return;
-        }
-
-        let runtime = rt_lock.as_ref().unwrap();
-        runtime.spawn(future);
+        self.runtime.spawn_cancellable_background_task(future);
     }
 }
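
`RuntimeSpawner` now satisfies LDK's `lightning::util::native_async::FutureSpawner` rather than the removed `lightning_block_sync::gossip::FutureSpawner`. The trait only asks for a fire-and-forget `spawn`; for comparison, a plain tokio-backed implementation of the same shape could look like the sketch below (illustrative only — the crate's own `Runtime::spawn_cancellable_background_task` additionally ties the task to the node's lifecycle):

    use std::future::Future;

    use lightning::util::native_async::FutureSpawner;

    struct TokioSpawner {
        handle: tokio::runtime::Handle,
    }

    impl FutureSpawner for TokioSpawner {
        fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
            // Gossip-verification futures produce no result worth joining on,
            // so dropping the JoinHandle is deliberate.
            let _ = self.handle.spawn(future);
        }
    }
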
diff --git a/src/graph.rs b/src/graph.rs
index 3e4e58c88..f2daebb9f 100644
--- a/src/graph.rs
+++ b/src/graph.rs
@@ -7,19 +7,17 @@
 
 //! Objects for querying the network graph.
 
-use crate::types::Graph;
-
-use lightning::routing::gossip::NodeId;
+use std::sync::Arc;
 
 #[cfg(feature = "uniffi")]
 use lightning::ln::msgs::SocketAddress;
+use lightning::routing::gossip::NodeId;
 #[cfg(feature = "uniffi")]
 use lightning::routing::gossip::RoutingFees;
-
 #[cfg(not(feature = "uniffi"))]
 use lightning::routing::gossip::{ChannelInfo, NodeInfo};
 
-use std::sync::Arc;
+use crate::types::Graph;
 
 /// Represents the network as nodes and channels between them.
 pub struct NetworkGraph {
diff --git a/src/io/mod.rs b/src/io/mod.rs
index 3192dbb86..38fba5114 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -27,11 +27,6 @@ pub(crate) const PEER_INFO_PERSISTENCE_KEY: &str = "peers";
 pub(crate) const PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str = "payments";
 pub(crate) const PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 
-/// The spendable output information used to persisted under this prefix until LDK Node v0.3.0.
-pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: &str =
-    "spendable_outputs";
-pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
-
 /// The node metrics will be persisted under this key.
 pub(crate) const NODE_METRICS_PRIMARY_NAMESPACE: &str = "";
 pub(crate) const NODE_METRICS_SECONDARY_NAMESPACE: &str = "";
@@ -78,3 +73,8 @@ pub(crate) const BDK_WALLET_TX_GRAPH_KEY: &str = "tx_graph";
 pub(crate) const BDK_WALLET_INDEXER_PRIMARY_NAMESPACE: &str = "bdk_wallet";
 pub(crate) const BDK_WALLET_INDEXER_SECONDARY_NAMESPACE: &str = "";
 pub(crate) const BDK_WALLET_INDEXER_KEY: &str = "indexer";
+
+/// [`StaticInvoice`]s will be persisted under this key.
+///
+/// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice
+pub(crate) const STATIC_INVOICE_STORE_PRIMARY_NAMESPACE: &str = "static_invoices";
diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs
index 0486b8a4f..f8f80c924 100644
--- a/src/io/sqlite_store/migrations.rs
+++ b/src/io/sqlite_store/migrations.rs
@@ -5,9 +5,8 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
 
-use rusqlite::Connection;
-
 use lightning::io;
+use rusqlite::Connection;
 
 pub(super) fn migrate_schema(
     connection: &mut Connection, kv_table_name: &str, from_version: u16, to_version: u16,
@@ -75,14 +74,13 @@ pub(super) fn migrate_schema(
 #[cfg(test)]
 mod tests {
-    use crate::io::sqlite_store::SqliteStore;
-    use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path};
-
-    use lightning::util::persist::KVStore;
+    use std::fs;
 
+    use lightning::util::persist::KVStoreSync;
     use rusqlite::{named_params, Connection};
-    use std::fs;
 
+    use crate::io::sqlite_store::SqliteStore;
+    use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path};
 
     #[test]
     fn rwrl_post_schema_1_migration() {
@@ -161,7 +159,8 @@ mod tests {
     }
 
     // Check we migrate the db just fine without losing our written data.
-    let store = SqliteStore::new(temp_path, Some(db_file_name), Some(kv_table_name)).unwrap();
+    let store =
+        SqliteStore::new(temp_path, Some(db_file_name), Some(kv_table_name), None).unwrap();
     let res = store.read(&test_namespace, "", &test_key).unwrap();
     assert_eq!(res, test_data);
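
`migrate_schema` above is keyed off SQLite's `user_version` pragma, which the store compares against its current schema version on startup. A hedged sketch of that pragma handling with rusqlite (the helper names here are illustrative, not part of the patch):

    use rusqlite::Connection;

    fn current_schema_version(connection: &Connection) -> u16 {
        // SQLite reserves `user_version` for application-defined versioning.
        connection
            .query_row("SELECT user_version FROM pragma_user_version", [], |row| row.get(0))
            .expect("querying user_version should not fail")
    }

    fn bump_schema_version(connection: &Connection, to_version: u16) {
        connection
            .pragma_update(None, "user_version", to_version)
            .expect("updating user_version should not fail");
    }
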
diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs
index 7bbcfdd4a..337211584 100644
--- a/src/io/sqlite_store/mod.rs
+++ b/src/io/sqlite_store/mod.rs
@@ -6,20 +6,25 @@
 // accordance with one or both of these licenses.
 
 //! Objects related to [`SqliteStore`] live here.
 
-use crate::io::utils::check_namespace_key_validity;
+use std::boxed::Box;
+use std::collections::HashMap;
+use std::fs;
+use std::future::Future;
+use std::path::PathBuf;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::{Arc, Mutex};
 
 use lightning::io;
+
 use lightning::util::persist::{
-    KVStore, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
-    NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+    KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY,
+    NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
 };
-use lightning::util::string::PrintableString;
-
+use lightning_types::string::PrintableString;
 use rusqlite::{named_params, Connection};
 
-use std::fs;
-use std::path::PathBuf;
-use std::sync::{Arc, Mutex};
+use crate::io::utils::check_namespace_key_validity;
 
 mod migrations;
 
@@ -37,17 +42,8 @@ pub const DEFAULT_KV_TABLE_NAME: &str = "ldk_data";
 // The current SQLite `user_version`, which we can use if we'd ever need to do a schema migration.
 const SCHEMA_USER_VERSION: u16 = 2;
 
-/// A [`KVStore`] implementation that writes to and reads from an [SQLite] database.
-///
-/// [SQLite]: https://sqlite.org
-pub struct SqliteStore {
-    connection: Arc<Mutex<Connection>>,
-    data_dir: PathBuf,
-    kv_table_name: String,
-    config: SqliteStoreConfig,
-}
-
 /// Alby: extended SqliteStore configuration.
+#[derive(Clone)]
 pub struct SqliteStoreConfig {
     /// Do not persist network graph.
     pub(crate) transient_graph: bool,
@@ -59,6 +55,17 @@ impl Default for SqliteStoreConfig {
     }
 }
 
+/// A [`KVStoreSync`] implementation that writes to and reads from an [SQLite] database.
+///
+/// [SQLite]: https://sqlite.org
+pub struct SqliteStore {
+    inner: Arc<SqliteStoreInner>,
+
+    // Version counter to ensure that writes are applied in the correct order. It is assumed that read and list
+    // operations aren't sensitive to the order of execution.
+    next_write_version: AtomicU64,
+}
+
 impl SqliteStore {
     /// Constructs a new [`SqliteStore`].
     ///
@@ -68,6 +75,201 @@ impl SqliteStore {
     /// Similarly, the given `kv_table_name` will be used or default to [`DEFAULT_KV_TABLE_NAME`].
     pub fn new(
         data_dir: PathBuf, db_file_name: Option<String>, kv_table_name: Option<String>,
+        config: Option<SqliteStoreConfig>,
+    ) -> io::Result<Self> {
+        let config = config.unwrap_or_default();
+        let inner =
+            Arc::new(SqliteStoreInner::new(data_dir, db_file_name, kv_table_name, config.clone())?);
+        let next_write_version = AtomicU64::new(1);
+        let store = Self { inner, next_write_version };
+
+        // Alby: enable not saving network graph (Alby Cloud)
+        if config.transient_graph {
+            // Drop existing network graph if it has been persisted before.
+            KVStoreSync::remove(
+                &store,
+                NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+                NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+                NETWORK_GRAPH_PERSISTENCE_KEY,
+                false,
+            )?;
+        }
+
+        Ok(store)
+    }
+
+    fn build_locking_key(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+    ) -> String {
+        format!("{}#{}#{}", primary_namespace, secondary_namespace, key)
+    }
+
+    fn get_new_version_and_lock_ref(&self, locking_key: String) -> (Arc<Mutex<u64>>, u64) {
+        let version = self.next_write_version.fetch_add(1, Ordering::Relaxed);
+        if version == u64::MAX {
+            panic!("SqliteStore version counter overflowed");
+        }
+
+        // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for
+        // cleaning up unused locks.
+        let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key);
+
+        (inner_lock_ref, version)
+    }
+
+    /// Returns the data directory.
+    pub fn get_data_dir(&self) -> PathBuf {
+        self.inner.data_dir.clone()
+    }
+}
+
+impl KVStore for SqliteStore {
+    fn read(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + Send>> {
+        let primary_namespace = primary_namespace.to_string();
+        let secondary_namespace = secondary_namespace.to_string();
+        let key = key.to_string();
+        let inner = Arc::clone(&self.inner);
+        let fut = tokio::task::spawn_blocking(move || {
+            inner.read_internal(&primary_namespace, &secondary_namespace, &key)
+        });
+        Box::pin(async move {
+            fut.await.unwrap_or_else(|e| {
+                let msg = format!("Failed to execute IO operation due to join error: {}", e);
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            })
+        })
+    }
+
+    fn write(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+    ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> {
+        let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+        let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+        let primary_namespace = primary_namespace.to_string();
+        let secondary_namespace = secondary_namespace.to_string();
+        let key = key.to_string();
+        let inner = Arc::clone(&self.inner);
+        let fut = tokio::task::spawn_blocking(move || {
+            inner.write_internal(
+                inner_lock_ref,
+                locking_key,
+                version,
+                &primary_namespace,
+                &secondary_namespace,
+                &key,
+                buf,
+            )
+        });
+        Box::pin(async move {
+            fut.await.unwrap_or_else(|e| {
+                let msg = format!("Failed to execute IO operation due to join error: {}", e);
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            })
+        })
+    }
+
+    fn remove(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+    ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> {
+        let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+        let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+        let primary_namespace = primary_namespace.to_string();
+        let secondary_namespace = secondary_namespace.to_string();
+        let key = key.to_string();
+        let inner = Arc::clone(&self.inner);
+        let fut = tokio::task::spawn_blocking(move || {
+            inner.remove_internal(
+                inner_lock_ref,
+                locking_key,
+                version,
+                &primary_namespace,
+                &secondary_namespace,
+                &key,
+            )
+        });
+        Box::pin(async move {
+            fut.await.unwrap_or_else(|e| {
+                let msg = format!("Failed to execute IO operation due to join error: {}", e);
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            })
+        })
+    }
+
+    fn list(
+        &self, primary_namespace: &str, secondary_namespace: &str,
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + Send>> {
+        let primary_namespace = primary_namespace.to_string();
+        let secondary_namespace = secondary_namespace.to_string();
+        let inner = Arc::clone(&self.inner);
+        let fut = tokio::task::spawn_blocking(move || {
+            inner.list_internal(&primary_namespace, &secondary_namespace)
+        });
+        Box::pin(async move {
+            fut.await.unwrap_or_else(|e| {
+                let msg = format!("Failed to execute IO operation due to join error: {}", e);
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            })
+        })
+    }
+}
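
The async `KVStore` surface above hands each blocking SQLite call to `tokio::task::spawn_blocking`, so it must be awaited from within a tokio runtime. A minimal caller sketch (namespace and key values are arbitrary):

    use lightning::util::persist::KVStore;

    async fn roundtrip(store: &SqliteStore) -> Result<(), lightning::io::Error> {
        // The write is ordered via the version counter drawn in write(),
        // while the actual SQL runs on tokio's blocking thread pool.
        KVStore::write(store, "testspace", "", "testkey", vec![42u8; 32]).await?;
        let read_back = KVStore::read(store, "testspace", "", "testkey").await?;
        assert_eq!(read_back, vec![42u8; 32]);
        Ok(())
    }
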
+
+impl KVStoreSync for SqliteStore {
+    fn read(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+    ) -> io::Result<Vec<u8>> {
+        self.inner.read_internal(primary_namespace, secondary_namespace, key)
+    }
+
+    fn write(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+    ) -> io::Result<()> {
+        let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+        let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+        self.inner.write_internal(
+            inner_lock_ref,
+            locking_key,
+            version,
+            primary_namespace,
+            secondary_namespace,
+            key,
+            buf,
+        )
+    }
+
+    fn remove(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+    ) -> io::Result<()> {
+        let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+        let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+        self.inner.remove_internal(
+            inner_lock_ref,
+            locking_key,
+            version,
+            primary_namespace,
+            secondary_namespace,
+            key,
+        )
+    }
+
+    fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
+        self.inner.list_internal(primary_namespace, secondary_namespace)
+    }
+}
+
+struct SqliteStoreInner {
+    connection: Arc<Mutex<Connection>>,
+    data_dir: PathBuf,
+    kv_table_name: String,
+    write_version_locks: Mutex<HashMap<String, Arc<Mutex<u64>>>>,
+    config: SqliteStoreConfig,
+}
+
+impl SqliteStoreInner {
+    fn new(
+        data_dir: PathBuf, db_file_name: Option<String>, kv_table_name: Option<String>,
+        config: SqliteStoreConfig,
     ) -> io::Result<Self> {
         let db_file_name = db_file_name.unwrap_or(DEFAULT_SQLITE_DB_FILE_NAME.to_string());
         let kv_table_name = kv_table_name.unwrap_or(DEFAULT_KV_TABLE_NAME.to_string());
@@ -136,38 +338,16 @@
         })?;
         let connection = Arc::new(Mutex::new(connection));
 
-        Ok(Self { connection, data_dir, kv_table_name, config: SqliteStoreConfig::default() })
-    }
-
-    /// Alby: constructs a new [`SqliteStore`] with an extended configuration.
-    pub fn with_config(
-        data_dir: PathBuf, db_file_name: Option<String>, kv_table_name: Option<String>,
-        config: SqliteStoreConfig,
-    ) -> io::Result<Self> {
-        let mut ret = SqliteStore::new(data_dir, db_file_name, kv_table_name)?;
-
-        if config.transient_graph {
-            // Drop existing network graph if it has been persisted before.
-            ret.remove(
-                NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
-                NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
-                NETWORK_GRAPH_PERSISTENCE_KEY,
-                false,
-            )?;
-        }
-
-        ret.config = config;
-        Ok(ret)
+        let write_version_locks = Mutex::new(HashMap::new());
+        Ok(Self { connection, data_dir, kv_table_name, write_version_locks, config })
     }
 
-    /// Returns the data directory.
-    pub fn get_data_dir(&self) -> PathBuf {
-        self.data_dir.clone()
+    fn get_inner_lock_ref(&self, locking_key: String) -> Arc<Mutex<u64>> {
+        let mut outer_lock = self.write_version_locks.lock().unwrap();
+        Arc::clone(&outer_lock.entry(locking_key).or_default())
     }
-}
 
-impl KVStore for SqliteStore {
-    fn read(
+    fn read_internal(
         &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
     ) -> io::Result<Vec<u8>> {
         check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?;
@@ -224,8 +404,9 @@ impl KVStore for SqliteStore {
         Ok(res)
     }
 
-    fn write(
-        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8],
+    fn write_internal(
+        &self, inner_lock_ref: Arc<Mutex<u64>>, locking_key: String, version: u64,
+        primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
     ) -> io::Result<()> {
         check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?;
@@ -238,70 +419,77 @@ impl KVStore for SqliteStore {
             return Ok(());
         }
 
-        let locked_conn = self.connection.lock().unwrap();
+        self.execute_locked_write(inner_lock_ref, locking_key, version, || {
+            let locked_conn = self.connection.lock().unwrap();
 
-        let sql = format!(
-            "INSERT OR REPLACE INTO {} (primary_namespace, secondary_namespace, key, value) VALUES (:primary_namespace, :secondary_namespace, :key, :value);",
-            self.kv_table_name
-        );
+            let sql = format!(
+                "INSERT OR REPLACE INTO {} (primary_namespace, secondary_namespace, key, value) VALUES (:primary_namespace, :secondary_namespace, :key, :value);",
+                self.kv_table_name
+            );
 
-        let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| {
-            let msg = format!("Failed to prepare statement: {}", e);
-            io::Error::new(io::ErrorKind::Other, msg)
-        })?;
+            let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| {
+                let msg = format!("Failed to prepare statement: {}", e);
+                io::Error::new(io::ErrorKind::Other, msg)
+            })?;
 
-        stmt.execute(named_params! {
-            ":primary_namespace": primary_namespace,
-            ":secondary_namespace": secondary_namespace,
-            ":key": key,
-            ":value": buf,
-        })
-        .map(|_| ())
-        .map_err(|e| {
-            let msg = format!(
-                "Failed to write to key {}/{}/{}: {}",
-                PrintableString(primary_namespace),
-                PrintableString(secondary_namespace),
-                PrintableString(key),
-                e
-            );
-            io::Error::new(io::ErrorKind::Other, msg)
+            stmt.execute(named_params! {
{ + ":primary_namespace": primary_namespace, + ":secondary_namespace": secondary_namespace, + ":key": key, + ":value": buf, + }) + .map(|_| ()) + .map_err(|e| { + let msg = format!( + "Failed to write to key {}/{}/{}: {}", + PrintableString(primary_namespace), + PrintableString(secondary_namespace), + PrintableString(key), + e + ); + io::Error::new(io::ErrorKind::Other, msg) + }) }) } - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + fn remove_internal( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, + primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; - let locked_conn = self.connection.lock().unwrap(); + self.execute_locked_write(inner_lock_ref, locking_key, version, || { + let locked_conn = self.connection.lock().unwrap(); - let sql = format!("DELETE FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); + let sql = format!("DELETE FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); - let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { - let msg = format!("Failed to prepare statement: {}", e); - io::Error::new(io::ErrorKind::Other, msg) - })?; + let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { + let msg = format!("Failed to prepare statement: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; - stmt.execute(named_params! { - ":primary_namespace": primary_namespace, - ":secondary_namespace": secondary_namespace, - ":key": key, + stmt.execute(named_params! { + ":primary_namespace": primary_namespace, + ":secondary_namespace": secondary_namespace, + ":key": key, + }) + .map_err(|e| { + let msg = format!( + "Failed to delete key {}/{}/{}: {}", + PrintableString(primary_namespace), + PrintableString(secondary_namespace), + PrintableString(key), + e + ); + io::Error::new(io::ErrorKind::Other, msg) + })?; + Ok(()) }) - .map_err(|e| { - let msg = format!( - "Failed to delete key {}/{}/{}: {}", - PrintableString(primary_namespace), - PrintableString(secondary_namespace), - PrintableString(key), - e - ); - io::Error::new(io::ErrorKind::Other, msg) - })?; - Ok(()) } - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + fn list_internal( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; let locked_conn = self.connection.lock().unwrap(); @@ -339,6 +527,46 @@ impl KVStore for SqliteStore { Ok(keys) } + + fn execute_locked_write Result<(), lightning::io::Error>>( + &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, + ) -> Result<(), lightning::io::Error> { + let res = { + let mut last_written_version = inner_lock_ref.lock().unwrap(); + + // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual + // consistency. + let is_stale_version = version <= *last_written_version; + + // If the version is not stale, we execute the callback. Otherwise we can and must skip writing. 
+            if is_stale_version {
+                Ok(())
+            } else {
+                callback().map(|_| {
+                    *last_written_version = version;
+                })
+            }
+        };
+
+        self.clean_locks(&inner_lock_ref, locking_key);
+
+        res
+    }
+
+    fn clean_locks(&self, inner_lock_ref: &Arc<Mutex<u64>>, locking_key: String) {
+        // If there are no arcs in use elsewhere, this means that there are no in-flight writes. We can remove the map entry
+        // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in
+        // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already
+        // counted.
+        let mut outer_lock = self.write_version_locks.lock().unwrap();
+
+        let strong_count = Arc::strong_count(&inner_lock_ref);
+        debug_assert!(strong_count >= 2, "Unexpected SqliteStore strong count");
+
+        if strong_count == 2 {
+            outer_lock.remove(&locking_key);
+        }
+    }
 }
 
 #[cfg(test)]
@@ -350,7 +578,7 @@ mod tests {
 
     impl Drop for SqliteStore {
         fn drop(&mut self) {
-            match fs::remove_dir_all(&self.data_dir) {
+            match fs::remove_dir_all(&self.inner.data_dir) {
                 Err(e) => println!("Failed to remove test store directory: {}", e),
                 _ => {},
             }
@@ -365,6 +593,7 @@ mod tests {
             temp_path,
             Some("test_db".to_string()),
             Some("test_table".to_string()),
+            None,
         )
         .unwrap();
         do_read_write_remove_list_persist(&store);
@@ -378,12 +607,14 @@ mod tests {
             temp_path.clone(),
             Some("test_db_0".to_string()),
            Some("test_table".to_string()),
+            None,
        )
        .unwrap();
        let store_1 = SqliteStore::new(
            temp_path,
            Some("test_db_1".to_string()),
            Some("test_table".to_string()),
+            None,
        )
        .unwrap();
        do_test_store(&store_0, &store_1)
@@ -397,8 +628,10 @@ pub mod bench {
 
     /// Bench!
     pub fn bench_sends(bench: &mut Criterion) {
-        let store_a = super::SqliteStore::new("bench_sqlite_store_a".into(), None, None).unwrap();
-        let store_b = super::SqliteStore::new("bench_sqlite_store_b".into(), None, None).unwrap();
+        let store_a =
+            super::SqliteStore::new("bench_sqlite_store_a".into(), None, None, None).unwrap();
+        let store_b =
+            super::SqliteStore::new("bench_sqlite_store_b".into(), None, None, None).unwrap();
         lightning::ln::channelmanager::bench::bench_two_sends(
             bench,
             "bench_sqlite_persisted_sends",
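
The guarantee implemented by `get_new_version_and_lock_ref` and `execute_locked_write` is last-write-wins per key: every write draws a globally increasing version up front, and a write only lands if no higher version has already been applied to that key. The same idea in a self-contained sketch (simplified to a single in-memory slot; the store additionally garbage-collects per-key locks via `clean_locks`):

    use std::sync::atomic::{AtomicU64, Ordering};
    use std::sync::Mutex;

    struct VersionedSlot {
        next_version: AtomicU64,   // initialized to 1
        last_written: Mutex<u64>,  // 0 means nothing written yet
        value: Mutex<Option<Vec<u8>>>,
    }

    impl VersionedSlot {
        // Mirrors get_new_version_and_lock_ref(): reserve a version before the
        // (possibly reordered) write actually executes.
        fn begin_write(&self) -> u64 {
            self.next_version.fetch_add(1, Ordering::Relaxed)
        }

        // Mirrors execute_locked_write(): stale versions are skipped, so a
        // delayed older write can never clobber a newer one.
        fn commit(&self, version: u64, buf: Vec<u8>) {
            let mut last = self.last_written.lock().unwrap();
            if version > *last {
                *self.value.lock().unwrap() = Some(buf);
                *last = version;
            }
        }
    }
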
diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs
index df806779e..310638dd8 100644
--- a/src/io/test_utils.rs
+++ b/src/io/test_utils.rs
@@ -5,47 +5,183 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
 
+use std::boxed::Box;
+use std::collections::{hash_map, HashMap};
+use std::future::Future;
+use std::panic::RefUnwindSafe;
+use std::path::PathBuf;
+use std::pin::Pin;
+use std::sync::Mutex;
+
+use lightning::events::ClosureReason;
 use lightning::ln::functional_test_utils::{
     connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block,
-    create_network, create_node_cfgs, create_node_chanmgrs, send_payment,
+    create_network, create_node_cfgs, create_node_chanmgrs, send_payment, TestChanMonCfg,
+};
+use lightning::util::persist::{
+    KVStore, KVStoreSync, MonitorUpdatingPersister, KVSTORE_NAMESPACE_KEY_MAX_LEN,
 };
-use lightning::util::persist::{read_channel_monitors, KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN};
-
-use lightning::events::ClosureReason;
 use lightning::util::test_utils;
-use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event};
+use lightning::{check_added_monitors, check_closed_broadcast, check_closed_event, io};
+use rand::distr::Alphanumeric;
+use rand::{rng, Rng};
 
-use rand::distributions::Alphanumeric;
-use rand::{thread_rng, Rng};
+type TestMonitorUpdatePersister<'a, K> = MonitorUpdatingPersister<
+    &'a K,
+    &'a test_utils::TestLogger,
+    &'a test_utils::TestKeysInterface,
+    &'a test_utils::TestKeysInterface,
+    &'a test_utils::TestBroadcaster,
+    &'a test_utils::TestFeeEstimator,
+>;
 
-use std::panic::RefUnwindSafe;
-use std::path::PathBuf;
+const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
+
+pub struct InMemoryStore {
+    persisted_bytes: Mutex<HashMap<String, HashMap<String, Vec<u8>>>>,
+}
+
+impl InMemoryStore {
+    pub fn new() -> Self {
+        let persisted_bytes = Mutex::new(HashMap::new());
+        Self { persisted_bytes }
+    }
+
+    fn read_internal(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+    ) -> io::Result<Vec<u8>> {
+        let persisted_lock = self.persisted_bytes.lock().unwrap();
+        let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+
+        if let Some(outer_ref) = persisted_lock.get(&prefixed) {
+            if let Some(inner_ref) = outer_ref.get(key) {
+                let bytes = inner_ref.clone();
+                Ok(bytes)
+            } else {
+                Err(io::Error::new(io::ErrorKind::NotFound, "Key not found"))
+            }
+        } else {
+            Err(io::Error::new(io::ErrorKind::NotFound, "Namespace not found"))
+        }
+    }
+
+    fn write_internal(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+    ) -> io::Result<()> {
+        let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+        let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+        let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+        outer_e.insert(key.to_string(), buf);
+        Ok(())
+    }
+
+    fn remove_internal(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool,
+    ) -> io::Result<()> {
+        let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+        let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+        if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
+            outer_ref.remove(&key.to_string());
+        }
+
+        Ok(())
+    }
+
+    fn list_internal(
+        &self, primary_namespace: &str, secondary_namespace: &str,
+    ) -> io::Result<Vec<String>> {
+        let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+        let prefixed = format!("{primary_namespace}/{secondary_namespace}");
+        match persisted_lock.entry(prefixed) {
+            hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
+            hash_map::Entry::Vacant(_) => Ok(Vec::new()),
+        }
+    }
+}
+
+impl KVStore for InMemoryStore {
+    fn read(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + 'static + Send>> {
+        let res = self.read_internal(&primary_namespace, &secondary_namespace, &key);
+        Box::pin(async move { res })
+    }
+    fn write(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+    ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + 'static + Send>> {
+        let res = self.write_internal(&primary_namespace, &secondary_namespace, &key, buf);
+        Box::pin(async move { res })
+    }
+    fn remove(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+    ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + 'static + Send>> {
+        let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy);
+        Box::pin(async move { res })
+    }
+    fn list(
+        &self, primary_namespace: &str, secondary_namespace: &str,
+    ) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + 'static + Send>> {
+        let res = self.list_internal(primary_namespace, secondary_namespace);
+        Box::pin(async move { res })
+    }
+}
+
+impl KVStoreSync for InMemoryStore {
+    fn read(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str,
+    ) -> io::Result<Vec<u8>> {
+        self.read_internal(primary_namespace, secondary_namespace, key)
+    }
+
+    fn write(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>,
+    ) -> io::Result<()> {
+        self.write_internal(primary_namespace, secondary_namespace, key, buf)
+    }
+
+    fn remove(
+        &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
+    ) -> io::Result<()> {
+        self.remove_internal(primary_namespace, secondary_namespace, key, lazy)
+    }
+
+    fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result<Vec<String>> {
+        self.list_internal(primary_namespace, secondary_namespace)
+    }
+}
+
+unsafe impl Sync for InMemoryStore {}
+unsafe impl Send for InMemoryStore {}
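
`InMemoryStore` implements both trait flavors on top of the same locked map, so sync and async callers observe identical state; the async methods simply resolve immediately. A short test-style sketch of how the harness below exercises it:

    fn in_memory_store_roundtrip() {
        let store = InMemoryStore::new();

        // KVStoreSync is the surface the persistence test harness drives.
        KVStoreSync::write(&store, "ns", "", "key", vec![1u8, 2, 3]).unwrap();
        assert_eq!(KVStoreSync::read(&store, "ns", "", "key").unwrap(), vec![1u8, 2, 3]);

        KVStoreSync::remove(&store, "ns", "", "key", false).unwrap();
        assert!(KVStoreSync::read(&store, "ns", "", "key").is_err());
    }
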
- kv_store.write("", "", key, &data).unwrap(); - let res = std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, &data)); + kv_store.write("", "", key, data.clone()).unwrap(); + let res = + std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, data.clone())); assert!(res.is_err()); let res = std::panic::catch_unwind(|| { - kv_store.write(primary_namespace, secondary_namespace, "", &data) + kv_store.write(primary_namespace, secondary_namespace, "", data.clone()) }); assert!(res.is_err()); @@ -63,7 +199,7 @@ pub(crate) fn do_read_write_remove_list_persist(kv_s // Ensure we have no issue operating with primary_namespace/secondary_namespace/key being KVSTORE_NAMESPACE_KEY_MAX_LEN let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect(); - kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap(); + kv_store.write(&max_chars, &max_chars, &max_chars, data.clone()).unwrap(); let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap(); assert_eq!(listed_keys.len(), 1); @@ -78,27 +214,50 @@ pub(crate) fn do_read_write_remove_list_persist(kv_s assert_eq!(listed_keys.len(), 0); } +pub(crate) fn create_persister<'a, K: KVStoreSync + Sync>( + store: &'a K, chanmon_cfg: &'a TestChanMonCfg, max_pending_updates: u64, +) -> TestMonitorUpdatePersister<'a, K> { + MonitorUpdatingPersister::new( + store, + &chanmon_cfg.logger, + max_pending_updates, + &chanmon_cfg.keys_manager, + &chanmon_cfg.keys_manager, + &chanmon_cfg.tx_broadcaster, + &chanmon_cfg.fee_estimator, + ) +} + +pub(crate) fn create_chain_monitor<'a, K: KVStoreSync + Sync>( + chanmon_cfg: &'a TestChanMonCfg, persister: &'a TestMonitorUpdatePersister<'a, K>, +) -> test_utils::TestChainMonitor<'a> { + test_utils::TestChainMonitor::new( + Some(&chanmon_cfg.chain_source), + &chanmon_cfg.tx_broadcaster, + &chanmon_cfg.logger, + &chanmon_cfg.fee_estimator, + persister, + &chanmon_cfg.keys_manager, + ) +} + // Integration-test the given KVStore implementation. Test relaying a few payments and check that // the persisted data is updated the appropriate number of times. -pub(crate) fn do_test_store(store_0: &K, store_1: &K) { +pub(crate) fn do_test_store(store_0: &K, store_1: &K) { + // This value is used later to limit how many iterations we perform. + let persister_0_max_pending_updates = 7; + // Intentionally set this to a smaller value to test a different alignment. 
+    let persister_1_max_pending_updates = 3;
+
     let chanmon_cfgs = create_chanmon_cfgs(2);
+
+    let persister_0 = create_persister(store_0, &chanmon_cfgs[0], persister_0_max_pending_updates);
+    let persister_1 = create_persister(store_1, &chanmon_cfgs[1], persister_1_max_pending_updates);
+
+    let chain_mon_0 = create_chain_monitor(&chanmon_cfgs[0], &persister_0);
+    let chain_mon_1 = create_chain_monitor(&chanmon_cfgs[1], &persister_1);
+
     let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-    let chain_mon_0 = test_utils::TestChainMonitor::new(
-        Some(&chanmon_cfgs[0].chain_source),
-        &chanmon_cfgs[0].tx_broadcaster,
-        &chanmon_cfgs[0].logger,
-        &chanmon_cfgs[0].fee_estimator,
-        store_0,
-        node_cfgs[0].keys_manager,
-    );
-    let chain_mon_1 = test_utils::TestChainMonitor::new(
-        Some(&chanmon_cfgs[1].chain_source),
-        &chanmon_cfgs[1].tx_broadcaster,
-        &chanmon_cfgs[1].logger,
-        &chanmon_cfgs[1].fee_estimator,
-        store_1,
-        node_cfgs[1].keys_manager,
-    );
     node_cfgs[0].chain_monitor = chain_mon_0;
     node_cfgs[1].chain_monitor = chain_mon_1;
     let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -106,26 +265,20 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
 
     // Check that the persisted channel data is empty before any channels are
     // open.
-    let mut persisted_chan_data_0 =
-        read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+    let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap();
     assert_eq!(persisted_chan_data_0.len(), 0);
-    let mut persisted_chan_data_1 =
-        read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+    let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap();
     assert_eq!(persisted_chan_data_1.len(), 0);
 
     // Helper to make sure the channel is on the expected update ID.
     macro_rules! check_persisted_data {
-        ($expected_update_id: expr) => {
-            persisted_chan_data_0 =
-                read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager)
-                    .unwrap();
+        ($expected_update_id:expr) => {
+            persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates().unwrap();
             assert_eq!(persisted_chan_data_0.len(), 1);
             for (_, mon) in persisted_chan_data_0.iter() {
                 assert_eq!(mon.get_latest_update_id(), $expected_update_id);
             }
-            persisted_chan_data_1 =
-                read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager)
-                    .unwrap();
+            persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates().unwrap();
             assert_eq!(persisted_chan_data_1.len(), 1);
             for (_, mon) in persisted_chan_data_1.iter() {
                 assert_eq!(mon.get_latest_update_id(), $expected_update_id);
@@ -138,52 +291,63 @@ pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
     check_persisted_data!(0);
 
     // Send a few payments and make sure the monitors are updated to the latest.
-    send_payment(&nodes[0], &vec![&nodes[1]][..], 8000000);
-    check_persisted_data!(5);
-    send_payment(&nodes[1], &vec![&nodes[0]][..], 4000000);
-    check_persisted_data!(10);
+    let expected_route = &[&nodes[1]][..];
+    send_payment(&nodes[0], expected_route, 8_000_000);
+    check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
+    let expected_route = &[&nodes[0]][..];
+    send_payment(&nodes[1], expected_route, 4_000_000);
+    check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
+
+    // Send a few more payments to try all the alignments of max pending updates with
+    // updates for a payment sent and received.
+ let mut sender = 0; + for i in 3..=persister_0_max_pending_updates * 2 { + let receiver; + if sender == 0 { + sender = 1; + receiver = 0; + } else { + sender = 0; + receiver = 1; + } + let expected_route = &[&nodes[receiver]][..]; + send_payment(&nodes[sender], expected_route, 21_000); + check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT); + } // Force close because cooperative close doesn't result in any persisted // updates. + let message = "Channel force-closed".to_owned(); nodes[0] .node .force_close_broadcasting_latest_txn( &nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), - "whoops".to_string(), + message.clone(), ) .unwrap(); check_closed_event!( nodes[0], 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }, [nodes[1].node.get_our_node_id()], 100000 ); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcast(); assert_eq!(node_txn.len(), 1); + let txn = vec![node_txn[0].clone(), node_txn[0].clone()]; + let dummy_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn); + connect_block(&nodes[1], &dummy_block); - connect_block( - &nodes[1], - &create_dummy_block( - nodes[0].best_block_hash(), - 42, - vec![node_txn[0].clone(), node_txn[0].clone()], - ), - ); check_closed_broadcast!(nodes[1], true); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - [nodes[0].node.get_our_node_id()], - 100000 - ); + let reason = ClosureReason::CommitmentTxConfirmed; + let node_id_0 = nodes[0].node.get_our_node_id(); + check_closed_event!(nodes[1], 1, reason, false, [node_id_0], 100000); check_added_monitors!(nodes[1], 1); // Make sure everything is persisted as expected after close. - check_persisted_data!(11); + check_persisted_data!(persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1); } diff --git a/src/io/utils.rs b/src/io/utils.rs index b5537ed7d..26b586889 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -5,35 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use super::*; -use crate::config::WALLET_KEYS_SEED_LEN; - -use crate::chain::ChainSource; -use crate::fee_estimator::OnchainFeeEstimator; -use crate::io::{ - NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, -}; -use crate::logger::{log_error, LdkLogger, Logger}; -use crate::peer_store::PeerStore; -use crate::sweep::DeprecatedSpendableOutputInfo; -use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; -use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; -use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; - -use lightning::io::Cursor; -use lightning::ln::msgs::DecodeError; -use lightning::routing::gossip::NetworkGraph; -use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringDecayParameters}; -use lightning::util::persist::{ - KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, NETWORK_GRAPH_PERSISTENCE_KEY, - NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_KEY, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, - OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, - SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, -}; -use lightning::util::ser::{Readable, ReadableArgs, Writeable}; -use lightning::util::string::PrintableString; -use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; +use std::fs; +use std::io::Write; +use std::ops::Deref; +use std::path::Path; +use std::sync::Arc; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; @@ -41,18 +17,45 @@ use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::ConfirmationBlockTime; use bdk_wallet::ChangeSet as BdkWalletChangeSet; - use bip39::Mnemonic; use bitcoin::Network; -use rand::{thread_rng, RngCore}; +use lightning::io::Cursor; +use lightning::ln::msgs::DecodeError; +use lightning::routing::gossip::NetworkGraph; +use lightning::routing::scoring::{ + ChannelLiquidities, ProbabilisticScorer, ProbabilisticScoringDecayParameters, +}; +use lightning::util::persist::{ + KVStore, KVStoreSync, KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN, + NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, +}; +use lightning::util::ser::{Readable, ReadableArgs, Writeable}; +use lightning_types::string::PrintableString; +use rand::rngs::OsRng; +use rand::TryRngCore; -use std::fs; -use std::io::Write; -use std::ops::Deref; -use std::path::Path; -use std::sync::Arc; +use super::*; +use crate::chain::ChainSource; +use crate::config::WALLET_KEYS_SEED_LEN; +use crate::fee_estimator::OnchainFeeEstimator; +use crate::io::{ + NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, +}; +use crate::logger::{log_error, log_trace, LdkLogger, Logger}; +use crate::peer_store::PeerStore; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper, WordCount}; +use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; +use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; -/// Generates a random [BIP 39] mnemonic. 
+pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; + +/// Generates a random [BIP 39] mnemonic with the specified word count. +/// +/// If no word count is specified, defaults to 24 words (256-bit entropy). /// /// The result may be used to initialize the [`Node`] entropy, i.e., can be given to /// [`Builder::set_entropy_bip39_mnemonic`]. @@ -60,11 +63,9 @@ use std::sync::Arc; /// [BIP 39]: https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki /// [`Node`]: crate::Node /// [`Builder::set_entropy_bip39_mnemonic`]: crate::Builder::set_entropy_bip39_mnemonic -pub fn generate_entropy_mnemonic() -> Mnemonic { - // bip39::Mnemonic supports 256 bit entropy max - let mut entropy = [0; 32]; - thread_rng().fill_bytes(&mut entropy); - Mnemonic::from_entropy(&entropy).unwrap() +pub fn generate_entropy_mnemonic(word_count: Option) -> Mnemonic { + let word_count = word_count.unwrap_or(WordCount::Words24).word_count(); + Mnemonic::generate(word_count).expect("Failed to generate mnemonic") } pub(crate) fn read_or_generate_seed_file( @@ -96,7 +97,10 @@ where Ok(key) } else { let mut key = [0; WALLET_KEYS_SEED_LEN]; - thread_rng().fill_bytes(&mut key); + OsRng.try_fill_bytes(&mut key).map_err(|e| { + log_error!(logger, "Failed to generate entropy: {}", e); + std::io::Error::new(std::io::ErrorKind::Other, "Failed to generate seed bytes") + })?; if let Some(parent_dir) = Path::new(&keys_seed_path).parent() { fs::create_dir_all(parent_dir).map_err(|e| { @@ -135,7 +139,9 @@ pub(crate) fn read_network_graph( where L::Target: LdkLogger, { - let mut reader = Cursor::new(kv_store.read( + log_trace!(logger, "Reading network graph"); + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, @@ -154,7 +160,8 @@ where L::Target: LdkLogger, { let params = ProbabilisticScoringDecayParameters::default(); - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, @@ -166,6 +173,53 @@ where }) } +/// Read previously persisted external pathfinding scores from the cache. +pub(crate) fn read_external_pathfinding_scores_from_cache( + kv_store: Arc, logger: L, +) -> Result +where + L::Target: LdkLogger, +{ + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, + )?); + ChannelLiquidities::read(&mut reader).map_err(|e| { + log_error!(logger, "Failed to deserialize scorer: {}", e); + std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize Scorer") + }) +} + +/// Persist external pathfinding scores to the cache. 
+pub(crate) async fn write_external_pathfinding_scores_to_cache<L: Deref>(
+	kv_store: Arc<DynStore>, data: &ChannelLiquidities, logger: L,
+) -> Result<(), Error>
+where
+	L::Target: LdkLogger,
+{
+	KVStore::write(
+		&*kv_store,
+		SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
+		SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
+		EXTERNAL_PATHFINDING_SCORES_CACHE_KEY,
+		data.encode(),
+	)
+	.await
+	.map_err(|e| {
+		log_error!(
+			logger,
+			"Writing data to key {}/{}/{} failed due to: {}",
+			SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
+			SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
+			EXTERNAL_PATHFINDING_SCORES_CACHE_KEY,
+			e
+		);
+		Error::PersistenceFailed
+	})
+}
+
 /// Read previously persisted events from the store.
 pub(crate) fn read_event_queue<L: Deref + Clone>(
 	kv_store: Arc<DynStore>, logger: L,
@@ -173,7 +227,8 @@
 where
 	L::Target: LdkLogger,
 {
-	let mut reader = Cursor::new(kv_store.read(
+	let mut reader = Cursor::new(KVStoreSync::read(
+		&*kv_store,
 		EVENT_QUEUE_PERSISTENCE_PRIMARY_NAMESPACE,
 		EVENT_QUEUE_PERSISTENCE_SECONDARY_NAMESPACE,
 		EVENT_QUEUE_PERSISTENCE_KEY,
@@ -191,7 +246,8 @@ pub(crate) fn read_peer_info(
 where
 	L::Target: LdkLogger,
 {
-	let mut reader = Cursor::new(kv_store.read(
+	let mut reader = Cursor::new(KVStoreSync::read(
+		&*kv_store,
 		PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
 		PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
 		PEER_INFO_PERSISTENCE_KEY,
@@ -211,11 +267,13 @@ where
 {
 	let mut res = Vec::new();
-	for stored_key in kv_store.list(
+	for stored_key in KVStoreSync::list(
+		&*kv_store,
 		PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
 		PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
 	)? {
-		let mut reader = Cursor::new(kv_store.read(
+		let mut reader = Cursor::new(KVStoreSync::read(
+			&*kv_store,
 			PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE,
 			PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE,
 			&stored_key,
@@ -238,7 +296,8 @@ pub(crate) fn read_output_sweeper(
 	chain_data_source: Arc<ChainSource>, keys_manager: Arc<KeysManager>,
 	kv_store: Arc<DynStore>, logger: Arc<Logger>,
 ) -> Result<Sweeper, std::io::Error> {
-	let mut reader = Cursor::new(kv_store.read(
+	let mut reader = Cursor::new(KVStoreSync::read(
+		&*kv_store,
 		OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE,
 		OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
 		OUTPUT_SWEEPER_PERSISTENCE_KEY,
@@ -252,107 +311,11 @@ pub(crate) fn read_output_sweeper(
 		kv_store,
 		logger.clone(),
 	);
-	OutputSweeper::read(&mut reader, args).map_err(|e| {
+	let (_, sweeper) = <(_, Sweeper)>::read(&mut reader, args).map_err(|e| {
 		log_error!(logger, "Failed to deserialize OutputSweeper: {}", e);
 		std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize OutputSweeper")
-	})
-}
-
-/// Read previously persisted spendable output information from the store and migrate to the
-/// upstreamed `OutputSweeper`.
-///
-/// We first iterate all `DeprecatedSpendableOutputInfo`s and have them tracked by the new
-/// `OutputSweeper`. In order to be certain the initial output spends will happen in a single
-/// transaction (and safe on-chain fees), we batch them to happen at current height plus two
-/// blocks. Lastly, we remove the previously persisted data once we checked they are tracked and
-/// awaiting their initial spend at the correct height.
-///
-/// Note that this migration will be run in the `Builder`, i.e., at the time when the migration is
-/// happening no background sync is ongoing, so we shouldn't have a risk of interleaving block
-/// connections during the migration.
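A usage sketch for the two new cache helpers (the refresh_scores_cache wrapper below is hypothetical in-crate code; only the two helper functions are part of this diff):

	// Hypothetical caller: persist freshly fetched external scores, then read
	// them back the way the node would on a subsequent startup.
	async fn refresh_scores_cache<L: Deref + Clone>(
		kv_store: Arc<DynStore>, fetched: &ChannelLiquidities, logger: L,
	) -> Result<(), Error>
	where
		L::Target: LdkLogger,
	{
		write_external_pathfinding_scores_to_cache(Arc::clone(&kv_store), fetched, logger.clone())
			.await?;
		// The read path is sync and goes through KVStoreSync, like the other readers here.
		let _cached = read_external_pathfinding_scores_from_cache(kv_store, logger)
			.map_err(|_| Error::PersistenceFailed)?;
		Ok(())
	}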
-pub(crate) fn migrate_deprecated_spendable_outputs( - sweeper: Arc, kv_store: Arc, logger: L, -) -> Result<(), std::io::Error> -where - L::Target: LdkLogger, -{ - let best_block = sweeper.current_best_block(); - - for stored_key in kv_store.list( - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - )? { - let mut reader = Cursor::new(kv_store.read( - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - )?); - let output = DeprecatedSpendableOutputInfo::read(&mut reader).map_err(|e| { - log_error!(logger, "Failed to deserialize SpendableOutputInfo: {}", e); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize SpendableOutputInfo", - ) - })?; - let descriptors = vec![output.descriptor.clone()]; - let spend_delay = Some(best_block.height + 2); - sweeper - .track_spendable_outputs(descriptors, output.channel_id, true, spend_delay) - .map_err(|_| { - log_error!(logger, "Failed to track spendable outputs. Aborting migration, will retry in the future."); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to track spendable outputs. Aborting migration, will retry in the future.", - ) - })?; - - if let Some(tracked_spendable_output) = - sweeper.tracked_spendable_outputs().iter().find(|o| o.descriptor == output.descriptor) - { - match tracked_spendable_output.status { - OutputSpendStatus::PendingInitialBroadcast { delayed_until_height } => { - if delayed_until_height == spend_delay { - kv_store.remove( - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &stored_key, - false, - )?; - } else { - debug_assert!(false, "Unexpected status in OutputSweeper migration."); - log_error!(logger, "Unexpected status in OutputSweeper migration."); - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Failed to migrate OutputSweeper state.", - )); - } - }, - _ => { - debug_assert!(false, "Unexpected status in OutputSweeper migration."); - log_error!(logger, "Unexpected status in OutputSweeper migration."); - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Failed to migrate OutputSweeper state.", - )); - }, - } - } else { - debug_assert!( - false, - "OutputSweeper failed to track and persist outputs during migration." - ); - log_error!( - logger, - "OutputSweeper failed to track and persist outputs during migration." 
- ); - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "Failed to migrate OutputSweeper state.", - )); - } - } - - Ok(()) + })?; + Ok(sweeper) } pub(crate) fn read_node_metrics( @@ -361,7 +324,8 @@ pub(crate) fn read_node_metrics( where L::Target: LdkLogger, { - let mut reader = Cursor::new(kv_store.read( + let mut reader = Cursor::new(KVStoreSync::read( + &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, @@ -379,24 +343,24 @@ where L::Target: LdkLogger, { let data = node_metrics.encode(); - kv_store - .write( + KVStoreSync::write( + &*kv_store, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, + data, + ) + .map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, NODE_METRICS_KEY, - &data, - ) - .map_err(|e| { - log_error!( - logger, - "Writing data to key {}/{}/{} failed due to: {}", - NODE_METRICS_PRIMARY_NAMESPACE, - NODE_METRICS_SECONDARY_NAMESPACE, - NODE_METRICS_KEY, - e - ); - Error::PersistenceFailed - }) + e + ); + Error::PersistenceFailed + }) } pub(crate) fn is_valid_kvstore_str(key: &str) -> bool { @@ -491,31 +455,40 @@ pub(crate) fn check_namespace_key_validity( } macro_rules! impl_read_write_change_set_type { - ( $read_name: ident, $write_name: ident, $change_set_type:ty, $primary_namespace: expr, $secondary_namespace: expr, $key: expr ) => { + ( + $read_name:ident, + $write_name:ident, + $change_set_type:ty, + $primary_namespace:expr, + $secondary_namespace:expr, + $key:expr + ) => { pub(crate) fn $read_name( kv_store: Arc, logger: L, ) -> Result, std::io::Error> where L::Target: LdkLogger, { - let bytes = match kv_store.read($primary_namespace, $secondary_namespace, $key) { - Ok(bytes) => bytes, - Err(e) => { - if e.kind() == lightning::io::ErrorKind::NotFound { - return Ok(None); - } else { - log_error!( - logger, - "Reading data from key {}/{}/{} failed due to: {}", - $primary_namespace, - $secondary_namespace, - $key, - e - ); - return Err(e.into()); - } - }, - }; + let bytes = + match KVStoreSync::read(&*kv_store, $primary_namespace, $secondary_namespace, $key) + { + Ok(bytes) => bytes, + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + return Ok(None); + } else { + log_error!( + logger, + "Reading data from key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + return Err(e.into()); + } + }, + }; let mut reader = Cursor::new(bytes); let res: Result, DecodeError> = @@ -539,17 +512,18 @@ macro_rules! 
impl_read_write_change_set_type { L::Target: LdkLogger, { let data = ChangeSetSerWrapper(value).encode(); - kv_store.write($primary_namespace, $secondary_namespace, $key, &data).map_err(|e| { - log_error!( - logger, - "Writing data to key {}/{}/{} failed due to: {}", - $primary_namespace, - $secondary_namespace, - $key, - e - ); - e.into() - }) + KVStoreSync::write(&*kv_store, $primary_namespace, $secondary_namespace, $key, data) + .map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + e.into() + }) } }; } @@ -654,9 +628,35 @@ mod tests { #[test] fn mnemonic_to_entropy_to_mnemonic() { - let mnemonic = generate_entropy_mnemonic(); - + // Test default (24 words) + let mnemonic = generate_entropy_mnemonic(None); let entropy = mnemonic.to_entropy(); assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + assert_eq!(mnemonic.word_count(), 24); + + // Test with different word counts + let word_counts = [ + WordCount::Words12, + WordCount::Words15, + WordCount::Words18, + WordCount::Words21, + WordCount::Words24, + ]; + + for word_count in word_counts { + let mnemonic = generate_entropy_mnemonic(Some(word_count)); + let entropy = mnemonic.to_entropy(); + assert_eq!(mnemonic, Mnemonic::from_entropy(&entropy).unwrap()); + + // Verify expected word count + let expected_words = match word_count { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + }; + assert_eq!(mnemonic.word_count(), expected_words); + } } } diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 6a6a09e41..74875b6cb 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -5,20 +5,27 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
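Before the VSS store changes, here is the caller-facing behavior the word-count tests above pin down, as an illustrative snippet using the re-exported API:

	use ldk_node::{generate_entropy_mnemonic, WordCount};

	fn demo() {
		// Passing None keeps the 24-word (256-bit entropy) default.
		let mnemonic = generate_entropy_mnemonic(None);
		assert_eq!(mnemonic.word_count(), 24);

		// Shorter mnemonics carry less entropy, e.g. 12 words = 128 bits.
		let mnemonic = generate_entropy_mnemonic(Some(WordCount::Words12));
		assert_eq!(mnemonic.word_count(), 12);
	}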
-use crate::io::utils::check_namespace_key_validity;
+use std::boxed::Box;
+use std::collections::HashMap;
+use std::future::Future;
+#[cfg(test)]
+use std::panic::RefUnwindSafe;
+use std::pin::Pin;
+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use bdk_chain::Merge;
 use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine};
+use lightning::impl_writeable_tlv_based_enum;
 use lightning::io::{self, Error, ErrorKind};
 use lightning::util::persist::{
-	KVStore, NETWORK_GRAPH_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
-	NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+	KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY,
+	NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
 };
+use lightning::util::ser::{Readable, Writeable};
 use prost::Message;
 use rand::RngCore;
-#[cfg(test)]
-use std::panic::RefUnwindSafe;
-use std::sync::Arc;
-use std::time::Duration;
-use tokio::runtime::Runtime;
 use vss_client::client::VssClient;
 use vss_client::error::VssError;
 use vss_client::headers::VssHeaderProvider;
@@ -33,6 +40,8 @@ use vss_client::util::retry::{
 };
 use vss_client::util::storable_builder::{EntropySource, StorableBuilder};
 
+use crate::io::utils::check_namespace_key_validity;
+
 type CustomRetryPolicy = FilteredRetryPolicy<
 	JitteredRetryPolicy<
 		MaxTotalDelayRetryPolicy<MaxAttemptsRetryPolicy<ExponentialBackoffRetryPolicy<VssError>>>,
 	>,
@@ -40,60 +49,494 @@ type CustomRetryPolicy = FilteredRetryPolicy<
 	Box<dyn Fn(&VssError) -> bool + 'static + Send + Sync>,
 >;
 
-/// A [`KVStore`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend.
+#[derive(Debug, PartialEq)]
+enum VssSchemaVersion {
+	// The initial schema version.
+	// This used an empty `aad` and unobfuscated `primary_namespace`/`secondary_namespace`s in the
+	// stored key.
+	V0,
+	// The second deployed schema version.
+	// Here we started obfuscating the primary and secondary namespaces. The obfuscated
+	// `store_key` (`obfuscate(primary_namespace#secondary_namespace)#obfuscate(key)`) is now
+	// used as the `aad` for encryption, ensuring that the encrypted blobs commit to the key
+	// they're stored under.
+	V1,
+}
+
+impl_writeable_tlv_based_enum!(VssSchemaVersion,
+	(0, V0) => {},
+	(1, V1) => {},
+);
+
+const VSS_SCHEMA_VERSION_KEY: &str = "vss_schema_version";
+
+// We set this to a small number of threads that still allows making some progress if one of
+// them hits a blocking case.
+const INTERNAL_RUNTIME_WORKERS: usize = 2;
+
+/// A [`KVStoreSync`] implementation that writes to and reads from a [VSS](https://github.com/lightningdevkit/vss-server/blob/main/README.md) backend.
 pub struct VssStore {
-	client: VssClient<CustomRetryPolicy>,
-	store_id: String,
-	runtime: Runtime,
-	storable_builder: StorableBuilder<RandEntropySource>,
-	key_obfuscator: KeyObfuscator,
-	secondary_kv_store: Arc<DynStore>,
+	inner: Arc<VssStoreInner>,
+	// Version counter to ensure that writes are applied in the correct order. It is assumed
+	// that read and list operations aren't sensitive to the order of execution.
+	next_version: AtomicU64,
+	// A VSS-internal runtime we use to avoid any deadlocks we could hit when waiting on a spawned
+	// blocking task to finish while the blocked thread had acquired the reactor.
In particular, + // this works around a previously-hit case where a concurrent call to + // `PeerManager::process_pending_events` -> `ChannelManager::get_and_clear_pending_msg_events` + // would deadlock when trying to acquire sync `Mutex` locks that are held by the thread + // currently being blocked waiting on the VSS operation to finish. + internal_runtime: Option, + // Alby: secondary kv store for saving the network graph as it's large and shouldn't be saved to VSS + // NOTE: for Alby Cloud we use a transient network graph (saved in memory and rebuilt on startup) + secondary_kv_store: Arc, } impl VssStore { pub(crate) fn new( base_url: String, store_id: String, vss_seed: [u8; 32], header_provider: Arc, - secondary_kv_store: Arc, + secondary_kv_store: Arc, ) -> io::Result { - let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; + let next_version = AtomicU64::new(1); + let internal_runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name_fn(|| { + static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0); + let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); + format!("ldk-node-vss-runtime-{}", id) + }) + .worker_threads(INTERNAL_RUNTIME_WORKERS) + .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) + .build() + .unwrap(); + let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); - let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) - //.with_max_attempts(10) - .with_max_attempts(15) // Alby: account for unexpected networking errors - //.with_max_total_delay(Duration::from_secs(15)) - .with_max_total_delay(Duration::from_secs(180)) // Alby: account for unexpected networking errors - .with_max_jitter(Duration::from_millis(10)) - .skip_retry_on_error(Box::new(|e: &VssError| { - matches!( - e, - VssError::NoSuchKeyError(..) - | VssError::InvalidRequestError(..) - | VssError::ConflictError(..) 
+ + let sync_retry_policy = retry_policy(); + let blocking_client = VssClient::new_with_headers( + base_url.clone(), + sync_retry_policy, + header_provider.clone(), + ); + + let runtime_handle = internal_runtime.handle(); + let schema_version = tokio::task::block_in_place(|| { + runtime_handle.block_on(async { + determine_and_write_schema_version( + &blocking_client, + &store_id, + data_encryption_key, + &key_obfuscator, ) - }) as _); + .await + }) + })?; + + let async_retry_policy = retry_policy(); + let async_client = + VssClient::new_with_headers(base_url, async_retry_policy, header_provider); + + let inner = Arc::new(VssStoreInner::new( + schema_version, + blocking_client, + async_client, + store_id, + data_encryption_key, + key_obfuscator, + )); - let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); - Ok(Self { client, store_id, runtime, storable_builder, key_obfuscator, secondary_kv_store }) + Ok(Self { + inner, + next_version, + internal_runtime: Some(internal_runtime), + secondary_kv_store, + }) } - fn build_key( + // Same logic as for the obfuscated keys below, but just for locking, using the plaintext keys + fn build_locking_key( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> io::Result { - let obfuscated_key = self.key_obfuscator.obfuscate(key); + ) -> String { if primary_namespace.is_empty() { - Ok(obfuscated_key) + key.to_owned() } else { - Ok(format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key)) + format!("{}#{}#{}", primary_namespace, secondary_namespace, key) + } + } + + fn get_new_version_and_lock_ref( + &self, locking_key: String, + ) -> (Arc>, u64) { + let version = self.next_version.fetch_add(1, Ordering::Relaxed); + if version == u64::MAX { + panic!("VssStore version counter overflowed"); + } + + // Get a reference to the inner lock. We do this early so that the arc can double as an in-flight counter for + // cleaning up unused locks. 
+ let inner_lock_ref = self.inner.get_inner_lock_ref(locking_key); + + (inner_lock_ref, version) + } +} + +impl KVStoreSync for VssStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + // Alby: read network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return lightning::util::persist::KVStoreSync::read( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + ); + } + + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = async move { + inner + .read_internal(&inner.blocking_client, primary_namespace, secondary_namespace, key) + .await + }; + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + // Alby: write network graph to secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return lightning::util::persist::KVStoreSync::write( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + buf, + ); + } + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let fut = async move { + inner + .write_internal( + &inner.blocking_client, + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) + .await + }; + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + // Alby: remove network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return lightning::util::persist::KVStoreSync::remove( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); + } + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let 
inner = Arc::clone(&self.inner); + let locking_key = self.build_locking_key(&primary_namespace, &secondary_namespace, &key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let fut = async move { + inner + .remove_internal( + &inner.blocking_client, + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + lazy, + ) + .await + }; + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + // Alby: we use a secondary store for the network graph and currently don't support merging results + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE { + panic!("Alby: cannot list from NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE"); + } + if primary_namespace == "" { + panic!("Alby: cannot list from empty primary namespace"); + } + + let internal_runtime = self.internal_runtime.as_ref().ok_or_else(|| { + debug_assert!(false, "Failed to access internal runtime"); + let msg = format!("Failed to access internal runtime"); + Error::new(ErrorKind::Other, msg) + })?; + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + let fut = async move { + inner + .list_internal(&inner.blocking_client, primary_namespace, secondary_namespace) + .await + }; + tokio::task::block_in_place(move || internal_runtime.block_on(fut)) + } +} + +impl KVStore for VssStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin, io::Error>> + Send>> { + // Alby: read network graph from secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return lightning::util::persist::KVStore::read( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + ); + } + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + Box::pin(async move { + inner + .read_internal(&inner.async_client, primary_namespace, secondary_namespace, key) + .await + }) + } + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> Pin> + Send>> { + // Alby: write network graph to secondary storage + if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE + && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE + && key == NETWORK_GRAPH_PERSISTENCE_KEY + { + return lightning::util::persist::KVStore::write( + &*self.secondary_kv_store, + primary_namespace, + secondary_namespace, + key, + buf, + ); + } + let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key); + let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone()); + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + Box::pin(async move { + inner + .write_internal( + &inner.async_client, + inner_lock_ref, + locking_key, + version, + primary_namespace, + secondary_namespace, + key, + buf, + ) + .await + }) + } + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> 
Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> {
+		// Alby: remove network graph from secondary storage
+		if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE
+			&& secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE
+			&& key == NETWORK_GRAPH_PERSISTENCE_KEY
+		{
+			return lightning::util::persist::KVStore::remove(
+				&*self.secondary_kv_store,
+				primary_namespace,
+				secondary_namespace,
+				key,
+				lazy,
+			);
+		}
+		let locking_key = self.build_locking_key(primary_namespace, secondary_namespace, key);
+		let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(locking_key.clone());
+		let primary_namespace = primary_namespace.to_string();
+		let secondary_namespace = secondary_namespace.to_string();
+		let key = key.to_string();
+		let inner = Arc::clone(&self.inner);
+		Box::pin(async move {
+			inner
+				.remove_internal(
+					&inner.async_client,
+					inner_lock_ref,
+					locking_key,
+					version,
+					primary_namespace,
+					secondary_namespace,
+					key,
+					lazy,
+				)
+				.await
+		})
+	}
+	fn list(
+		&self, primary_namespace: &str, secondary_namespace: &str,
+	) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + Send>> {
+		// Alby: we use a secondary store for the network graph and currently don't support merging results
+		if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE {
+			panic!("Alby: cannot list from NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE");
+		}
+		if primary_namespace == "" {
+			panic!("Alby: cannot list from empty primary namespace");
+		}
+
+		let primary_namespace = primary_namespace.to_string();
+		let secondary_namespace = secondary_namespace.to_string();
+		let inner = Arc::clone(&self.inner);
+		Box::pin(async move {
+			inner.list_internal(&inner.async_client, primary_namespace, secondary_namespace).await
+		})
+	}
+}
+
+impl Drop for VssStore {
+	fn drop(&mut self) {
+		let internal_runtime = self.internal_runtime.take();
+		tokio::task::block_in_place(move || drop(internal_runtime));
+	}
+}
+
+struct VssStoreInner {
+	schema_version: VssSchemaVersion,
+	blocking_client: VssClient<CustomRetryPolicy>,
+	// A secondary client that will only be used for async persistence via `KVStore`, to ensure TCP
+	// connections aren't shared between our outer and the internal runtime.
+	async_client: VssClient<CustomRetryPolicy>,
+	store_id: String,
+	data_encryption_key: [u8; 32],
+	key_obfuscator: KeyObfuscator,
+	// Per-key locks that ensure that we don't have concurrent writes to the same namespace/key.
+	// The lock also encapsulates the latest written version per key.
+ locks: Mutex>>>, + pending_lazy_deletes: Mutex>, +} + +impl VssStoreInner { + pub(crate) fn new( + schema_version: VssSchemaVersion, blocking_client: VssClient, + async_client: VssClient, store_id: String, + data_encryption_key: [u8; 32], key_obfuscator: KeyObfuscator, + ) -> Self { + let locks = Mutex::new(HashMap::new()); + let pending_lazy_deletes = Mutex::new(Vec::new()); + Self { + schema_version, + blocking_client, + async_client, + store_id, + data_encryption_key, + key_obfuscator, + locks, + pending_lazy_deletes, + } + } + + fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { + let mut outer_lock = self.locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(locking_key).or_default()) + } + + fn build_obfuscated_key( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> String { + if self.schema_version == VssSchemaVersion::V1 { + let obfuscated_prefix = + self.build_obfuscated_prefix(primary_namespace, secondary_namespace); + let obfuscated_key = self.key_obfuscator.obfuscate(key); + format!("{}#{}", obfuscated_prefix, obfuscated_key) + } else { + // Default to V0 schema + let obfuscated_key = self.key_obfuscator.obfuscate(key); + if primary_namespace.is_empty() { + obfuscated_key + } else { + format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key) + } + } + } + + fn build_obfuscated_prefix( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> String { + if self.schema_version == VssSchemaVersion::V1 { + let prefix = format!("{}#{}", primary_namespace, secondary_namespace); + self.key_obfuscator.obfuscate(&prefix) + } else { + // Default to V0 schema + format!("{}#{}", primary_namespace, secondary_namespace) } } fn extract_key(&self, unified_key: &str) -> io::Result { - let mut parts = unified_key.splitn(3, '#'); - let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); + let mut parts = if self.schema_version == VssSchemaVersion::V1 { + let mut parts = unified_key.splitn(2, '#'); + let _obfuscated_namespace = parts.next(); + parts + } else { + // Default to V0 schema + let mut parts = unified_key.splitn(3, '#'); + let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); + parts + }; match parts.next() { Some(obfuscated_key) => { let actual_key = self.key_obfuscator.deobfuscate(obfuscated_key)?; @@ -104,11 +547,12 @@ impl VssStore { } async fn list_all_keys( - &self, primary_namespace: &str, secondary_namespace: &str, + &self, client: &VssClient, primary_namespace: &str, + secondary_namespace: &str, ) -> io::Result> { let mut page_token = None; let mut keys = vec![]; - let key_prefix = format!("{}#{}", primary_namespace, secondary_namespace); + let key_prefix = self.build_obfuscated_prefix(primary_namespace, secondary_namespace); while page_token != Some("".to_string()) { let request = ListKeyVersionsRequest { store_id: self.store_id.clone(), @@ -117,7 +561,7 @@ impl VssStore { page_size: None, }; - let response = self.client.list_key_versions(&request).await.map_err(|e| { + let response = client.list_key_versions(&request).await.map_err(|e| { let msg = format!( "Failed to list keys in {}/{}: {}", primary_namespace, secondary_namespace, e @@ -132,38 +576,26 @@ impl VssStore { } Ok(keys) } -} -impl KVStore for VssStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + async fn read_internal( + &self, client: &VssClient, primary_namespace: String, + secondary_namespace: String, key: String, ) -> io::Result> { - // Alby: write network 
graph to secondary storage - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { - return self.secondary_kv_store.read(primary_namespace, secondary_namespace, key); - } + check_namespace_key_validity(&primary_namespace, &secondary_namespace, Some(&key), "read")?; - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; - let request = GetObjectRequest { - store_id: self.store_id.clone(), - key: self.build_key(primary_namespace, secondary_namespace, key)?, - }; + let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + let request = GetObjectRequest { store_id: self.store_id.clone(), key: store_key.clone() }; + let resp = client.get_object(&request).await.map_err(|e| { + let msg = format!( + "Failed to read from key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + match e { + VssError::NoSuchKeyError(..) => Error::new(ErrorKind::NotFound, msg), + _ => Error::new(ErrorKind::Other, msg), + } + })?; - let resp = - tokio::task::block_in_place(|| self.runtime.block_on(self.client.get_object(&request))) - .map_err(|e| { - let msg = format!( - "Failed to read from key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - match e { - VssError::NoSuchKeyError(..) => Error::new(ErrorKind::NotFound, msg), - _ => Error::new(ErrorKind::Other, msg), - } - })?; // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { @@ -174,36 +606,56 @@ impl KVStore for VssStore { Error::new(ErrorKind::Other, msg) })?; - Ok(self.storable_builder.deconstruct(storable)?.0) + let storable_builder = StorableBuilder::new(RandEntropySource); + let aad = + if self.schema_version == VssSchemaVersion::V1 { store_key.as_bytes() } else { &[] }; + let decrypted = storable_builder.deconstruct(storable, &self.data_encryption_key, aad)?.0; + Ok(decrypted) } - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + async fn write_internal( + &self, client: &VssClient, inner_lock_ref: Arc>, + locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, + key: String, buf: Vec, ) -> io::Result<()> { - // Alby: write network graph to secondary storage - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { - return self.secondary_kv_store.write(primary_namespace, secondary_namespace, key, buf); - } + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "write", + )?; - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; - let version = -1; - let storable = self.storable_builder.build(buf.to_vec(), version); + let delete_items = self + .pending_lazy_deletes + .try_lock() + .ok() + .and_then(|mut guard| guard.take()) + .unwrap_or_default(); + let store_key = self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + let vss_version = -1; + let storable_builder = StorableBuilder::new(RandEntropySource); + let aad = + if self.schema_version == VssSchemaVersion::V1 { store_key.as_bytes() } else { 
&[] }; + let storable = + storable_builder.build(buf.to_vec(), vss_version, &self.data_encryption_key, aad); let request = PutObjectRequest { store_id: self.store_id.clone(), global_version: None, transaction_items: vec![KeyValue { - key: self.build_key(primary_namespace, secondary_namespace, key)?, - version, + key: store_key, + version: vss_version, value: storable.encode_to_vec(), }], - delete_items: vec![], + delete_items: delete_items.clone(), }; - tokio::task::block_in_place(|| self.runtime.block_on(self.client.put_object(&request))) - .map_err(|e| { + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { + client.put_object(&request).await.map_err(|e| { + // Restore delete items so they'll be retried on next write. + if !delete_items.is_empty() { + self.pending_lazy_deletes.lock().unwrap().extend(delete_items); + } + let msg = format!( "Failed to write to key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e @@ -211,66 +663,112 @@ impl KVStore for VssStore { Error::new(ErrorKind::Other, msg) })?; - Ok(()) + Ok(()) + }) + .await } - fn remove( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, + async fn remove_internal( + &self, client: &VssClient, inner_lock_ref: Arc>, + locking_key: String, version: u64, primary_namespace: String, secondary_namespace: String, + key: String, lazy: bool, ) -> io::Result<()> { - // Alby: write network graph to secondary storage - if primary_namespace == NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE - && secondary_namespace == NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE - && key == NETWORK_GRAPH_PERSISTENCE_KEY - { - return self.secondary_kv_store.remove( - primary_namespace, - secondary_namespace, - key, - _lazy, - ); + check_namespace_key_validity( + &primary_namespace, + &secondary_namespace, + Some(&key), + "remove", + )?; + + let obfuscated_key = + self.build_obfuscated_key(&primary_namespace, &secondary_namespace, &key); + + let key_value = KeyValue { key: obfuscated_key, version: -1, value: vec![] }; + if lazy { + let mut pending_lazy_deletes = self.pending_lazy_deletes.lock().unwrap(); + pending_lazy_deletes.push(key_value); + return Ok(()); } - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; - let request = DeleteObjectRequest { - store_id: self.store_id.clone(), - key_value: Some(KeyValue { - key: self.build_key(primary_namespace, secondary_namespace, key)?, - version: -1, - value: vec![], - }), - }; - tokio::task::block_in_place(|| self.runtime.block_on(self.client.delete_object(&request))) - .map_err(|e| { + self.execute_locked_write(inner_lock_ref, locking_key, version, async move || { + let request = + DeleteObjectRequest { store_id: self.store_id.clone(), key_value: Some(key_value) }; + + client.delete_object(&request).await.map_err(|e| { let msg = format!( "Failed to delete key {}/{}/{}: {}", primary_namespace, secondary_namespace, key, e ); Error::new(ErrorKind::Other, msg) })?; - Ok(()) + + Ok(()) + }) + .await } - fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { - check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; + async fn list_internal( + &self, client: &VssClient, primary_namespace: String, + secondary_namespace: String, + ) -> io::Result> { + check_namespace_key_validity(&primary_namespace, &secondary_namespace, None, "list")?; - let keys = tokio::task::block_in_place(|| { - self.runtime.block_on(self.list_all_keys(primary_namespace, 
secondary_namespace))
-		})
-		.map_err(|e| {
-			let msg = format!(
-				"Failed to retrieve keys in namespace: {}/{} : {}",
-				primary_namespace, secondary_namespace, e
-			);
-			Error::new(ErrorKind::Other, msg)
-		})?;
+		let keys = self
+			.list_all_keys(client, &primary_namespace, &secondary_namespace)
+			.await
+			.map_err(|e| {
+				let msg = format!(
+					"Failed to retrieve keys in namespace: {}/{} : {}",
+					primary_namespace, secondary_namespace, e
+				);
+				Error::new(ErrorKind::Other, msg)
+			})?;
+
+		Ok(keys)
+
-		// Alby: also list keys from secondary storage
-		let secondary_keys =
-			self.secondary_kv_store.list(primary_namespace, secondary_namespace)?;
+	async fn execute_locked_write<
+		F: Future<Output = Result<(), lightning::io::Error>>,
+		FN: FnOnce() -> F,
+	>(
+		&self, inner_lock_ref: Arc<tokio::sync::Mutex<u64>>, locking_key: String, version: u64,
+		callback: FN,
+	) -> Result<(), lightning::io::Error> {
+		let res = {
+			let mut last_written_version = inner_lock_ref.lock().await;
+
+			// Check if we already have a newer version written/removed. This is used in async
+			// contexts to achieve eventual consistency.
+			let is_stale_version = version <= *last_written_version;
+
+			// If the version is not stale, we execute the callback. Otherwise we can and must skip writing.
+			if is_stale_version {
+				Ok(())
+			} else {
+				callback().await.map(|_| {
+					*last_written_version = version;
+				})
+			}
+		};
-		let all_keys: Vec<String> =
-			keys.iter().cloned().chain(secondary_keys.iter().cloned()).collect();
-		Ok(all_keys)
+		self.clean_locks(&inner_lock_ref, locking_key);
+
+		res
+	}
+
+	fn clean_locks(&self, inner_lock_ref: &Arc<tokio::sync::Mutex<u64>>, locking_key: String) {
+		// If there are no arcs in use elsewhere, this means that there are no in-flight writes. We
+		// can remove the map entry to prevent leaking memory. The two arcs that are expected are the
+		// one in the map and the one held here in inner_lock_ref. The outer lock is obtained first,
+		// to avoid a new arc being cloned after we've already counted.
+		let mut outer_lock = self.locks.lock().unwrap();
+
+		let strong_count = Arc::strong_count(&inner_lock_ref);
+		debug_assert!(strong_count >= 2, "Unexpected VssStore strong count");
+
+		if strong_count == 2 {
+			outer_lock.remove(&locking_key);
+		}
 	}
 }
@@ -287,12 +785,132 @@ fn derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32]
 	(k1, k2)
 }
 
+fn retry_policy() -> CustomRetryPolicy {
+	ExponentialBackoffRetryPolicy::new(Duration::from_millis(10))
+		.with_max_attempts(100)
+		.with_max_total_delay(Duration::from_secs(180))
+		.with_max_jitter(Duration::from_millis(100))
+		.skip_retry_on_error(Box::new(|e: &VssError| {
+			matches!(
+				e,
+				VssError::NoSuchKeyError(..)
+					| VssError::InvalidRequestError(..)
+					| VssError::ConflictError(..)
+			)
+		}) as _)
+}
+
+async fn determine_and_write_schema_version(
+	client: &VssClient<CustomRetryPolicy>, store_id: &String, data_encryption_key: [u8; 32],
+	key_obfuscator: &KeyObfuscator,
+) -> io::Result<VssSchemaVersion> {
+	// Build the obfuscated `vss_schema_version` key.
+	let obfuscated_prefix = key_obfuscator.obfuscate(&format! {"{}#{}", "", ""});
+	let obfuscated_key = key_obfuscator.obfuscate(VSS_SCHEMA_VERSION_KEY);
+	let store_key = format!("{}#{}", obfuscated_prefix, obfuscated_key);
+
+	// Try to read the stored schema version.
+	let request = GetObjectRequest { store_id: store_id.clone(), key: store_key.clone() };
+	let resp = match client.get_object(&request).await {
+		Ok(resp) => Some(resp),
+		Err(VssError::NoSuchKeyError(..)) => {
+			// The value is not set.
+ None + }, + Err(e) => { + let msg = format!("Failed to read schema version: {}", e); + return Err(Error::new(ErrorKind::Other, msg)); + }, + }; + + if let Some(resp) = resp { + // The schema version was present, so just decrypt the stored data. + + // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise + // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] + let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + let storable_builder = StorableBuilder::new(RandEntropySource); + // Schema version was added starting with V1, so if set at all, we use the key as `aad` + let aad = store_key.as_bytes(); + let decrypted = storable_builder + .deconstruct(storable, &data_encryption_key, aad) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })? + .0; + + let schema_version: VssSchemaVersion = Readable::read(&mut io::Cursor::new(decrypted)) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + Ok(schema_version) + } else { + // The schema version wasn't present, this either means we're running for the first time *or* it's V0 pre-migration (predating writing of the schema version). + + // Check if any `bdk_wallet` data was written by listing keys under the respective + // (unobfuscated) prefix. + const V0_BDK_WALLET_PREFIX: &str = "bdk_wallet#"; + let request = ListKeyVersionsRequest { + store_id: store_id.clone(), + key_prefix: Some(V0_BDK_WALLET_PREFIX.to_string()), + page_token: None, + page_size: None, + }; + + let response = client.list_key_versions(&request).await.map_err(|e| { + let msg = format!("Failed to determine schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + let wallet_data_present = !response.key_versions.is_empty(); + if wallet_data_present { + // If the wallet data is present, it means we're not running for the first time. + Ok(VssSchemaVersion::V0) + } else { + // We're running for the first time, write the schema version to save unnecessary IOps + // on future startup. + let schema_version = VssSchemaVersion::V1; + let encoded_version = schema_version.encode(); + + let storable_builder = StorableBuilder::new(RandEntropySource); + let vss_version = -1; + let aad = store_key.as_bytes(); + let storable = + storable_builder.build(encoded_version, vss_version, &data_encryption_key, aad); + + let request = PutObjectRequest { + store_id: store_id.clone(), + global_version: None, + transaction_items: vec![KeyValue { + key: store_key, + version: vss_version, + value: storable.encode_to_vec(), + }], + delete_items: vec![], + }; + + client.put_object(&request).await.map_err(|e| { + let msg = format!("Failed to write schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; + + Ok(schema_version) + } + } +} + /// A source for generating entropy/randomness using [`rand`]. 
pub(crate) struct RandEntropySource; impl EntropySource for RandEntropySource { fn fill_bytes(&self, buffer: &mut [u8]) { - rand::thread_rng().fill_bytes(buffer); + rand::rng().fill_bytes(buffer); } } @@ -302,17 +920,32 @@ impl RefUnwindSafe for VssStore {} #[cfg(test)] #[cfg(vss_test)] mod tests { - use super::*; - use crate::io::test_utils::do_read_write_remove_list_persist; - use rand::distributions::Alphanumeric; - use rand::{thread_rng, Rng, RngCore}; use std::collections::HashMap; + + use rand::distr::Alphanumeric; + use rand::{rng, Rng, RngCore}; use vss_client::headers::FixedHeaders; + use super::*; + use crate::io::test_utils::do_read_write_remove_list_persist; + #[test] - fn read_write_remove_list_persist() { + fn vss_read_write_remove_list_persist() { + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + let mut rng = rng(); + let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); + do_read_write_remove_list_persist(&vss_store); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn vss_read_write_remove_list_persist_in_runtime_context() { let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); - let mut rng = thread_rng(); + let mut rng = rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let mut vss_seed = [0u8; 32]; rng.fill_bytes(&mut vss_seed); @@ -321,5 +954,87 @@ mod tests { VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); + drop(vss_store) + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn vss_lazy_delete() { + let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); + let mut rng = rng(); + let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); + + let primary_namespace = "test_namespace"; + let secondary_namespace = ""; + let key_to_delete = "key_to_delete"; + let key_for_trigger = "key_for_trigger"; + let data_to_delete = b"data_to_delete".to_vec(); + let trigger_data = b"trigger_data".to_vec(); + + // Write the key that we'll later lazily delete + KVStore::write( + &vss_store, + primary_namespace, + secondary_namespace, + key_to_delete, + data_to_delete.clone(), + ) + .await + .unwrap(); + + // Verify the key exists + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) + .await + .unwrap(); + assert_eq!(read_data, data_to_delete); + + // Perform a lazy delete + KVStore::remove(&vss_store, primary_namespace, secondary_namespace, key_to_delete, true) + .await + .unwrap(); + + // Verify the key still exists (lazy delete doesn't immediately remove it) + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete) + .await + .unwrap(); + assert_eq!(read_data, data_to_delete); + + // Verify the key is still in the list + let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); + assert!(keys.contains(&key_to_delete.to_string())); + 
+ // Trigger the actual deletion by performing a write operation + KVStore::write( + &vss_store, + primary_namespace, + secondary_namespace, + key_for_trigger, + trigger_data.clone(), + ) + .await + .unwrap(); + + // Now verify the key is actually deleted + let read_result = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_to_delete).await; + assert!(read_result.is_err()); + assert_eq!(read_result.unwrap_err().kind(), ErrorKind::NotFound); + + // Verify the key is no longer in the list + let keys = KVStore::list(&vss_store, primary_namespace, secondary_namespace).await.unwrap(); + assert!(!keys.contains(&key_to_delete.to_string())); + + // Verify the trigger key still exists + let read_data = + KVStore::read(&vss_store, primary_namespace, secondary_namespace, key_for_trigger) + .await + .unwrap(); + assert_eq!(read_data, trigger_data); } } diff --git a/src/lib.rs b/src/lib.rs index e7de781e4..aa9efbd9c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,18 +25,21 @@ //! ```no_run //! # #[cfg(not(feature = "uniffi"))] //! # { -//! use ldk_node::Builder; -//! use ldk_node::lightning_invoice::Bolt11Invoice; -//! use ldk_node::lightning::ln::msgs::SocketAddress; -//! use ldk_node::bitcoin::Network; -//! use ldk_node::bitcoin::secp256k1::PublicKey; //! use std::str::FromStr; //! +//! use ldk_node::bitcoin::secp256k1::PublicKey; +//! use ldk_node::bitcoin::Network; +//! use ldk_node::lightning::ln::msgs::SocketAddress; +//! use ldk_node::lightning_invoice::Bolt11Invoice; +//! use ldk_node::Builder; +//! //! fn main() { //! let mut builder = Builder::new(); //! builder.set_network(Network::Testnet); //! builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); -//! builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); +//! builder.set_gossip_source_rgs( +//! "https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string(), +//! ); //! //! let node = builder.build().unwrap(); //! @@ -67,7 +70,6 @@ //! [`stop`]: Node::stop //! [`open_channel`]: Node::open_channel //! [`send`]: Bolt11Payment::send -//! 
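Condensing the lazy-delete semantics the VSS test above exercises: lazy == true only queues the delete in pending_lazy_deletes, and the entry actually disappears once the next write flushes the queue as the delete_items of its PutObjectRequest. A sketch, with store construction elided and hypothetical namespaces/keys:

	// Queue the delete; the key remains readable for now.
	KVStore::remove(&store, "ns", "", "stale_key", /* lazy */ true).await?;
	assert!(KVStore::read(&store, "ns", "", "stale_key").await.is_ok());

	// Any subsequent write piggybacks the queued deletes onto its request...
	KVStore::write(&store, "ns", "", "other_key", vec![42]).await?;

	// ...after which the lazily removed key is gone.
	let err = KVStore::read(&store, "ns", "", "stale_key").await.unwrap_err();
	assert_eq!(err.kind(), ErrorKind::NotFound);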
#![cfg_attr(not(feature = "uniffi"), deny(missing_docs))] #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] @@ -84,6 +86,7 @@ mod data_store; mod error; mod event; mod fee_estimator; +mod ffi; mod gossip; pub mod graph; mod hex_utils; @@ -93,85 +96,83 @@ pub mod logger; mod message_handler; pub mod payment; mod peer_store; -mod sweep; +mod runtime; +mod scoring; mod tx_broadcaster; mod types; -#[cfg(feature = "uniffi")] -mod uniffi_types; mod wallet; -pub use bip39; -pub use bitcoin; -pub use lightning; -pub use lightning_invoice; -pub use lightning_liquidity; -pub use lightning_types; use std::collections::HashMap; -pub use vss_client; +use std::default::Default; +use std::net::ToSocketAddrs; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; -pub use error::Error as NodeError; -use error::Error; - -pub use event::Event; - -pub use io::utils::generate_entropy_mnemonic; - -#[cfg(feature = "uniffi")] -use uniffi_types::*; - +use bitcoin::secp256k1::PublicKey; +use bitcoin::{Address, Amount}; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; #[cfg(not(feature = "uniffi"))] pub use builder::NodeBuilder as Builder; - use chain::ChainSource; use config::{ - default_user_config, may_announce_channel, ChannelConfig, Config, - BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, + default_user_config, may_announce_channel, AsyncPaymentsRole, ChannelConfig, Config, NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; +pub use error::Error as NodeError; +use error::Error; +pub use event::Event; use event::{EventHandler, EventQueue}; +use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; +#[cfg(feature = "uniffi")] +use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; +pub use io::utils::generate_entropy_mnemonic; use io::utils::write_node_metrics; +use lightning::chain::BestBlock; +use lightning::events::bump_transaction::{Input, Wallet as LdkWallet}; +use lightning::impl_writeable_tlv_based; +use lightning::ln::chan_utils::{make_funding_redeemscript, FUNDING_TRANSACTION_WITNESS_WEIGHT}; +use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; +use lightning::ln::channelmanager::PaymentId; +use lightning::ln::funding::SpliceContribution; +use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeAlias; +use lightning::util::persist::KVStoreSync; +use lightning_background_processor::process_events_async; use liquidity::{LSPS1Liquidity, LiquiditySource}; +use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; +use payment::asynchronous::om_mailbox::OnionMessageMailbox; +use payment::asynchronous::static_invoice_store::StaticInvoiceStore; use payment::{ Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment, UnifiedQrPayment, }; use peer_store::{PeerInfo, PeerStore}; + +use rand::Rng; +use runtime::Runtime; use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, - KeysManager, OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, + Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, Graph, KeysManager, + OnionMessenger, PaymentStore, PeerManager, Router, Scorer, Sweeper, Wallet, +}; +pub use 
types::{ + ChannelDetails, CustomTlvRecord, DynStore, PeerDetails, SyncAndAsyncKVStore, UserChannelId, + WordCount, +}; +pub use { + bip39, bitcoin, lightning, lightning_invoice, lightning_liquidity, lightning_types, tokio, + vss_client, }; -pub use types::{ChannelDetails, CustomTlvRecord, KeyValue, PeerDetails, TlvEntry, UserChannelId}; -#[cfg(feature = "uniffi")] -use types::{MigrateStorage, ResetState}; - -use logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; - -use lightning::chain::BestBlock; -use lightning::events::bump_transaction::Wallet as LdkWallet; -use lightning::impl_writeable_tlv_based; -use lightning::ln::channel_state::ChannelShutdownState; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::msgs::SocketAddress; -use lightning::routing::gossip::NodeAlias; - -use lightning_background_processor::process_events_async; -use bitcoin::secp256k1::PublicKey; +use crate::scoring::setup_background_pathfinding_scores_sync; -use rand::Rng; - -use std::default::Default; -use std::net::ToSocketAddrs; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use crate::types::KeyValue; +pub use crate::types::{MigrateStorage, ResetState, TlvEntry}; #[cfg(feature = "uniffi")] uniffi::include_scaffolding!("ldk_node"); @@ -180,15 +181,14 @@ uniffi::include_scaffolding!("ldk_node"); /// /// Needs to be initialized and instantiated through [`Builder::build`]. pub struct Node { - runtime: Arc<RwLock<Option<Arc<tokio::runtime::Runtime>>>>, + runtime: Arc<Runtime>, stop_sender: tokio::sync::watch::Sender<()>, - background_processor_task: Mutex<Option<tokio::task::JoinHandle<()>>>, - background_tasks: Mutex<Option<tokio::task::JoinSet<()>>>, - cancellable_background_tasks: Mutex<Option<tokio::task::JoinSet<()>>>, + background_processor_stop_sender: tokio::sync::watch::Sender<()>, config: Arc<Config>, wallet: Arc<Wallet>, chain_source: Arc<ChainSource>, tx_broadcaster: Arc<Broadcaster>, + fee_estimator: Arc<OnchainFeeEstimator>, event_queue: Arc<EventQueue<Arc<Logger>>>, channel_manager: Arc<ChannelManager>, chain_monitor: Arc<ChainMonitor>, @@ -199,6 +199,7 @@ pub struct Node { keys_manager: Arc<KeysManager>, network_graph: Arc<Graph>, gossip_source: Arc<GossipSource>, + pathfinding_scores_sync_url: Option<String>, liquidity_source: Option<Arc<LiquiditySource<Arc<Logger>>>>, kv_store: Arc<DynStore>, logger: Arc<Logger>, @@ -206,42 +207,28 @@ pub struct Node { scorer: Arc<Mutex<Scorer>>, peer_store: Arc<PeerStore<Arc<Logger>>>, payment_store: Arc<PaymentStore>, - is_listening: Arc<AtomicBool>, + is_running: Arc<RwLock<bool>>, node_metrics: Arc<RwLock<NodeMetrics>>, + om_mailbox: Option<Arc<OnionMessageMailbox>>, + async_payments_role: Option<AsyncPaymentsRole>, } impl Node { /// Starts the necessary background tasks, such as handling events coming from user input, /// LDK/BDK, and the peer-to-peer network. /// - /// After this returns, the [`Node`] instance can be controlled via the provided API methods in - /// a thread-safe manner. - pub fn start(&self) -> Result<(), Error> { - let runtime = - Arc::new(tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap()); - self.start_with_runtime(runtime) - } - - /// Starts the necessary background tasks (such as handling events coming from user input, - /// LDK/BDK, and the peer-to-peer network) on the given `runtime`. - /// - /// This allows to have LDK Node reuse an outer pre-existing runtime, e.g., to avoid stacking Tokio - /// runtime contexts. + /// This will try to auto-detect an outer pre-existing runtime, e.g., to avoid stacking Tokio + /// runtime contexts. Note that the outer runtime must use Tokio's multi-threaded + /// (`multi_thread`) flavor. /// /// After this returns, the [`Node`] instance can be controlled via the provided API methods in /// a thread-safe manner. 
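+ ///
+ /// A minimal usage sketch (hedged; builder configuration is elided and `unwrap` stands in
+ /// for real error handling):
+ ///
+ /// ```no_run
+ /// // Assumes a `Builder` configured elsewhere (storage dir, chain source, network, ...).
+ /// let node = ldk_node::Builder::new().build().unwrap();
+ /// // `start()` picks up an ambient multi-threaded Tokio runtime if one exists, and
+ /// // otherwise spawns its own.
+ /// node.start().unwrap();
+ /// ```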
- pub fn start_with_runtime(&self, runtime: Arc) -> Result<(), Error> { + pub fn start(&self) -> Result<(), Error> { // Acquire a run lock and hold it until we're setup. - let mut runtime_lock = self.runtime.write().unwrap(); - if runtime_lock.is_some() { - // We're already running. + let mut is_running_lock = self.is_running.write().unwrap(); + if *is_running_lock { return Err(Error::AlreadyRunning); } - let mut background_tasks = tokio::task::JoinSet::new(); - let mut cancellable_background_tasks = tokio::task::JoinSet::new(); - let runtime_handle = runtime.handle(); - log_info!( self.logger, "Starting up LDK Node with node ID {} on network: {}", @@ -250,37 +237,33 @@ impl Node { ); // Start up any runtime-dependant chain sources (e.g. Electrum) - self.chain_source.start(Arc::clone(&runtime)).map_err(|e| { + self.chain_source.start(Arc::clone(&self.runtime)).map_err(|e| { log_error!(self.logger, "Failed to start chain syncing: {}", e); e })?; // Block to ensure we update our fee rate cache once on startup let chain_source = Arc::clone(&self.chain_source); - let runtime_ref = &runtime; - tokio::task::block_in_place(move || { - runtime_ref.block_on(async move { chain_source.update_fee_rate_estimates().await }) - })?; + self.runtime.block_on(async move { chain_source.update_fee_rate_estimates().await })?; // Spawn background task continuously syncing onchain, lightning, and fee rate cache. let stop_sync_receiver = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); + let sync_wallet = Arc::clone(&self.wallet); let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - background_tasks.spawn_on( - async move { - chain_source - .continuously_sync_wallets( - stop_sync_receiver, - sync_cman, - sync_cmon, - sync_sweeper, - ) - .await; - }, - runtime_handle, - ); + self.runtime.spawn_background_task(async move { + chain_source + .continuously_sync_wallets( + stop_sync_receiver, + sync_wallet, + sync_cman, + sync_cmon, + sync_sweeper, + ) + .await; + }); if self.gossip_source.is_rgs() { let gossip_source = Arc::clone(&self.gossip_source); @@ -288,7 +271,7 @@ impl Node { let gossip_sync_logger = Arc::clone(&self.logger); let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); loop { tokio::select! 
{ @@ -300,11 +283,10 @@ impl Node { return; } _ = interval.tick() => { - let gossip_sync_logger = Arc::clone(&gossip_sync_logger); let now = Instant::now(); match gossip_source.update_rgs_snapshot().await { Ok(updated_timestamp) => { - log_trace!( + log_info!( gossip_sync_logger, "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() @@ -329,15 +311,25 @@ impl Node { } } } - }, runtime_handle); + }); + } + + if let Some(pathfinding_scores_sync_url) = self.pathfinding_scores_sync_url.as_ref() { + setup_background_pathfinding_scores_sync( + pathfinding_scores_sync_url.clone(), + Arc::clone(&self.scorer), + Arc::clone(&self.node_metrics), + Arc::clone(&self.kv_store), + Arc::clone(&self.logger), + Arc::clone(&self.runtime), + self.stop_sender.subscribe(), + ); } if let Some(listening_addresses) = &self.config.listening_addresses { // Setup networking let peer_manager_connection_handler = Arc::clone(&self.peer_manager); - let mut stop_listen = self.stop_sender.subscribe(); let listening_logger = Arc::clone(&self.logger); - let listening_indicator = Arc::clone(&self.is_listening); let mut bind_addrs = Vec::with_capacity(listening_addresses.len()); @@ -355,45 +347,62 @@ impl Node { bind_addrs.extend(resolved_address); } - cancellable_background_tasks.spawn_on(async move { - { - let listener = - tokio::net::TcpListener::bind(&*bind_addrs).await - .unwrap_or_else(|e| { - log_error!(listening_logger, "Failed to bind to listen addresses/ports - is something else already listening on it?: {}", e); - panic!( - "Failed to bind to listen address/port - is something else already listening on it?", - ); - }); - - listening_indicator.store(true, Ordering::Release); + let logger = Arc::clone(&listening_logger); + let listeners = self.runtime.block_on(async move { + let mut listeners = Vec::new(); - loop { - let peer_mgr = Arc::clone(&peer_manager_connection_handler); - tokio::select! { - _ = stop_listen.changed() => { - log_debug!( - listening_logger, - "Stopping listening to inbound connections." + // Try to bind to all addresses + for addr in &*bind_addrs { + match tokio::net::TcpListener::bind(addr).await { + Ok(listener) => { + log_trace!(logger, "Listener bound to {}", addr); + listeners.push(listener); + }, + Err(e) => { + log_error!( + logger, + "Failed to bind to {}: {} - is something else already listening?", + addr, + e ); - break; - } - res = listener.accept() => { - let tcp_stream = res.unwrap().0; - tokio::spawn(async move { - lightning_net_tokio::setup_inbound( - Arc::clone(&peer_mgr), - tcp_stream.into_std().unwrap(), - ) - .await; - }); - } + return Err(Error::InvalidSocketAddress); + }, } } - } - listening_indicator.store(false, Ordering::Release); - }, runtime_handle); + Ok(listeners) + })?; + + for listener in listeners { + let logger = Arc::clone(&listening_logger); + let peer_mgr = Arc::clone(&peer_manager_connection_handler); + let mut stop_listen = self.stop_sender.subscribe(); + let runtime = Arc::clone(&self.runtime); + self.runtime.spawn_cancellable_background_task(async move { + loop { + tokio::select! { + _ = stop_listen.changed() => { + log_debug!( + logger, + "Stopping listening to inbound connections." 
+ ); + break; + } + res = listener.accept() => { + let tcp_stream = res.unwrap().0; + let peer_mgr = Arc::clone(&peer_mgr); + runtime.spawn_cancellable_background_task(async move { + lightning_net_tokio::setup_inbound( + Arc::clone(&peer_mgr), + tcp_stream.into_std().unwrap(), + ) + .await; + }); + } + } + } + }); + } } // Regularly reconnect to persisted peers. @@ -402,7 +411,7 @@ impl Node { let connect_logger = Arc::clone(&self.logger); let connect_peer_store = Arc::clone(&self.peer_store); let mut stop_connect = self.stop_sender.subscribe(); - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(PEER_RECONNECTION_INTERVAL); interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); @@ -455,7 +464,7 @@ impl Node { // increase backoff randomly e.g. for the first 6 iterations: // 1, [2-3], [3-5], [4-7], [5-9], [6-11], [7-13] let mut new_peer_retry_backoff = peer_retry_backoff + 1; - new_peer_retry_backoff += rand::thread_rng().gen_range(0..new_peer_retry_backoff); + new_peer_retry_backoff += rand::rng().random_range(0..new_peer_retry_backoff); if new_peer_retry_backoff > 360 { new_peer_retry_backoff = 360 // 360 * 10 seconds = approx 1 hour maximum backoff } @@ -467,7 +476,7 @@ impl Node { } } } - }, runtime_handle); + }); // Regularly broadcast node announcements. let bcast_cm = Arc::clone(&self.channel_manager); @@ -479,7 +488,7 @@ impl Node { let mut stop_bcast = self.stop_sender.subscribe(); let node_alias = self.config.node_alias.clone(); if may_announce_channel(&self.config).is_ok() { - cancellable_background_tasks.spawn_on(async move { + self.runtime.spawn_cancellable_background_task(async move { // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. #[cfg(not(test))] let mut interval = tokio::time::interval(Duration::from_secs(30)); @@ -550,30 +559,13 @@ impl Node { } } } - }, runtime_handle); + }); } - let mut stop_tx_bcast = self.stop_sender.subscribe(); + let stop_tx_bcast = self.stop_sender.subscribe(); let chain_source = Arc::clone(&self.chain_source); - let tx_bcast_logger = Arc::clone(&self.logger); - runtime.spawn(async move { - // Every second we try to clear our broadcasting queue. - let mut interval = tokio::time::interval(Duration::from_secs(1)); - interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! 
{ - _ = stop_tx_bcast.changed() => { - log_debug!( - tx_bcast_logger, - "Stopping broadcasting transactions.", - ); - return; - } - _ = interval.tick() => { - chain_source.process_broadcast_queue().await; - } - } - } + self.runtime.spawn_cancellable_background_task(async move { + chain_source.continuously_process_broadcast_queue(stop_tx_bcast).await }); let bump_tx_event_handler = Arc::new(BumpTransactionEventHandler::new( @@ -583,6 +575,13 @@ impl Node { Arc::clone(&self.logger), )); + let static_invoice_store = if let Some(AsyncPaymentsRole::Server) = self.async_payments_role + { + Some(StaticInvoiceStore::new(Arc::clone(&self.kv_store))) + } else { + None + }; + let event_handler = Arc::new(EventHandler::new( Arc::clone(&self.event_queue), Arc::clone(&self.wallet), @@ -594,6 +593,9 @@ impl Node { self.liquidity_source.clone(), Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), + static_invoice_store, + Arc::clone(&self.onion_messenger), + self.om_mailbox.clone(), Arc::clone(&self.runtime), Arc::clone(&self.logger), Arc::clone(&self.config), @@ -606,11 +608,14 @@ impl Node { let background_chan_man = Arc::clone(&self.channel_manager); let background_gossip_sync = self.gossip_source.as_gossip_sync(); let background_peer_man = Arc::clone(&self.peer_manager); + let background_liquidity_man_opt = + self.liquidity_source.as_ref().map(|ls| ls.liquidity_manager()); + let background_sweeper = Arc::clone(&self.output_sweeper); let background_onion_messenger = Arc::clone(&self.onion_messenger); let background_logger = Arc::clone(&self.logger); let background_error_logger = Arc::clone(&self.logger); let background_scorer = Arc::clone(&self.scorer); - let stop_bp = self.stop_sender.subscribe(); + let stop_bp = self.background_processor_stop_sender.subscribe(); let sleeper_logger = Arc::clone(&self.logger); let sleeper = move |d| { let mut stop = stop_bp.clone(); @@ -631,7 +636,7 @@ impl Node { }) }; - let handle = runtime.spawn(async move { + self.runtime.spawn_background_processor_task(async move { process_events_async( background_persister, |e| background_event_handler.handle_event(e), @@ -640,6 +645,8 @@ impl Node { Some(background_onion_messenger), background_gossip_sync, background_peer_man, + background_liquidity_man_opt, + Some(background_sweeper), background_logger, Some(background_scorer), sleeper, @@ -652,41 +659,29 @@ impl Node { panic!("Failed to process events"); }); }); - debug_assert!(self.background_processor_task.lock().unwrap().is_none()); - *self.background_processor_task.lock().unwrap() = Some(handle); if let Some(liquidity_source) = self.liquidity_source.as_ref() { let mut stop_liquidity_handler = self.stop_sender.subscribe(); let liquidity_handler = Arc::clone(&liquidity_source); let liquidity_logger = Arc::clone(&self.logger); - background_tasks.spawn_on( - async move { - loop { - tokio::select! { - _ = stop_liquidity_handler.changed() => { - log_debug!( - liquidity_logger, - "Stopping processing liquidity events.", - ); - return; - } - _ = liquidity_handler.handle_next_event() => {} + self.runtime.spawn_background_task(async move { + loop { + tokio::select! 
{ + _ = stop_liquidity_handler.changed() => { + log_debug!( + liquidity_logger, + "Stopping processing liquidity events.", + ); + return; } + _ = liquidity_handler.handle_next_event() => {} } - }, - runtime_handle, - ); + } + }); } - *runtime_lock = Some(runtime); - - debug_assert!(self.background_tasks.lock().unwrap().is_none()); - *self.background_tasks.lock().unwrap() = Some(background_tasks); - - debug_assert!(self.cancellable_background_tasks.lock().unwrap().is_none()); - *self.cancellable_background_tasks.lock().unwrap() = Some(cancellable_background_tasks); - log_info!(self.logger, "Startup complete."); + *is_running_lock = true; Ok(()) } @@ -694,141 +689,71 @@ impl Node { /// /// After this returns most API methods will return [`Error::NotRunning`]. pub fn stop(&self) -> Result<(), Error> { - let runtime = self.runtime.write().unwrap().take().ok_or(Error::NotRunning)?; - #[cfg(tokio_unstable)] - let metrics_runtime = Arc::clone(&runtime); + let mut is_running_lock = self.is_running.write().unwrap(); + if !*is_running_lock { + return Err(Error::NotRunning); + } log_info!(self.logger, "Shutting down LDK Node with node ID {}...", self.node_id()); - // Stop any runtime-dependant chain sources. - self.chain_source.stop(); - - // Stop the runtime. - match self.stop_sender.send(()) { - Ok(_) => log_trace!(self.logger, "Sent shutdown signal to background tasks."), - Err(e) => { + // Stop background tasks. + self.stop_sender + .send(()) + .map(|_| { + log_trace!(self.logger, "Sent shutdown signal to background tasks."); + }) + .unwrap_or_else(|e| { log_error!( self.logger, "Failed to send shutdown signal. This should never happen: {}", e ); debug_assert!(false); - }, - } + }); // Cancel cancellable background tasks - if let Some(mut tasks) = self.cancellable_background_tasks.lock().unwrap().take() { - let runtime_2 = Arc::clone(&runtime); - tasks.abort_all(); - tokio::task::block_in_place(move || { - runtime_2.block_on(async { while let Some(_) = tasks.join_next().await {} }) - }); - } else { - debug_assert!(false, "Expected some cancellable background tasks"); - }; + self.runtime.abort_cancellable_background_tasks(); // Disconnect all peers. self.peer_manager.disconnect_all_peers(); log_debug!(self.logger, "Disconnected all network peers."); + // Wait until non-cancellable background tasks (mod LDK's background processor) are done. + self.runtime.wait_on_background_tasks(); + // Stop any runtime-dependant chain sources. self.chain_source.stop(); log_debug!(self.logger, "Stopped chain sources."); - // Wait until non-cancellable background tasks (mod LDK's background processor) are done. 
- let runtime_3 = Arc::clone(&runtime); - if let Some(mut tasks) = self.background_tasks.lock().unwrap().take() { - tokio::task::block_in_place(move || { - runtime_3.block_on(async { - loop { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS), - tasks.join_next_with_id(), - ); - match timeout_fut.await { - Ok(Some(Ok((id, _)))) => { - log_trace!(self.logger, "Stopped background task with id {}", id); - }, - Ok(Some(Err(e))) => { - tasks.abort_all(); - log_trace!(self.logger, "Stopping background task failed: {}", e); - break; - }, - Ok(None) => { - log_debug!(self.logger, "Stopped all background tasks"); - break; - }, - Err(e) => { - tasks.abort_all(); - log_error!( - self.logger, - "Stopping background task timed out: {}", - e - ); - break; - }, - } - } - }) - }); - } else { - debug_assert!(false, "Expected some background tasks"); - }; - - // Wait until background processing stopped, at least until a timeout is reached. - if let Some(background_processor_task) = - self.background_processor_task.lock().unwrap().take() - { - let abort_handle = background_processor_task.abort_handle(); - let timeout_res = tokio::task::block_in_place(move || { - runtime.block_on(async { - tokio::time::timeout( - Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS), - background_processor_task, - ) - .await - }) + // Stop the background processor. + self.background_processor_stop_sender + .send(()) + .map(|_| { + log_trace!(self.logger, "Sent shutdown signal to background processor."); + }) + .unwrap_or_else(|e| { + log_error!( + self.logger, + "Failed to send shutdown signal. This should never happen: {}", + e + ); + debug_assert!(false); }); - match timeout_res { - Ok(stop_res) => match stop_res { - Ok(()) => log_debug!(self.logger, "Stopped background processing of events."), - Err(e) => { - abort_handle.abort(); - log_error!( - self.logger, - "Stopping event handling failed. This should never happen: {}", - e - ); - panic!("Stopping event handling failed. This should never happen."); - }, - }, - Err(e) => { - abort_handle.abort(); - log_error!(self.logger, "Stopping event handling timed out: {}", e); - }, - } - } else { - debug_assert!(false, "Expected a background processing task"); - }; + // Finally, wait until background processing stopped, at least until a timeout is reached. + self.runtime.wait_on_background_processor_task(); #[cfg(tokio_unstable)] - { - log_trace!( - self.logger, - "Active runtime tasks left prior to shutdown: {}", - metrics_runtime.metrics().active_tasks_count() - ); - } + self.runtime.log_metrics(); log_info!(self.logger, "Shutdown complete."); + *is_running_lock = false; Ok(()) } /// Returns the status of the [`Node`]. 
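+ ///
+ /// A hedged usage sketch (fields as defined on [`NodeStatus`] below):
+ ///
+ /// ```no_run
+ /// # fn check(node: &ldk_node::Node) {
+ /// let status = node.status();
+ /// if !status.is_running {
+ ///     // The node was stopped or never started; most API methods would
+ ///     // return `Error::NotRunning` at this point.
+ /// }
+ /// # }
+ /// ```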
pub fn status(&self) -> NodeStatus { - let is_running = self.runtime.read().unwrap().is_some(); - let is_listening = self.is_listening.load(Ordering::Acquire); + let is_running = *self.is_running.read().unwrap(); let current_best_block = self.channel_manager.current_best_block().into(); let locked_node_metrics = self.node_metrics.read().unwrap(); let latest_lightning_wallet_sync_timestamp = @@ -839,6 +764,8 @@ impl Node { locked_node_metrics.latest_fee_rate_cache_update_timestamp; let latest_rgs_snapshot_timestamp = locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); + let latest_pathfinding_scores_sync_timestamp = + locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = locked_node_metrics.latest_node_announcement_broadcast_timestamp; let latest_channel_monitor_archival_height = @@ -846,12 +773,12 @@ impl Node { NodeStatus { is_running, - is_listening, current_best_block, latest_lightning_wallet_sync_timestamp, latest_onchain_wallet_sync_timestamp, latest_fee_rate_cache_update_timestamp, latest_rgs_snapshot_timestamp, + latest_pathfinding_scores_sync_timestamp, latest_node_announcement_broadcast_timestamp, latest_channel_monitor_archival_height, } @@ -895,14 +822,20 @@ impl Node { /// **Caution:** Users must handle events as quickly as possible to prevent a large event backlog, /// which can increase the memory footprint of [`Node`]. pub fn wait_next_event(&self) -> Event { - self.event_queue.wait_next_event() + let fut = self.event_queue.next_event_async(); + // We use our runtime for the sync variant to ensure `tokio::task::block_in_place` is + // always called if we'd ever hit this in an outer runtime context. + self.runtime.block_on(fut) } /// Confirm the last retrieved event handled. /// /// **Note:** This **MUST** be called after each event has been handled. pub fn event_handled(&self) -> Result<(), Error> { - self.event_queue.event_handled().map_err(|e| { + // We use our runtime for the sync variant to ensure `tokio::task::block_in_place` is + // always called if we'd ever hit this in an outer runtime context. 
+ let fut = self.event_queue.event_handled(); + self.runtime.block_on(fut).map_err(|e| { log_error!( self.logger, "Couldn't mark event handled due to persistence failure: {}", @@ -948,6 +881,7 @@ impl Node { Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -965,6 +899,7 @@ impl Node { Arc::clone(&self.payment_store), Arc::clone(&self.peer_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), )) } @@ -975,10 +910,12 @@ impl Node { #[cfg(not(feature = "uniffi"))] pub fn bolt12_payment(&self) -> Bolt12Payment { Bolt12Payment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), + self.async_payments_role, ) } @@ -988,10 +925,12 @@ impl Node { #[cfg(feature = "uniffi")] pub fn bolt12_payment(&self) -> Arc<Bolt12Payment> { Arc::new(Bolt12Payment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), + Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), + self.async_payments_role, )) } @@ -999,11 +938,11 @@ impl Node { #[cfg(not(feature = "uniffi"))] pub fn spontaneous_payment(&self) -> SpontaneousPayment { SpontaneousPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.keys_manager), Arc::clone(&self.payment_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -1012,11 +951,11 @@ impl Node { #[cfg(feature = "uniffi")] pub fn spontaneous_payment(&self) -> Arc<SpontaneousPayment> { Arc::new(SpontaneousPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.keys_manager), Arc::clone(&self.payment_store), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), )) } @@ -1025,10 +964,10 @@ impl Node { #[cfg(not(feature = "uniffi"))] pub fn onchain_payment(&self) -> OnchainPayment { OnchainPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.wallet), Arc::clone(&self.channel_manager), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), ) } @@ -1037,10 +976,10 @@ impl Node { #[cfg(feature = "uniffi")] pub fn onchain_payment(&self) -> Arc<OnchainPayment> { Arc::new(OnchainPayment::new( - Arc::clone(&self.runtime), Arc::clone(&self.wallet), Arc::clone(&self.channel_manager), Arc::clone(&self.config), + Arc::clone(&self.is_running), Arc::clone(&self.logger), )) } @@ -1118,11 +1057,9 @@ impl Node { pub fn connect( &self, node_id: PublicKey, address: SocketAddress, persist: bool, ) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let runtime = rt_lock.as_ref().unwrap(); let peer_info = PeerInfo { node_id, address }; @@ -1132,10 +1069,8 @@ impl Node { // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; log_info!(self.logger, "Connected to peer {}@{}. 
", peer_info.node_id, peer_info.address); @@ -1152,8 +1087,7 @@ impl Node { /// Will also remove the peer from the peer store, i.e., after this has been called we won't /// try to reconnect on restart. pub fn disconnect(&self, counterparty_node_id: PublicKey) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } @@ -1175,11 +1109,9 @@ impl Node { push_to_counterparty_msat: Option, channel_config: Option, announce_for_forwarding: bool, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let runtime = rt_lock.as_ref().unwrap(); let peer_info = PeerInfo { node_id, address }; @@ -1187,52 +1119,14 @@ impl Node { let con_addr = peer_info.address.clone(); let con_cm = Arc::clone(&self.connection_manager); - let cur_anchor_reserve_sats = - total_anchor_channels_reserve_sats(&self.channel_manager, &self.config); - let spendable_amount_sats = - self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0); - - // Fail early if we have less than the channel value available. - if spendable_amount_sats < channel_amount_sats { - log_error!(self.logger, - "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", - spendable_amount_sats, channel_amount_sats - ); - return Err(Error::InsufficientFunds); - } - // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; - // Fail if we have less than the channel value + anchor reserve available (if applicable). - let init_features = self - .peer_manager - .peer_by_node_id(&node_id) - .ok_or(Error::ConnectionFailed)? - .init_features; - let required_funds_sats = channel_amount_sats - + self.config.anchor_channels_config.as_ref().map_or(0, |c| { - if init_features.requires_anchors_zero_fee_htlc_tx() - && !c.trusted_peers_no_reserve.contains(&node_id) - { - c.per_channel_reserve_sats - } else { - 0 - } - }); - - if spendable_amount_sats < required_funds_sats { - log_error!(self.logger, - "Unable to create channel due to insufficient funds. 
Available: {}sats, Required: {}sats", - spendable_amount_sats, required_funds_sats - ); - return Err(Error::InsufficientFunds); - } + // Check funds availability after connection (includes anchor reserve calculation) + self.check_sufficient_funds_for_channel(channel_amount_sats, &node_id)?; let mut user_config = default_user_config(&self.config); user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding; @@ -1251,7 +1145,7 @@ impl Node { 100; let push_msat = push_to_counterparty_msat.unwrap_or(0); - let user_channel_id: u128 = rand::thread_rng().gen::(); + let user_channel_id: u128 = rand::rng().random(); match self.channel_manager.create_channel( peer_info.node_id, @@ -1277,6 +1171,51 @@ impl Node { } } + fn check_sufficient_funds_for_channel( + &self, amount_sats: u64, peer_node_id: &PublicKey, + ) -> Result<(), Error> { + let cur_anchor_reserve_sats = + total_anchor_channels_reserve_sats(&self.channel_manager, &self.config); + let spendable_amount_sats = + self.wallet.get_spendable_amount_sats(cur_anchor_reserve_sats).unwrap_or(0); + + // Fail early if we have less than the channel value available. + if spendable_amount_sats < amount_sats { + log_error!(self.logger, + "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", + spendable_amount_sats, amount_sats + ); + return Err(Error::InsufficientFunds); + } + + // Fail if we have less than the channel value + anchor reserve available (if applicable). + let init_features = self + .peer_manager + .peer_by_node_id(peer_node_id) + .ok_or(Error::ConnectionFailed)? + .init_features; + let required_funds_sats = amount_sats + + self.config.anchor_channels_config.as_ref().map_or(0, |c| { + if init_features.requires_anchors_zero_fee_htlc_tx() + && !c.trusted_peers_no_reserve.contains(peer_node_id) + { + c.per_channel_reserve_sats + } else { + 0 + } + }); + + if spendable_amount_sats < required_funds_sats { + log_error!(self.logger, + "Unable to create channel due to insufficient funds. Available: {}sats, Required: {}sats", + spendable_amount_sats, required_funds_sats + ); + return Err(Error::InsufficientFunds); + } + + Ok(()) + } + /// Connect to a node and open a new unannounced channel. /// /// To open an announced channel, see [`Node::open_announced_channel`]. @@ -1348,31 +1287,196 @@ impl Node { ) } + /// Add funds from the on-chain wallet into an existing channel. + /// + /// This provides for increasing a channel's outbound liquidity without re-balancing or closing + /// it. Once negotiation with the counterparty is complete, the channel remains operational + /// while waiting for a new funding transaction to confirm. + /// + /// # Experimental API + /// + /// This API is experimental. Currently, a splice-in will be marked as an outbound payment, but + /// this classification may change in the future. + pub fn splice_in( + &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, + splice_amount_sats: u64, + ) -> Result<(), Error> { + let open_channels = + self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); + if let Some(channel_details) = + open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) + { + self.check_sufficient_funds_for_channel(splice_amount_sats, &counterparty_node_id)?; + + const EMPTY_SCRIPT_SIG_WEIGHT: u64 = + 1 /* empty script_sig */ * bitcoin::constants::WITNESS_SCALE_FACTOR as u64; + + // Used for creating a redeem script for the previous funding txo and the new funding + // txo. 
Only needed when selecting which UTXOs to include in the funding tx that would + // be sufficient to pay for fees. Hence, the value does not matter. + let dummy_pubkey = PublicKey::from_slice(&[2; 33]).unwrap(); + + let funding_txo = channel_details.funding_txo.ok_or_else(|| { + log_error!(self.logger, "Failed to splice channel: channel not yet ready",); + Error::ChannelSplicingFailed + })?; + + let shared_input = Input { + outpoint: funding_txo.into_bitcoin_outpoint(), + previous_utxo: bitcoin::TxOut { + value: Amount::from_sat(channel_details.channel_value_satoshis), + script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey) + .to_p2wsh(), + }, + satisfaction_weight: EMPTY_SCRIPT_SIG_WEIGHT + FUNDING_TRANSACTION_WITNESS_WEIGHT, + }; + + let shared_output = bitcoin::TxOut { + value: shared_input.previous_utxo.value + Amount::from_sat(splice_amount_sats), + script_pubkey: make_funding_redeemscript(&dummy_pubkey, &dummy_pubkey).to_p2wsh(), + }; + + let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); + + let inputs = self + .wallet + .select_confirmed_utxos(vec![shared_input], &[shared_output], fee_rate) + .map_err(|()| { + log_error!( + self.logger, + "Failed to splice channel: insufficient confirmed UTXOs", + ); + Error::ChannelSplicingFailed + })?; + + let change_address = self.wallet.get_new_internal_address()?; + + let contribution = SpliceContribution::SpliceIn { + value: Amount::from_sat(splice_amount_sats), + inputs, + change_script: Some(change_address.script_pubkey()), + }; + + let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { + Ok(fee_rate) => fee_rate, + Err(_) => { + debug_assert!(false); + fee_estimator::get_fallback_rate_for_target(ConfirmationTarget::ChannelFunding) + }, + }; + + self.channel_manager + .splice_channel( + &channel_details.channel_id, + &counterparty_node_id, + contribution, + funding_feerate_per_kw, + None, + ) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {:?}", e); + let tx = bitcoin::Transaction { + version: bitcoin::transaction::Version::TWO, + lock_time: bitcoin::absolute::LockTime::ZERO, + input: vec![], + output: vec![bitcoin::TxOut { + value: Amount::ZERO, + script_pubkey: change_address.script_pubkey(), + }], + }; + match self.wallet.cancel_tx(&tx) { + Ok(()) => Error::ChannelSplicingFailed, + Err(e) => e, + } + }) + } else { + log_error!( + self.logger, + "Channel not found for user_channel_id {} and counterparty {}", + user_channel_id, + counterparty_node_id + ); + + Err(Error::ChannelSplicingFailed) + } + } + + /// Remove funds from an existing channel, sending them to an on-chain address. + /// + /// This provides for decreasing a channel's outbound liquidity without re-balancing or closing + /// it. Once negotiation with the counterparty is complete, the channel remains operational + /// while waiting for a new funding transaction to confirm. + /// + /// # Experimental API + /// + /// This API is experimental. Currently, a splice-out will be marked as an inbound payment if + /// paid to an address associated with the on-chain wallet, but this classification may change + /// in the future. 
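+ ///
+ /// A hedged usage sketch (identifiers assumed to be in scope; the amount is illustrative):
+ ///
+ /// ```no_run
+ /// # fn splice(
+ /// #     node: &ldk_node::Node, user_channel_id: &ldk_node::UserChannelId,
+ /// #     counterparty: bitcoin::secp256k1::PublicKey, address: &bitcoin::Address,
+ /// # ) {
+ /// // Move 50_000 sats of channel balance back to an on-chain address.
+ /// node.splice_out(user_channel_id, counterparty, address, 50_000).unwrap();
+ /// # }
+ /// ```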
+ pub fn splice_out( + &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, address: &Address, + splice_amount_sats: u64, + ) -> Result<(), Error> { + let open_channels = + self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); + if let Some(channel_details) = + open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) + { + // Note `outbound_capacity_msat` is denominated in millisatoshis. + if splice_amount_sats * 1_000 > channel_details.outbound_capacity_msat { + return Err(Error::ChannelSplicingFailed); + } + + self.wallet.parse_and_validate_address(address)?; + + let contribution = SpliceContribution::SpliceOut { + outputs: vec![bitcoin::TxOut { + value: Amount::from_sat(splice_amount_sats), + script_pubkey: address.script_pubkey(), + }], + }; + + let fee_rate = self.fee_estimator.estimate_fee_rate(ConfirmationTarget::ChannelFunding); + let funding_feerate_per_kw: u32 = match fee_rate.to_sat_per_kwu().try_into() { + Ok(fee_rate) => fee_rate, + Err(_) => { + debug_assert!(false, "FeeRate should always fit within u32"); + log_error!(self.logger, "FeeRate should always fit within u32"); + fee_estimator::get_fallback_rate_for_target(ConfirmationTarget::ChannelFunding) + }, + }; + + self.channel_manager + .splice_channel( + &channel_details.channel_id, + &counterparty_node_id, + contribution, + funding_feerate_per_kw, + None, + ) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {:?}", e); + Error::ChannelSplicingFailed + }) + } else { + log_error!( + self.logger, + "Channel not found for user_channel_id {} and counterparty {}", + user_channel_id, + counterparty_node_id + ); + Err(Error::ChannelSplicingFailed) + } + } + /// Alby: update fee estimates separately rather than doing a full sync pub fn update_fee_estimates(&self) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let chain_source = Arc::clone(&self.chain_source); - tokio::task::block_in_place(move || { - tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( - async move { - match chain_source.as_ref() { - ChainSource::Esplora { .. } => { - chain_source.update_fee_rate_estimates().await?; - }, - ChainSource::Electrum { .. } => { - chain_source.update_fee_rate_estimates().await?; - }, - ChainSource::BitcoindRpc { .. } => { - chain_source.update_fee_rate_estimates().await?; - }, - } - Ok(()) - }, - ) + self.runtime.block_on(async move { + chain_source.update_fee_rate_estimates().await?; + Ok(()) }) } @@ -1389,43 +1493,35 @@ impl Node { /// **Note:** this is currently used by Alby (combined with disabled background syncs) to have /// dynamic sync intervals. pub fn sync_wallets(&self) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } let chain_source = Arc::clone(&self.chain_source); + let sync_wallet = Arc::clone(&self.wallet); let sync_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - tokio::task::block_in_place(move || { - tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( - async move { - match chain_source.as_ref() { - ChainSource::Esplora { .. 
} => { - chain_source.update_fee_rate_estimates().await?; - chain_source - .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) - .await?; - chain_source.sync_onchain_wallet().await?; - }, - ChainSource::Electrum { .. } => { - chain_source.update_fee_rate_estimates().await?; - chain_source - .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) - .await?; - chain_source.sync_onchain_wallet().await?; - }, - ChainSource::BitcoindRpc { .. } => { - chain_source.update_fee_rate_estimates().await?; - chain_source - .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper) - .await?; - }, - } - Ok(()) - }, - ) + self.runtime.block_on(async move { + if chain_source.is_transaction_based() { + chain_source.update_fee_rate_estimates().await?; + chain_source + .sync_lightning_wallet(sync_cman, sync_cmon, Arc::clone(&sync_sweeper)) + .await?; + chain_source.sync_onchain_wallet(sync_wallet).await?; + } else { + chain_source.update_fee_rate_estimates().await?; + chain_source + .poll_and_update_listeners( + sync_wallet, + sync_cman, + sync_cmon, + Arc::clone(&sync_sweeper), + ) + .await?; + } + let _ = sync_sweeper.regenerate_and_broadcast_spend_if_necessary().await; + Ok(()) }) } @@ -1467,41 +1563,22 @@ impl Node { force_close_reason.is_none() || force, "Reason can only be set for force closures" ); - let open_channels = + let open_channels: Vec = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) { if force { - if self.config.anchor_channels_config.as_ref().map_or(false, |acc| { - acc.trusted_peers_no_reserve.contains(&counterparty_node_id) - }) { - self.channel_manager - .force_close_without_broadcasting_txn( - &channel_details.channel_id, - &counterparty_node_id, - force_close_reason.unwrap_or_default(), - ) - .map_err(|e| { - log_error!( - self.logger, - "Failed to force-close channel to trusted peer: {:?}", - e - ); - Error::ChannelClosingFailed - })?; - } else { - self.channel_manager - .force_close_broadcasting_latest_txn( - &channel_details.channel_id, - &counterparty_node_id, - force_close_reason.unwrap_or_default(), - ) - .map_err(|e| { - log_error!(self.logger, "Failed to force-close channel: {:?}", e); - Error::ChannelClosingFailed - })?; - } + self.channel_manager + .force_close_broadcasting_latest_txn( + &channel_details.channel_id, + &counterparty_node_id, + force_close_reason.unwrap_or_default(), + ) + .map_err(|e| { + log_error!(self.logger, "Failed to force-close channel: {:?}", e); + Error::ChannelClosingFailed + })?; } else { self.channel_manager .close_channel(&channel_details.channel_id, &counterparty_node_id) @@ -1525,7 +1602,7 @@ impl Node { &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, channel_config: ChannelConfig, ) -> Result<(), Error> { - let open_channels = + let open_channels: Vec = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = open_channels.iter().find(|c| c.user_channel_id == user_channel_id.0) @@ -1554,18 +1631,11 @@ impl Node { self.payment_store.remove(&payment_id) } - /// Alby: Used to recover funds after restoring static channel backup - pub fn force_close_all_channels_without_broadcasting_txn(&self) { - self.channel_manager.force_close_all_channels_without_broadcasting_txn( - "lost or corrupted channel state".to_string(), - ); - } - /// Alby: Return encoded channel monitors for a recovery of last resort pub fn 
get_encoded_channel_monitors(&self) -> Result<Vec<KeyValue>, Error> { - let channel_monitor_store = Arc::clone(&self.kv_store); + let channel_monitor_store: &dyn KVStoreSync = &*self.kv_store; let channel_monitor_logger = Arc::clone(&self.logger); - let keys = channel_monitor_store.list("monitors", "").map_err(|e| { + let keys = KVStoreSync::list(channel_monitor_store, "monitors", "").map_err(|e| { log_error!(channel_monitor_logger, "Failed to get monitor keys: {}", e); Error::ConnectionFailed })?; @@ -1573,10 +1643,11 @@ let mut entries = Vec::new(); for key in keys { - let value = channel_monitor_store.read("monitors", "", &key).map_err(|e| { - log_error!(channel_monitor_logger, "Failed to get monitor value: {}", e); - Error::ConnectionFailed - })?; + let value = + KVStoreSync::read(channel_monitor_store, "monitors", "", &key).map_err(|e| { + log_error!(channel_monitor_logger, "Failed to get monitor value: {}", e); + Error::ConnectionFailed + })?; entries.push(KeyValue { key, value }) } @@ -1598,14 +1669,13 @@ let mut total_lightning_balance_sats = 0; let mut lightning_balances = Vec::new(); - for (funding_txo, channel_id) in self.chain_monitor.list_monitors() { - match self.chain_monitor.get_monitor(funding_txo) { + for channel_id in self.chain_monitor.list_monitors() { + match self.chain_monitor.get_monitor(channel_id) { Ok(monitor) => { + let funding_txo = monitor.get_funding_txo(); funding_txo_by_channel_id.insert(channel_id, funding_txo); - // unwrap safety: `get_counterparty_node_id` will always be `Some` after 0.0.110 and - // LDK Node 0.1 depended on 0.0.115 already. - let counterparty_node_id = monitor.get_counterparty_node_id().unwrap(); + let counterparty_node_id = monitor.get_counterparty_node_id(); for ldk_balance in monitor.get_claimable_balances() { total_lightning_balance_sats += ldk_balance.claimable_amount_satoshis(); lightning_balances.push(LightningBalance::from_ldk_balance( @@ -1631,9 +1701,9 @@ // by LDK for a while (4032 blocks since balances become empty), so we can still try to access it. // See [`periodically_archive_fully_resolved_monitors`] for details. let funding_txo = - out.channel_id.and_then(|c| funding_txo_by_channel_id.get(&c)).cloned(); - let chmon = funding_txo.and_then(|txo| self.chain_monitor.get_monitor(txo).ok()); - let counterparty_node_id = chmon.and_then(|m| m.get_counterparty_node_id()); + out.channel_id.and_then(|c| funding_txo_by_channel_id.get(&c).cloned()); + let chmon = out.channel_id.and_then(|c| self.chain_monitor.get_monitor(c).ok()); + let counterparty_node_id = chmon.map(|m| m.get_counterparty_node_id()); PendingSweepBalance::from_tracked_spendable_output( out, counterparty_node_id, @@ -1751,20 +1821,20 @@ impl Node { /// Exports the current state of the scorer. The result can be shared with and merged by light nodes that only have /// a limited view of the network. 
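+ ///
+ /// A hedged usage sketch (the returned bytes are LDK's serialized scorer state):
+ ///
+ /// ```no_run
+ /// # fn export(node: &ldk_node::Node) {
+ /// let scores: Vec<u8> = node.export_pathfinding_scores().unwrap();
+ /// // e.g., serve `scores` over HTTP so light clients can merge them.
+ /// # }
+ /// ```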
pub fn export_pathfinding_scores(&self) -> Result<Vec<u8>, Error> { - self.kv_store - .read( - lightning::util::persist::SCORER_PERSISTENCE_PRIMARY_NAMESPACE, - lightning::util::persist::SCORER_PERSISTENCE_SECONDARY_NAMESPACE, - lightning::util::persist::SCORER_PERSISTENCE_KEY, - ) - .map_err(|e| { - log_error!( - self.logger, - "Failed to access store while exporting pathfinding scores: {}", - e - ); - Error::PersistenceFailed - }) + KVStoreSync::read( + &*self.kv_store, + lightning::util::persist::SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + lightning::util::persist::SCORER_PERSISTENCE_SECONDARY_NAMESPACE, + lightning::util::persist::SCORER_PERSISTENCE_KEY, + ) + .map_err(|e| { + log_error!( + self.logger, + "Failed to access store while exporting pathfinding scores: {}", + e + ); + Error::PersistenceFailed + }) } } @@ -1779,9 +1849,6 @@ impl Drop for Node { pub struct NodeStatus { /// Indicates whether the [`Node`] is running. pub is_running: bool, - /// Indicates whether the [`Node`] is listening for incoming connections on the addresses - /// configured via [`Config::listening_addresses`]. - pub is_listening: bool, /// The best block to which our Lightning wallet is currently synced. pub current_best_block: BestBlock, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced @@ -1804,6 +1871,8 @@ pub struct NodeStatus { /// /// Will be `None` if RGS isn't configured or the snapshot hasn't been updated yet. pub latest_rgs_snapshot_timestamp: Option<u64>, + /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully merged + /// external pathfinding scores. + pub latest_pathfinding_scores_sync_timestamp: Option<u64>, /// The timestamp, in seconds since start of the UNIX epoch, when we last broadcasted a node /// announcement. /// @@ -1822,6 +1891,7 @@ pub(crate) struct NodeMetrics { latest_lightning_wallet_sync_timestamp: Option<u64>, latest_onchain_wallet_sync_timestamp: Option<u64>, latest_fee_rate_cache_update_timestamp: Option<u64>, latest_rgs_snapshot_timestamp: Option<u64>, + latest_pathfinding_scores_sync_timestamp: Option<u64>, latest_node_announcement_broadcast_timestamp: Option<u64>, latest_channel_monitor_archival_height: Option<u32>, } @@ -1833,6 +1903,7 @@ impl Default for NodeMetrics { latest_onchain_wallet_sync_timestamp: None, latest_fee_rate_cache_update_timestamp: None, latest_rgs_snapshot_timestamp: None, + latest_pathfinding_scores_sync_timestamp: None, latest_node_announcement_broadcast_timestamp: None, latest_channel_monitor_archival_height: None, } @@ -1841,6 +1912,7 @@ impl Default for NodeMetrics { impl_writeable_tlv_based!(NodeMetrics, { (0, latest_lightning_wallet_sync_timestamp, option), + (1, latest_pathfinding_scores_sync_timestamp, option), (2, latest_onchain_wallet_sync_timestamp, option), (4, latest_fee_rate_cache_update_timestamp, option), (6, latest_rgs_snapshot_timestamp, option), diff --git a/src/liquidity.rs b/src/liquidity.rs index 47f3dcce4..74e6098dd 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -7,52 +7,51 @@ //! Objects related to liquidity management. 
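+//!
+//! A hedged service-side configuration sketch (only the trust-model flag is shown; the
+//! remaining `LSPS2ServiceConfig` fields -- fees, payment-size limits, etc. -- must be set
+//! as appropriate for a deployment and are elided here):
+//!
+//! ```ignore
+//! let service_config = LSPS2ServiceConfig {
+//!     // Delay broadcasting JIT-channel funding transactions until the client has
+//!     // claimed enough HTLCs to pay for the channel open (see bLIP-52).
+//!     client_trusts_lsp: true,
+//!     // ...remaining fields elided...
+//! };
+//! ```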
-use crate::chain::ChainSource; -use crate::connection::ConnectionManager; -use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; -use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager, Wallet}; -use crate::{total_anchor_channels_reserve_sats, Config, Error}; +use std::collections::HashMap; +use std::ops::Deref; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::Duration; -use lightning::events::HTLCDestination; +use bitcoin::hashes::{sha256, Hash}; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; +use bitcoin::Transaction; +use chrono::Utc; +use lightning::events::HTLCHandlingFailureType; use lightning::ln::channelmanager::{InterceptId, MIN_FINAL_CLTV_EXPIRY_DELTA}; use lightning::ln::msgs::SocketAddress; use lightning::ln::types::ChannelId; use lightning::routing::router::{RouteHint, RouteHintHop}; - use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, InvoiceBuilder, RoutingFees}; - -use lightning_liquidity::events::Event; -use lightning_liquidity::lsps0::ser::RequestId; +use lightning_liquidity::events::LiquidityEvent; +use lightning_liquidity::lsps0::ser::{LSPSDateTime, LSPSRequestId}; use lightning_liquidity::lsps1::client::LSPS1ClientConfig as LdkLSPS1ClientConfig; use lightning_liquidity::lsps1::event::LSPS1ClientEvent; -use lightning_liquidity::lsps1::msgs::{ChannelInfo, LSPS1Options, OrderId, OrderParameters}; +use lightning_liquidity::lsps1::msgs::{ + LSPS1ChannelInfo, LSPS1Options, LSPS1OrderId, LSPS1OrderParams, +}; use lightning_liquidity::lsps2::client::LSPS2ClientConfig as LdkLSPS2ClientConfig; use lightning_liquidity::lsps2::event::{LSPS2ClientEvent, LSPS2ServiceEvent}; -use lightning_liquidity::lsps2::msgs::{OpeningFeeParams, RawOpeningFeeParams}; +use lightning_liquidity::lsps2::msgs::{LSPS2OpeningFeeParams, LSPS2RawOpeningFeeParams}; use lightning_liquidity::lsps2::service::LSPS2ServiceConfig as LdkLSPS2ServiceConfig; use lightning_liquidity::lsps2::utils::compute_opening_fee; use lightning_liquidity::{LiquidityClientConfig, LiquidityServiceConfig}; - use lightning_types::payment::PaymentHash; - -use bitcoin::hashes::{sha256, Hash}; -use bitcoin::secp256k1::{PublicKey, Secp256k1}; - -use tokio::sync::oneshot; - -use chrono::{DateTime, Utc}; - use rand::Rng; +use tokio::sync::oneshot; -use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::Duration; +use crate::builder::BuildError; +use crate::chain::ChainSource; +use crate::connection::ConnectionManager; +use crate::logger::{log_debug, log_error, log_info, LdkLogger, Logger}; +use crate::runtime::Runtime; +use crate::types::{ + Broadcaster, ChannelManager, DynStore, KeysManager, LiquidityManager, PeerManager, Wallet, +}; +use crate::{total_anchor_channels_reserve_sats, Config, Error}; const LIQUIDITY_REQUEST_TIMEOUT_SECS: u64 = 5; const LSPS2_GETINFO_REQUEST_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24); -const LSPS2_CLIENT_TRUSTS_LSP_MODE: bool = true; const LSPS2_CHANNEL_CLTV_EXPIRY_DELTA: u32 = 72; struct LSPS1Client { @@ -61,10 +60,10 @@ struct LSPS1Client { token: Option, ldk_client_config: LdkLSPS1ClientConfig, pending_opening_params_requests: - Mutex>>, - pending_create_order_requests: Mutex>>, + Mutex>>, + pending_create_order_requests: Mutex>>, pending_check_order_status_requests: - Mutex>>, + Mutex>>, } #[derive(Debug, Clone)] @@ -79,8 +78,8 @@ struct LSPS2Client { lsp_address: SocketAddress, token: Option, ldk_client_config: LdkLSPS2ClientConfig, - pending_fee_requests: Mutex>>, - 
pending_buy_requests: Mutex<HashMap<RequestId, oneshot::Sender<LSPS2BuyResponse>>>, + pending_fee_requests: Mutex<HashMap<LSPSRequestId, oneshot::Sender<LSPS2FeeResponse>>>, + pending_buy_requests: Mutex<HashMap<LSPSRequestId, oneshot::Sender<LSPS2BuyResponse>>>, } #[derive(Debug, Clone)] @@ -131,6 +130,19 @@ pub struct LSPS2ServiceConfig { pub min_payment_size_msat: u64, /// The maximum payment size that we will accept when opening a channel. pub max_payment_size_msat: u64, + /// Use the 'client-trusts-LSP' trust model. + /// + /// When set, the service will delay *broadcasting* the JIT channel's funding transaction until + /// the client has claimed sufficient HTLC parts to pay for the channel open. + /// + /// Note this will render the flow incompatible with clients utilizing the 'LSP-trusts-client' + /// trust model, i.e., clients that in turn delay *claiming* any HTLCs until they see the + /// funding transaction in the mempool. + /// + /// Please refer to [`bLIP-52`] for more information. + /// + /// [`bLIP-52`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models + pub client_trusts_lsp: bool, } pub(crate) struct LiquiditySourceBuilder<L: Deref> where @@ -144,6 +156,8 @@ where channel_manager: Arc<ChannelManager>, keys_manager: Arc<KeysManager>, chain_source: Arc<ChainSource>, + tx_broadcaster: Arc<Broadcaster>, + kv_store: Arc<DynStore>, config: Arc<Config>, logger: L, } @@ -154,7 +168,8 @@ where { pub(crate) fn new( wallet: Arc<Wallet>, channel_manager: Arc<ChannelManager>, keys_manager: Arc<KeysManager>, - chain_source: Arc<ChainSource>, config: Arc<Config>, logger: L, + chain_source: Arc<ChainSource>, tx_broadcaster: Arc<Broadcaster>, kv_store: Arc<DynStore>, + config: Arc<Config>, logger: L, ) -> Self { let lsps1_client = None; let lsps2_client = None; @@ -167,6 +182,8 @@ channel_manager, keys_manager, chain_source, + tx_broadcaster, + kv_store, config, logger, } @@ -217,28 +234,40 @@ self } - pub(crate) fn build(self) -> LiquiditySource<L> { + pub(crate) async fn build(self) -> Result<LiquiditySource<L>, BuildError> { let liquidity_service_config = self.lsps2_service.as_ref().map(|s| { let lsps2_service_config = Some(s.ldk_service_config.clone()); + let lsps5_service_config = None; let advertise_service = s.service_config.advertise_service; - LiquidityServiceConfig { lsps2_service_config, advertise_service } + LiquidityServiceConfig { lsps2_service_config, lsps5_service_config, advertise_service } }); let lsps1_client_config = self.lsps1_client.as_ref().map(|s| s.ldk_client_config.clone()); let lsps2_client_config = self.lsps2_client.as_ref().map(|s| s.ldk_client_config.clone()); - let liquidity_client_config = - Some(LiquidityClientConfig { lsps1_client_config, lsps2_client_config }); + let lsps5_client_config = None; + let liquidity_client_config = Some(LiquidityClientConfig { + lsps1_client_config, + lsps2_client_config, + lsps5_client_config, + }); - let liquidity_manager = Arc::new(LiquidityManager::new( - Arc::clone(&self.keys_manager), - Arc::clone(&self.channel_manager), - Some(Arc::clone(&self.chain_source)), - None, - liquidity_service_config, - liquidity_client_config, - )); + let liquidity_manager = Arc::new( + LiquidityManager::new( + Arc::clone(&self.keys_manager), + Arc::clone(&self.keys_manager), + Arc::clone(&self.channel_manager), + Some(Arc::clone(&self.chain_source)), + None, + Arc::clone(&self.kv_store), + Arc::clone(&self.tx_broadcaster), + liquidity_service_config, + liquidity_client_config, + ) + .await + .map_err(|_| BuildError::ReadFailed)?, + ); - LiquiditySource { + Ok(LiquiditySource { lsps1_client: self.lsps1_client, lsps2_client: self.lsps2_client, lsps2_service: self.lsps2_service, @@ -249,7 +278,7 @@ where liquidity_manager, config: self.config, logger: self.logger, - } + }) } } @@ -274,13 +303,11 @@ where L::Target: LdkLogger, { pub(crate) fn set_peer_manager(&self, peer_manager: Arc<PeerManager>) { - 
*self.peer_manager.write().unwrap() = Some(Arc::clone(&peer_manager)); - let process_msgs_callback = move || peer_manager.process_events(); - self.liquidity_manager.set_process_msgs_callback(process_msgs_callback); + *self.peer_manager.write().unwrap() = Some(peer_manager); } - pub(crate) fn liquidity_manager(&self) -> &LiquidityManager { - self.liquidity_manager.as_ref() + pub(crate) fn liquidity_manager(&self) -> Arc { + Arc::clone(&self.liquidity_manager) } pub(crate) fn get_lsps1_lsp_details(&self) -> Option<(PublicKey, SocketAddress)> { @@ -291,9 +318,76 @@ where self.lsps2_client.as_ref().map(|s| (s.lsp_node_id, s.lsp_address.clone())) } + pub(crate) fn lsps2_channel_needs_manual_broadcast( + &self, counterparty_node_id: PublicKey, user_channel_id: u128, + ) -> bool { + self.lsps2_service.as_ref().map_or(false, |lsps2_service| { + lsps2_service.service_config.client_trusts_lsp + && self + .liquidity_manager() + .lsps2_service_handler() + .and_then(|handler| { + handler + .channel_needs_manual_broadcast(user_channel_id, &counterparty_node_id) + .ok() + }) + .unwrap_or(false) + }) + } + + pub(crate) fn lsps2_store_funding_transaction( + &self, user_channel_id: u128, counterparty_node_id: PublicKey, funding_tx: Transaction, + ) { + if self.lsps2_service.as_ref().map_or(false, |svc| !svc.service_config.client_trusts_lsp) { + // Only necessary for client-trusts-LSP flow + return; + } + + let lsps2_service_handler = self.liquidity_manager.lsps2_service_handler(); + if let Some(handler) = lsps2_service_handler { + handler + .store_funding_transaction(user_channel_id, &counterparty_node_id, funding_tx) + .unwrap_or_else(|e| { + debug_assert!(false, "Failed to store funding transaction: {:?}", e); + log_error!(self.logger, "Failed to store funding transaction: {:?}", e); + }); + } else { + log_error!(self.logger, "LSPS2 service handler is not available."); + } + } + + pub(crate) fn lsps2_funding_tx_broadcast_safe( + &self, user_channel_id: u128, counterparty_node_id: PublicKey, + ) { + if self.lsps2_service.as_ref().map_or(false, |svc| !svc.service_config.client_trusts_lsp) { + // Only necessary for client-trusts-LSP flow + return; + } + + let lsps2_service_handler = self.liquidity_manager.lsps2_service_handler(); + if let Some(handler) = lsps2_service_handler { + handler + .set_funding_tx_broadcast_safe(user_channel_id, &counterparty_node_id) + .unwrap_or_else(|e| { + debug_assert!( + false, + "Failed to mark funding transaction safe to broadcast: {:?}", + e + ); + log_error!( + self.logger, + "Failed to mark funding transaction safe to broadcast: {:?}", + e + ); + }); + } else { + log_error!(self.logger, "LSPS2 service handler is not available."); + } + } + pub(crate) async fn handle_next_event(&self) { match self.liquidity_manager.next_event_async().await { - Event::LSPS1Client(LSPS1ClientEvent::SupportedOptionsReady { + LiquidityEvent::LSPS1Client(LSPS1ClientEvent::SupportedOptionsReady { request_id, counterparty_node_id, supported_options, @@ -346,7 +440,7 @@ where ); } }, - Event::LSPS1Client(LSPS1ClientEvent::OrderCreated { + LiquidityEvent::LSPS1Client(LSPS1ClientEvent::OrderCreated { request_id, counterparty_node_id, order_id, @@ -404,7 +498,7 @@ where log_error!(self.logger, "Received unexpected LSPS1Client::OrderCreated event!"); } }, - Event::LSPS1Client(LSPS1ClientEvent::OrderStatus { + LiquidityEvent::LSPS1Client(LSPS1ClientEvent::OrderStatus { request_id, counterparty_node_id, order_id, @@ -462,7 +556,7 @@ where log_error!(self.logger, "Received unexpected 
LSPS1Client::OrderStatus event!"); } }, - Event::LSPS2Service(LSPS2ServiceEvent::GetInfo { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::GetInfo { request_id, counterparty_node_id, token, @@ -483,7 +577,7 @@ where if token != Some(required) { log_error!( self.logger, - "Rejecting LSPS2 request {:?} from counterparty {} as the client provided an invalid token.", + "Rejecting LSPS2 request {:?} from counterparty {} as the client provided an invalid token.", request_id, counterparty_node_id ); @@ -501,10 +595,8 @@ where } } - let mut valid_until: DateTime = Utc::now(); - valid_until += LSPS2_GETINFO_REQUEST_EXPIRY; - - let opening_fee_params = RawOpeningFeeParams { + let valid_until = LSPSDateTime(Utc::now() + LSPS2_GETINFO_REQUEST_EXPIRY); + let opening_fee_params = LSPS2RawOpeningFeeParams { min_fee_msat: service_config.min_channel_opening_fee_msat, proportional: service_config.channel_opening_fee_ppm, valid_until, @@ -532,7 +624,7 @@ where return; } }, - Event::LSPS2Service(LSPS2ServiceEvent::BuyRequest { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::BuyRequest { request_id, counterparty_node_id, opening_fee_params: _, @@ -550,7 +642,7 @@ where return; }; - let user_channel_id: u128 = rand::thread_rng().gen::(); + let user_channel_id: u128 = rand::rng().random(); let intercept_scid = self.channel_manager.get_intercept_scid(); if let Some(payment_size_msat) = payment_size_msat { @@ -576,14 +668,17 @@ where } } - match lsps2_service_handler.invoice_parameters_generated( - &counterparty_node_id, - request_id, - intercept_scid, - LSPS2_CHANNEL_CLTV_EXPIRY_DELTA, - LSPS2_CLIENT_TRUSTS_LSP_MODE, - user_channel_id, - ) { + match lsps2_service_handler + .invoice_parameters_generated( + &counterparty_node_id, + request_id, + intercept_scid, + LSPS2_CHANNEL_CLTV_EXPIRY_DELTA, + service_config.client_trusts_lsp, + user_channel_id, + ) + .await + { Ok(()) => {}, Err(e) => { log_error!( @@ -599,7 +694,7 @@ where return; } }, - Event::LSPS2Service(LSPS2ServiceEvent::OpenChannel { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::OpenChannel { their_network_key, amt_to_forward_msat, opening_fee_msat: _, @@ -673,7 +768,7 @@ where return; } - let mut config = *self.channel_manager.get_current_default_configuration(); + let mut config = self.channel_manager.get_current_config().clone(); // We set these LSP-specific values during Node building, here we're making sure it's actually set. 
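// (Illustrative aside, not part of the patch: the "LSP-specific values"
// asserted below typically revolve around LDK's HTLC-interception knobs,
// e.g. `ChannelConfig::accept_underpaying_htlcs`, which lets the LSP skim
// the JIT channel-opening fee from the forwarded HTLC. A hedged sketch of
// what Node building would have set:
//
//     let mut user_config = lightning::util::config::UserConfig::default();
//     // Allow deducting the LSPS2 opening fee from intercepted payments.
//     user_config.channel_config.accept_underpaying_htlcs = true;
//     // Required so payments can be intercepted and forwarded over the
//     // not-yet-usable JIT channel.
//     user_config.accept_intercept_htlcs = true;
//
// The exact fields checked here are whatever the builder set; see the
// assertions that follow.)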
debug_assert_eq!( @@ -713,7 +808,7 @@ where }, } }, - Event::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { request_id, counterparty_node_id, opening_fee_params_menu, @@ -763,7 +858,7 @@ where ); } }, - Event::LSPS2Client(LSPS2ClientEvent::InvoiceParametersReady { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::InvoiceParametersReady { request_id, counterparty_node_id, intercept_scid, @@ -903,7 +998,7 @@ where return Err(Error::LiquidityRequestFailed); } - let order_params = OrderParameters { + let order_params = LSPS1OrderParams { lsp_balance_sat, client_balance_sat, required_channel_confirmations: lsp_limits.min_required_channel_confirmations, @@ -952,7 +1047,7 @@ where } pub(crate) async fn lsps1_check_order_status( - &self, order_id: OrderId, + &self, order_id: LSPS1OrderId, ) -> Result { let lsps1_client = self.lsps1_client.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; let client_handler = self.liquidity_manager.lsps1_client_handler().ok_or_else(|| { @@ -987,7 +1082,7 @@ where pub(crate) async fn lsps2_receive_to_jit_channel( &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, - max_total_lsp_fee_limit_msat: Option, + max_total_lsp_fee_limit_msat: Option, payment_hash: Option, ) -> Result<(Bolt11Invoice, u64), Error> { let fee_response = self.lsps2_request_opening_fee_params().await?; @@ -1039,6 +1134,7 @@ where Some(amount_msat), description, expiry_secs, + payment_hash, )?; log_info!(self.logger, "JIT-channel invoice created: {}", invoice); @@ -1047,7 +1143,7 @@ where pub(crate) async fn lsps2_receive_variable_amount_to_jit_channel( &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, - max_proportional_lsp_fee_limit_ppm_msat: Option, + max_proportional_lsp_fee_limit_ppm_msat: Option, payment_hash: Option, ) -> Result<(Bolt11Invoice, u64), Error> { let fee_response = self.lsps2_request_opening_fee_params().await?; @@ -1081,8 +1177,13 @@ where ); let buy_response = self.lsps2_send_buy_request(None, min_opening_params).await?; - let invoice = - self.lsps2_create_jit_invoice(buy_response, None, description, expiry_secs)?; + let invoice = self.lsps2_create_jit_invoice( + buy_response, + None, + description, + expiry_secs, + payment_hash, + )?; log_info!(self.logger, "JIT-channel invoice created: {}", invoice); Ok((invoice, min_prop_fee_ppm_msat)) @@ -1120,7 +1221,7 @@ where } async fn lsps2_send_buy_request( - &self, amount_msat: Option, opening_fee_params: OpeningFeeParams, + &self, amount_msat: Option, opening_fee_params: LSPS2OpeningFeeParams, ) -> Result { let lsps2_client = self.lsps2_client.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; @@ -1165,18 +1266,36 @@ where fn lsps2_create_jit_invoice( &self, buy_response: LSPS2BuyResponse, amount_msat: Option, description: &Bolt11InvoiceDescription, expiry_secs: u32, + payment_hash: Option, ) -> Result { let lsps2_client = self.lsps2_client.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; // LSPS2 requires min_final_cltv_expiry_delta to be at least 2 more than usual. 
let min_final_cltv_expiry_delta = MIN_FINAL_CLTV_EXPIRY_DELTA + 2; - let (payment_hash, payment_secret) = self - .channel_manager - .create_inbound_payment(None, expiry_secs, Some(min_final_cltv_expiry_delta)) - .map_err(|e| { - log_error!(self.logger, "Failed to register inbound payment: {:?}", e); - Error::InvoiceCreationFailed - })?; + let (payment_hash, payment_secret) = match payment_hash { + Some(payment_hash) => { + let payment_secret = self + .channel_manager + .create_inbound_payment_for_hash( + payment_hash, + None, + expiry_secs, + Some(min_final_cltv_expiry_delta), + ) + .map_err(|e| { + log_error!(self.logger, "Failed to register inbound payment: {:?}", e); + Error::InvoiceCreationFailed + })?; + (payment_hash, payment_secret) + }, + None => self + .channel_manager + .create_inbound_payment(None, expiry_secs, Some(min_final_cltv_expiry_delta)) + .map_err(|e| { + log_error!(self.logger, "Failed to register inbound payment: {:?}", e); + Error::InvoiceCreationFailed + })?, + }; let route_hint = RouteHint(vec![RouteHintHop { src_node_id: lsps2_client.lsp_node_id, @@ -1217,15 +1336,14 @@ where }) } - pub(crate) fn handle_channel_ready( + pub(crate) async fn handle_channel_ready( &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.channel_ready( - user_channel_id, - channel_id, - counterparty_node_id, - ) { + if let Err(e) = lsps2_service_handler + .channel_ready(user_channel_id, channel_id, counterparty_node_id) + .await + { log_error!( self.logger, "LSPS2 service failed to handle ChannelReady event: {:?}", @@ -1235,17 +1353,20 @@ where } } - pub(crate) fn handle_htlc_intercepted( + pub(crate) async fn handle_htlc_intercepted( &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.htlc_intercepted( - intercept_scid, - intercept_id, - expected_outbound_amount_msat, - payment_hash, - ) { + if let Err(e) = lsps2_service_handler + .htlc_intercepted( + intercept_scid, + intercept_id, + expected_outbound_amount_msat, + payment_hash, + ) + .await + { log_error!( self.logger, "LSPS2 service failed to handle HTLCIntercepted event: {:?}", @@ -1255,9 +1376,9 @@ where } } - pub(crate) fn handle_htlc_handling_failed(&self, failed_next_destination: HTLCDestination) { + pub(crate) async fn handle_htlc_handling_failed(&self, failure_type: HTLCHandlingFailureType) { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.htlc_handling_failed(failed_next_destination) { + if let Err(e) = lsps2_service_handler.htlc_handling_failed(failure_type).await { log_error!( self.logger, "LSPS2 service failed to handle HTLCHandlingFailed event: {:?}", @@ -1267,10 +1388,14 @@ where } } - pub(crate) fn handle_payment_forwarded(&self, next_channel_id: Option) { + pub(crate) async fn handle_payment_forwarded( + &self, next_channel_id: Option, skimmed_fee_msat: u64, + ) { if let Some(next_channel_id) = next_channel_id { if let Some(lsps2_service_handler) = self.liquidity_manager.lsps2_service_handler() { - if let Err(e) = lsps2_service_handler.payment_forwarded(next_channel_id) { + if let Err(e) = + lsps2_service_handler.payment_forwarded(next_channel_id, skimmed_fee_msat).await + { log_error!( 
self.logger, "LSPS2 service failed to handle PaymentForwarded: {:?}", @@ -1291,82 +1416,24 @@ pub(crate) struct LSPS1OpeningParamsResponse { #[derive(Debug, Clone)] pub struct LSPS1OrderStatus { /// The id of the channel order. - pub order_id: OrderId, + pub order_id: LSPS1OrderId, /// The parameters of channel order. - pub order_params: OrderParameters, + pub order_params: LSPS1OrderParams, /// Contains details about how to pay for the order. - pub payment_options: PaymentInfo, + pub payment_options: LSPS1PaymentInfo, /// Contains information about the channel state. - pub channel_state: Option, + pub channel_state: Option, } #[cfg(not(feature = "uniffi"))] -type PaymentInfo = lightning_liquidity::lsps1::msgs::PaymentInfo; - -/// Details regarding how to pay for an order. -#[cfg(feature = "uniffi")] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct PaymentInfo { - /// A Lightning payment using BOLT 11. - pub bolt11: Option, - /// An onchain payment. - pub onchain: Option, -} - -#[cfg(feature = "uniffi")] -impl From for PaymentInfo { - fn from(value: lightning_liquidity::lsps1::msgs::PaymentInfo) -> Self { - PaymentInfo { - bolt11: value.bolt11.map(|b| b.into()), - onchain: value.onchain.map(|o| o.into()), - } - } -} - -/// An onchain payment. -#[cfg(feature = "uniffi")] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct OnchainPaymentInfo { - /// Indicates the current state of the payment. - pub state: lightning_liquidity::lsps1::msgs::PaymentState, - /// The datetime when the payment option expires. - pub expires_at: chrono::DateTime, - /// The total fee the LSP will charge to open this channel in satoshi. - pub fee_total_sat: u64, - /// The amount the client needs to pay to have the requested channel openend. - pub order_total_sat: u64, - /// An on-chain address the client can send [`Self::order_total_sat`] to to have the channel - /// opened. - pub address: bitcoin::Address, - /// The minimum number of block confirmations that are required for the on-chain payment to be - /// considered confirmed. - pub min_onchain_payment_confirmations: Option, - /// The minimum fee rate for the on-chain payment in case the client wants the payment to be - /// confirmed without a confirmation. - pub min_fee_for_0conf: Arc, - /// The address where the LSP will send the funds if the order fails. 
- pub refund_onchain_address: Option, -} +type LSPS1PaymentInfo = lightning_liquidity::lsps1::msgs::LSPS1PaymentInfo; #[cfg(feature = "uniffi")] -impl From for OnchainPaymentInfo { - fn from(value: lightning_liquidity::lsps1::msgs::OnchainPaymentInfo) -> Self { - Self { - state: value.state, - expires_at: value.expires_at, - fee_total_sat: value.fee_total_sat, - order_total_sat: value.order_total_sat, - address: value.address, - min_onchain_payment_confirmations: value.min_onchain_payment_confirmations, - min_fee_for_0conf: Arc::new(value.min_fee_for_0conf), - refund_onchain_address: value.refund_onchain_address, - } - } -} +type LSPS1PaymentInfo = crate::ffi::LSPS1PaymentInfo; #[derive(Debug, Clone)] pub(crate) struct LSPS2FeeResponse { - opening_fee_params_menu: Vec, + opening_fee_params_menu: Vec, } #[derive(Debug, Clone)] @@ -1388,7 +1455,7 @@ pub(crate) struct LSPS2BuyResponse { /// [`Bolt11Payment::receive_via_jit_channel`]: crate::payment::Bolt11Payment::receive_via_jit_channel #[derive(Clone)] pub struct LSPS1Liquidity { - runtime: Arc>>>, + runtime: Arc, wallet: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, @@ -1397,7 +1464,7 @@ pub struct LSPS1Liquidity { impl LSPS1Liquidity { pub(crate) fn new( - runtime: Arc>>>, wallet: Arc, + runtime: Arc, wallet: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, logger: Arc, ) -> Self { @@ -1418,19 +1485,14 @@ impl LSPS1Liquidity { let (lsp_node_id, lsp_address) = liquidity_source.get_lsps1_lsp_details().ok_or(Error::LiquiditySourceUnavailable)?; - let rt_lock = self.runtime.read().unwrap(); - let runtime = rt_lock.as_ref().unwrap(); - let con_node_id = lsp_node_id; let con_addr = lsp_address.clone(); let con_cm = Arc::clone(&self.connection_manager); // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; log_info!(self.logger, "Connected to LSP {}@{}. ", lsp_node_id, lsp_address); @@ -1438,52 +1500,43 @@ impl LSPS1Liquidity { let refund_address = self.wallet.get_new_address()?; let liquidity_source = Arc::clone(&liquidity_source); - let response = tokio::task::block_in_place(move || { - runtime.block_on(async move { - liquidity_source - .lsps1_request_channel( - lsp_balance_sat, - client_balance_sat, - channel_expiry_blocks, - announce_channel, - refund_address, - ) - .await - }) + let response = self.runtime.block_on(async move { + liquidity_source + .lsps1_request_channel( + lsp_balance_sat, + client_balance_sat, + channel_expiry_blocks, + announce_channel, + refund_address, + ) + .await })?; Ok(response) } /// Connects to the configured LSP and checks for the status of a previously-placed order. 
- pub fn check_order_status(&self, order_id: OrderId) -> Result { + pub fn check_order_status(&self, order_id: LSPS1OrderId) -> Result { let liquidity_source = self.liquidity_source.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; let (lsp_node_id, lsp_address) = liquidity_source.get_lsps1_lsp_details().ok_or(Error::LiquiditySourceUnavailable)?; - let rt_lock = self.runtime.read().unwrap(); - let runtime = rt_lock.as_ref().unwrap(); - let con_node_id = lsp_node_id; let con_addr = lsp_address.clone(); let con_cm = Arc::clone(&self.connection_manager); // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; let liquidity_source = Arc::clone(&liquidity_source); - let response = tokio::task::block_in_place(move || { - runtime - .block_on(async move { liquidity_source.lsps1_check_order_status(order_id).await }) - })?; - + let response = self + .runtime + .block_on(async move { liquidity_source.lsps1_check_order_status(order_id).await })?; Ok(response) } } diff --git a/src/logger.rs b/src/logger.rs index d357f018d..4eaefad74 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -7,15 +7,6 @@ //! Logging-related objects. -pub(crate) use lightning::util::logger::{Logger as LdkLogger, Record as LdkRecord}; -pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; - -pub use lightning::util::logger::Level as LogLevel; - -use chrono::Utc; -use log::Level as LogFacadeLevel; -use log::Record as LogFacadeRecord; - #[cfg(not(feature = "uniffi"))] use core::fmt; use std::fs; @@ -23,6 +14,12 @@ use std::io::Write; use std::path::Path; use std::sync::Arc; +use chrono::Utc; +pub use lightning::util::logger::Level as LogLevel; +pub(crate) use lightning::util::logger::{Logger as LdkLogger, Record as LdkRecord}; +pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; +use log::{Level as LogFacadeLevel, Record as LogFacadeRecord}; + /// A unit of logging output with metadata to enable filtering `module_path`, /// `file`, and `line` to inform on log's source. #[cfg(not(feature = "uniffi"))] @@ -124,7 +121,7 @@ impl LogWriter for Writer { let log = format!( "{} {:<5} [{}:{}] {}\n", - Utc::now().format("%Y-%m-%d %H:%M:%S"), + Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), record.level.to_string(), record.module_path, record.line, @@ -153,6 +150,7 @@ impl LogWriter for Writer { #[cfg(not(feature = "uniffi"))] log::logger().log( &builder + .target(record.module_path) .module_path(Some(record.module_path)) .line(Some(record.line)) .args(format_args!("{}", record.args)) @@ -161,6 +159,7 @@ impl LogWriter for Writer { #[cfg(feature = "uniffi")] log::logger().log( &builder + .target(&record.module_path) .module_path(Some(&record.module_path)) .line(Some(record.line)) .args(format_args!("{}", record.args)) diff --git a/src/message_handler.rs b/src/message_handler.rs index cebd1ea07..fc206ec4d 100644 --- a/src/message_handler.rs +++ b/src/message_handler.rs @@ -5,20 +5,18 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
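// (Illustrative aside, not part of the patch: the `self.runtime.block_on(..)`
// calls above replace the former `tokio::task::block_in_place(|| runtime.block_on(..))`
// dance visible on the removed lines. A hedged sketch of what a wrapper like
// `crate::runtime::Runtime` might look like, assuming the multi-thread Tokio
// flavor; the real type ships elsewhere in this PR:)
use std::future::Future;
use std::sync::Arc;

pub(crate) struct RuntimeSketch {
	inner: Arc<tokio::runtime::Runtime>,
}

impl RuntimeSketch {
	pub(crate) fn block_on<F: Future>(&self, future: F) -> F::Output {
		if tokio::runtime::Handle::try_current().is_ok() {
			// Already inside a runtime: avoid starving its workers while we
			// block on this future (requires the multi-thread flavor).
			tokio::task::block_in_place(|| self.inner.block_on(future))
		} else {
			self.inner.block_on(future)
		}
	}
}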
-use crate::liquidity::LiquiditySource;
+use std::ops::Deref;
+use std::sync::Arc;
+
+use bitcoin::secp256k1::PublicKey;
 use lightning::ln::peer_handler::CustomMessageHandler;
 use lightning::ln::wire::CustomMessageReader;
 use lightning::util::logger::Logger;
-
-use lightning_types::features::{InitFeatures, NodeFeatures};
-
+use lightning::util::ser::LengthLimitedRead;
 use lightning_liquidity::lsps0::ser::RawLSPSMessage;
+use lightning_types::features::{InitFeatures, NodeFeatures};
 
-use bitcoin::secp256k1::PublicKey;
-
-use std::ops::Deref;
-use std::sync::Arc;
+use crate::liquidity::LiquiditySource;
 
 pub(crate) enum NodeCustomMessageHandler<L: Deref>
 where
@@ -47,7 +45,7 @@ where
 {
 	type CustomMessage = RawLSPSMessage;
 
-	fn read<RD: lightning::io::Read>(
+	fn read<RD: LengthLimitedRead>(
 		&self, message_type: u16, buffer: &mut RD,
 	) -> Result<Option<Self::CustomMessage>, lightning::ln::msgs::DecodeError> {
 		match self {
diff --git a/src/payment/asynchronous/mod.rs b/src/payment/asynchronous/mod.rs
new file mode 100644
index 000000000..c28f6e243
--- /dev/null
+++ b/src/payment/asynchronous/mod.rs
@@ -0,0 +1,10 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+pub(crate) mod om_mailbox;
+mod rate_limiter;
+pub(crate) mod static_invoice_store;
diff --git a/src/payment/asynchronous/om_mailbox.rs b/src/payment/asynchronous/om_mailbox.rs
new file mode 100644
index 000000000..9a7478706
--- /dev/null
+++ b/src/payment/asynchronous/om_mailbox.rs
@@ -0,0 +1,99 @@
+use std::collections::{HashMap, VecDeque};
+use std::sync::Mutex;
+
+use bitcoin::secp256k1::PublicKey;
+use lightning::ln::msgs::OnionMessage;
+
+pub(crate) struct OnionMessageMailbox {
+	map: Mutex<HashMap<PublicKey, VecDeque<OnionMessage>>>,
+}
+
+impl OnionMessageMailbox {
+	const MAX_MESSAGES_PER_PEER: usize = 30;
+	const MAX_PEERS: usize = 300;
+
+	pub fn new() -> Self {
+		Self { map: Mutex::new(HashMap::with_capacity(Self::MAX_PEERS)) }
+	}
+
+	pub(crate) fn onion_message_intercepted(&self, peer_node_id: PublicKey, message: OnionMessage) {
+		let mut map = self.map.lock().unwrap();
+
+		let queue = map.entry(peer_node_id).or_insert_with(VecDeque::new);
+		if queue.len() >= Self::MAX_MESSAGES_PER_PEER {
+			queue.pop_front();
+		}
+		queue.push_back(message);
+
+		// Enforce a limit on the number of peers. If exceeded, evict the peer with the longest queue.
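// (Illustrative aside, not part of the patch: a test-style sketch of the two
// caps enforced by this function; `some_peer()` and `some_onion_message()`
// are hypothetical helpers along the lines of this module's unit test.)
//
//     let mailbox = OnionMessageMailbox::new();
//     for _ in 0..31 {
//         // With MAX_MESSAGES_PER_PEER = 30, the oldest message is dropped.
//         mailbox.onion_message_intercepted(some_peer(), some_onion_message());
//     }
//     assert_eq!(mailbox.onion_message_peer_connected(some_peer()).len(), 30);
//
// The check below enforces the companion cap on distinct peers: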
+ if map.len() > Self::MAX_PEERS { + let peer_to_remove = + map.iter().max_by_key(|(_, queue)| queue.len()).map(|(peer, _)| *peer).unwrap(); + + map.remove(&peer_to_remove); + } + } + + pub(crate) fn onion_message_peer_connected( + &self, peer_node_id: PublicKey, + ) -> Vec { + let mut map = self.map.lock().unwrap(); + + if let Some(queue) = map.remove(&peer_node_id) { + queue.into() + } else { + Vec::new() + } + } + + #[cfg(test)] + pub(crate) fn is_empty(&self) -> bool { + let map = self.map.lock().unwrap(); + map.is_empty() + } +} + +#[cfg(test)] +mod tests { + use bitcoin::key::Secp256k1; + use bitcoin::secp256k1::{PublicKey, SecretKey}; + use lightning::onion_message; + + use crate::payment::asynchronous::om_mailbox::OnionMessageMailbox; + + #[test] + fn onion_message_mailbox() { + let mailbox = OnionMessageMailbox::new(); + + let secp = Secp256k1::new(); + let sk_bytes = [12; 32]; + let sk = SecretKey::from_slice(&sk_bytes).unwrap(); + let peer_node_id = PublicKey::from_secret_key(&secp, &sk); + + let blinding_sk = SecretKey::from_slice(&[13; 32]).unwrap(); + let blinding_point = PublicKey::from_secret_key(&secp, &blinding_sk); + + let message_sk = SecretKey::from_slice(&[13; 32]).unwrap(); + let message_point = PublicKey::from_secret_key(&secp, &message_sk); + + let message = lightning::ln::msgs::OnionMessage { + blinding_point, + onion_routing_packet: onion_message::packet::Packet { + version: 0, + public_key: message_point, + hop_data: vec![1, 2, 3], + hmac: [0; 32], + }, + }; + mailbox.onion_message_intercepted(peer_node_id, message.clone()); + + let messages = mailbox.onion_message_peer_connected(peer_node_id); + assert_eq!(messages.len(), 1); + assert_eq!(messages[0], message); + + assert!(mailbox.is_empty()); + + let messages = mailbox.onion_message_peer_connected(peer_node_id); + assert_eq!(messages.len(), 0); + } +} diff --git a/src/payment/asynchronous/rate_limiter.rs b/src/payment/asynchronous/rate_limiter.rs new file mode 100644 index 000000000..671b1dc72 --- /dev/null +++ b/src/payment/asynchronous/rate_limiter.rs @@ -0,0 +1,96 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! [`RateLimiter`] to control the rate of requests from users. + +use std::collections::HashMap; +use std::time::{Duration, Instant}; + +/// Implements a leaky-bucket style rate limiter parameterized by the max capacity of the bucket, the refill interval, +/// and the max idle duration. +/// +/// For every passing of the refill interval, one token is added to the bucket, up to the maximum capacity. When the +/// bucket has remained at the maximum capacity for longer than the max idle duration, it is removed to prevent memory +/// leakage. 
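// (Illustrative aside, not part of the patch: a worked example of the refill
// arithmetic implemented below, using the values from this module's test --
// capacity = 3, refill_interval = 100ms, max_idle = 1s.)
//
//     t = 0ms    new user: the bucket starts full; `allow()` spends one token
//                per call, so three calls pass and the fourth is refused.
//     t = 250ms  elapsed / refill_interval = 2 whole intervals, so two tokens
//                are added (capped at the capacity) and `last_refill` resets.
//     t > 1s     a bucket idle past `max_idle` is dropped by
//                `garbage_collect` the next time a new user shows up.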
+pub(crate) struct RateLimiter { + users: HashMap, Bucket>, + capacity: u32, + refill_interval: Duration, + max_idle: Duration, +} + +struct Bucket { + tokens: u32, + last_refill: Instant, +} + +impl RateLimiter { + pub(crate) fn new(capacity: u32, refill_interval: Duration, max_idle: Duration) -> Self { + Self { users: HashMap::new(), capacity, refill_interval, max_idle } + } + + pub(crate) fn allow(&mut self, user_id: &[u8]) -> bool { + let now = Instant::now(); + + let entry = self.users.entry(user_id.to_vec()); + let is_new_user = matches!(entry, std::collections::hash_map::Entry::Vacant(_)); + + let bucket = entry.or_insert(Bucket { tokens: self.capacity, last_refill: now }); + + let elapsed = now.duration_since(bucket.last_refill); + let tokens_to_add = (elapsed.as_secs_f64() / self.refill_interval.as_secs_f64()) as u32; + + if tokens_to_add > 0 { + bucket.tokens = (bucket.tokens + tokens_to_add).min(self.capacity); + bucket.last_refill = now; + } + + let allow = if bucket.tokens > 0 { + bucket.tokens -= 1; + true + } else { + false + }; + + // Each time a new user is added, we take the opportunity to clean up old rate limits. + if is_new_user { + self.garbage_collect(self.max_idle); + } + + allow + } + + fn garbage_collect(&mut self, max_idle: Duration) { + let now = Instant::now(); + self.users.retain(|_, bucket| now.duration_since(bucket.last_refill) < max_idle); + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::payment::asynchronous::rate_limiter::RateLimiter; + + #[test] + fn rate_limiter_test() { + // Test + let mut rate_limiter = + RateLimiter::new(3, Duration::from_millis(100), Duration::from_secs(1)); + + assert!(rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user1")); + assert!(!rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user2")); + + std::thread::sleep(Duration::from_millis(150)); + + assert!(rate_limiter.allow(b"user1")); + assert!(rate_limiter.allow(b"user2")); + } +} diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs new file mode 100644 index 000000000..45125cfee --- /dev/null +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -0,0 +1,309 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! Store implementation for [`StaticInvoice`]s. 
+
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use bitcoin::hashes::sha256::Hash as Sha256;
+use bitcoin::hashes::Hash;
+use lightning::blinded_path::message::BlindedMessagePath;
+use lightning::impl_writeable_tlv_based;
+use lightning::offers::static_invoice::StaticInvoice;
+use lightning::util::persist::KVStoreSync;
+use lightning::util::ser::{Readable, Writeable};
+
+use crate::hex_utils;
+use crate::io::STATIC_INVOICE_STORE_PRIMARY_NAMESPACE;
+use crate::payment::asynchronous::rate_limiter::RateLimiter;
+use crate::types::DynStore;
+
+struct PersistedStaticInvoice {
+	invoice: StaticInvoice,
+	request_path: BlindedMessagePath,
+}
+
+impl_writeable_tlv_based!(PersistedStaticInvoice, {
+	(0, invoice, required),
+	(2, request_path, required)
+});
+
+pub(crate) struct StaticInvoiceStore {
+	kv_store: Arc<DynStore>,
+	request_rate_limiter: Mutex<RateLimiter>,
+	persist_rate_limiter: Mutex<RateLimiter>,
+}
+
+impl StaticInvoiceStore {
+	const RATE_LIMITER_BUCKET_CAPACITY: u32 = 5;
+	const RATE_LIMITER_REFILL_INTERVAL: Duration = Duration::from_millis(100);
+	const RATE_LIMITER_MAX_IDLE: Duration = Duration::from_secs(600);
+
+	pub(crate) fn new(kv_store: Arc<DynStore>) -> Self {
+		Self {
+			kv_store,
+			request_rate_limiter: Mutex::new(RateLimiter::new(
+				Self::RATE_LIMITER_BUCKET_CAPACITY,
+				Self::RATE_LIMITER_REFILL_INTERVAL,
+				Self::RATE_LIMITER_MAX_IDLE,
+			)),
+			persist_rate_limiter: Mutex::new(RateLimiter::new(
+				Self::RATE_LIMITER_BUCKET_CAPACITY,
+				Self::RATE_LIMITER_REFILL_INTERVAL,
+				Self::RATE_LIMITER_MAX_IDLE,
+			)),
+		}
+	}
+
+	fn check_rate_limit(
+		limiter: &Mutex<RateLimiter>, recipient_id: &[u8],
+	) -> Result<(), lightning::io::Error> {
+		let mut limiter = limiter.lock().unwrap();
+		if !limiter.allow(recipient_id) {
+			Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, "Rate limit exceeded"))
+		} else {
+			Ok(())
+		}
+	}
+
+	pub(crate) async fn handle_static_invoice_requested(
+		&self, recipient_id: &[u8], invoice_slot: u16,
+	) -> Result<Option<(StaticInvoice, BlindedMessagePath)>, lightning::io::Error> {
+		Self::check_rate_limit(&self.request_rate_limiter, &recipient_id)?;
+
+		let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, recipient_id);
+
+		KVStoreSync::read(
+			&*self.kv_store,
+			STATIC_INVOICE_STORE_PRIMARY_NAMESPACE,
+			&secondary_namespace,
+			&key,
+		)
+		.and_then(|data| {
+			PersistedStaticInvoice::read(&mut &*data)
+				.map(|persisted_invoice| {
+					Some((persisted_invoice.invoice, persisted_invoice.request_path))
+				})
+				.map_err(|e| {
+					lightning::io::Error::new(
+						lightning::io::ErrorKind::InvalidData,
+						format!("Failed to parse static invoice: {:?}", e),
+					)
+				})
+		})
+		.or_else(|e| {
+			if e.kind() == lightning::io::ErrorKind::NotFound {
+				Ok(None)
+			} else {
+				Err(e)
+			}
+		})
+	}
+
+	pub(crate) async fn handle_persist_static_invoice(
+		&self, invoice: StaticInvoice, invoice_request_path: BlindedMessagePath, invoice_slot: u16,
+		recipient_id: Vec<u8>,
+	) -> Result<(), lightning::io::Error> {
+		Self::check_rate_limit(&self.persist_rate_limiter, &recipient_id)?;
+
+		let (secondary_namespace, key) = Self::get_storage_location(invoice_slot, &recipient_id);
+
+		let persisted_invoice =
+			PersistedStaticInvoice { invoice, request_path: invoice_request_path };
+
+		let mut buf = Vec::new();
+		persisted_invoice.write(&mut buf)?;
+
+		// Static invoices will be persisted at "static_invoices/<hashed_recipient_id>/<invoice_slot>".
+ // + // Example: static_invoices/039058c6f2c0cb492c533b0a4d14ef77cc0f78abccced5287d84a1a2011cfb81/00001 + KVStoreSync::write( + &*self.kv_store, + STATIC_INVOICE_STORE_PRIMARY_NAMESPACE, + &secondary_namespace, + &key, + buf, + ) + } + + fn get_storage_location(invoice_slot: u16, recipient_id: &[u8]) -> (String, String) { + let hash = Sha256::hash(recipient_id).to_byte_array(); + let secondary_namespace = hex_utils::to_string(&hash); + + let key = format!("{:05}", invoice_slot); + (secondary_namespace, key) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::time::Duration; + + use bitcoin::key::{Keypair, Secp256k1}; + use bitcoin::secp256k1::{PublicKey, SecretKey}; + use lightning::blinded_path::message::BlindedMessagePath; + use lightning::blinded_path::payment::{BlindedPayInfo, BlindedPaymentPath}; + use lightning::blinded_path::BlindedHop; + use lightning::ln::inbound_payment::ExpandedKey; + use lightning::offers::nonce::Nonce; + use lightning::offers::offer::OfferBuilder; + use lightning::offers::static_invoice::{StaticInvoice, StaticInvoiceBuilder}; + use lightning::sign::EntropySource; + use lightning_types::features::BlindedHopFeatures; + + use crate::io::test_utils::InMemoryStore; + use crate::payment::asynchronous::static_invoice_store::StaticInvoiceStore; + use crate::types::DynStore; + + #[tokio::test] + async fn static_invoice_store_test() { + let store: Arc = Arc::new(InMemoryStore::new()); + let static_invoice_store = StaticInvoiceStore::new(Arc::clone(&store)); + + let static_invoice = invoice(); + let recipient_id = vec![1, 1, 1]; + let invoice_request_path = blinded_path(); + assert!(static_invoice_store + .handle_persist_static_invoice( + static_invoice.clone(), + invoice_request_path.clone(), + 0, + recipient_id.clone() + ) + .await + .is_ok()); + + let requested_invoice = + static_invoice_store.handle_static_invoice_requested(&recipient_id, 0).await.unwrap(); + + assert_eq!(requested_invoice.unwrap(), (static_invoice, invoice_request_path)); + + assert!(static_invoice_store + .handle_static_invoice_requested(&recipient_id, 1) + .await + .unwrap() + .is_none()); + + assert!(static_invoice_store + .handle_static_invoice_requested(&[2, 2, 2], 0) + .await + .unwrap() + .is_none()); + } + + fn invoice() -> StaticInvoice { + let node_id = recipient_pubkey(); + let payment_paths = payment_paths(); + let now = now(); + let expanded_key = ExpandedKey::new([42; 32]); + let entropy = FixedEntropy {}; + let nonce = Nonce::from_entropy_source(&entropy); + let secp_ctx = Secp256k1::new(); + + let offer = OfferBuilder::deriving_signing_pubkey(node_id, &expanded_key, nonce, &secp_ctx) + .path(blinded_path()) + .build() + .unwrap(); + + StaticInvoiceBuilder::for_offer_using_derived_keys( + &offer, + payment_paths.clone(), + vec![blinded_path()], + now, + &expanded_key, + nonce, + &secp_ctx, + ) + .unwrap() + .build_and_sign(&secp_ctx) + .unwrap() + } + + fn now() -> Duration { + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH") + } + + fn payment_paths() -> Vec { + vec![ + BlindedPaymentPath::from_blinded_path_and_payinfo( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 43] }, + BlindedHop { blinded_node_id: pubkey(44), encrypted_payload: vec![0; 44] }, + ], + BlindedPayInfo { + fee_base_msat: 1, + fee_proportional_millionths: 1_000, + cltv_expiry_delta: 42, + htlc_minimum_msat: 100, + htlc_maximum_msat: 
1_000_000_000_000, + features: BlindedHopFeatures::empty(), + }, + ), + BlindedPaymentPath::from_blinded_path_and_payinfo( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(45), encrypted_payload: vec![0; 45] }, + BlindedHop { blinded_node_id: pubkey(46), encrypted_payload: vec![0; 46] }, + ], + BlindedPayInfo { + fee_base_msat: 1, + fee_proportional_millionths: 1_000, + cltv_expiry_delta: 42, + htlc_minimum_msat: 100, + htlc_maximum_msat: 1_000_000_000_000, + features: BlindedHopFeatures::empty(), + }, + ), + ] + } + + fn blinded_path() -> BlindedMessagePath { + BlindedMessagePath::from_blinded_path( + pubkey(40), + pubkey(41), + vec![ + BlindedHop { blinded_node_id: pubkey(42), encrypted_payload: vec![0; 43] }, + BlindedHop { blinded_node_id: pubkey(43), encrypted_payload: vec![0; 44] }, + ], + ) + } + + fn pubkey(byte: u8) -> PublicKey { + let secp_ctx = Secp256k1::new(); + PublicKey::from_secret_key(&secp_ctx, &privkey(byte)) + } + + fn privkey(byte: u8) -> SecretKey { + SecretKey::from_slice(&[byte; 32]).unwrap() + } + + fn recipient_keys() -> Keypair { + let secp_ctx = Secp256k1::new(); + Keypair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap()) + } + + fn recipient_pubkey() -> PublicKey { + recipient_keys().public_key() + } + + struct FixedEntropy; + + impl EntropySource for FixedEntropy { + fn get_secure_random_bytes(&self) -> [u8; 32] { + [42; 32] + } + } +} diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index 1df418bb9..eda349774 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -9,76 +9,43 @@ //! //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md +use std::sync::{Arc, RwLock}; + +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; +use lightning::ln::channelmanager::{ + Bolt11InvoiceParameters, Bolt11PaymentError, PaymentId, Retry, RetryableSendFailure, +}; +use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; +use lightning_invoice::{ + Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescription as LdkBolt11InvoiceDescription, +}; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; + use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::connection::ConnectionManager; use crate::data_store::DataStoreUpdateResult; use crate::error::Error; +use crate::ffi::{maybe_deref, maybe_try_convert_enum, maybe_wrap}; use crate::liquidity::LiquiditySource; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{ LSPFeeLimits, PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, }; -use crate::payment::SendingParameters; use crate::peer_store::{PeerInfo, PeerStore}; +use crate::runtime::Runtime; use crate::types::{ChannelManager, PaymentStore}; -use lightning::ln::bolt11_payment; -use lightning::ln::channelmanager::{ - Bolt11InvoiceParameters, PaymentId, RecipientOnionFields, Retry, RetryableSendFailure, -}; -use lightning::routing::router::{PaymentParameters, RouteParameters}; - -use lightning_types::payment::{PaymentHash, PaymentPreimage}; - -use lightning_invoice::Bolt11Invoice as LdkBolt11Invoice; -use lightning_invoice::Bolt11InvoiceDescription as LdkBolt11InvoiceDescription; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; - -use std::sync::{Arc, RwLock}; - #[cfg(not(feature = "uniffi"))] type Bolt11Invoice = LdkBolt11Invoice; #[cfg(feature = "uniffi")] -type Bolt11Invoice = Arc; - -#[cfg(not(feature = 
"uniffi"))] -pub(crate) fn maybe_wrap_invoice(invoice: LdkBolt11Invoice) -> Bolt11Invoice { - invoice -} -#[cfg(feature = "uniffi")] -pub(crate) fn maybe_wrap_invoice(invoice: LdkBolt11Invoice) -> Bolt11Invoice { - Arc::new(invoice.into()) -} - -#[cfg(not(feature = "uniffi"))] -pub fn maybe_convert_invoice(invoice: &Bolt11Invoice) -> &LdkBolt11Invoice { - invoice -} -#[cfg(feature = "uniffi")] -pub fn maybe_convert_invoice(invoice: &Bolt11Invoice) -> &LdkBolt11Invoice { - &invoice.inner -} +type Bolt11Invoice = Arc; #[cfg(not(feature = "uniffi"))] type Bolt11InvoiceDescription = LdkBolt11InvoiceDescription; #[cfg(feature = "uniffi")] -type Bolt11InvoiceDescription = crate::uniffi_types::Bolt11InvoiceDescription; - -macro_rules! maybe_convert_description { - ($description: expr) => {{ - #[cfg(not(feature = "uniffi"))] - { - $description - } - #[cfg(feature = "uniffi")] - { - &LdkBolt11InvoiceDescription::try_from($description)? - } - }}; -} +type Bolt11InvoiceDescription = crate::ffi::Bolt11InvoiceDescription; /// A payment handler allowing to create and pay [BOLT 11] invoices. /// @@ -87,24 +54,24 @@ macro_rules! maybe_convert_description { /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [`Node::bolt11_payment`]: crate::Node::bolt11_payment pub struct Bolt11Payment { - runtime: Arc>>>, + runtime: Arc, channel_manager: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>>, config: Arc, + is_running: Arc>, logger: Arc, } impl Bolt11Payment { pub(crate) fn new( - runtime: Arc>>>, - channel_manager: Arc, + runtime: Arc, channel_manager: Arc, connection_manager: Arc>>, liquidity_source: Option>>>, payment_store: Arc, peer_store: Arc>>, - config: Arc, logger: Arc, + config: Arc, is_running: Arc>, logger: Arc, ) -> Self { Self { runtime, @@ -114,28 +81,24 @@ impl Bolt11Payment { payment_store, peer_store, config, + is_running, logger, } } /// Send a payment given an invoice. /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. pub fn send( - &self, invoice: &Bolt11Invoice, sending_parameters: Option, + &self, invoice: &Bolt11Invoice, route_parameters: Option, ) -> Result { - let invoice = maybe_convert_invoice(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let (payment_hash, recipient_onion, mut route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { - log_error!(self.logger, "Failed to send payment due to the given invoice being \"zero-amount\". 
Please use send_using_amount instead."); - Error::InvalidInvoice - })?; - + let invoice = maybe_deref(invoice); + let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); let payment_id = PaymentId(invoice.payment_hash().to_byte_array()); if let Some(payment) = self.payment_store.get(&payment_id) { if payment.status == PaymentStatus::Pending @@ -146,29 +109,16 @@ impl Bolt11Payment { } } - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; - - let payment_secret = Some(*invoice.payment_secret()); + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); + let payment_secret = Some(*invoice.payment_secret()); - match self.channel_manager.send_payment( - payment_hash, - recipient_onion, + match self.channel_manager.pay_for_bolt11_invoice( + invoice, payment_id, - route_params, + None, + route_parameters, retry_strategy, ) { Ok(()) => { @@ -195,7 +145,13 @@ impl Bolt11Payment { Ok(payment_id) }, - Err(e) => { + Err(Bolt11PaymentError::InvalidAmount) => { + log_error!(self.logger, + "Failed to send payment due to the given invoice being \"zero-amount\". Please use send_using_amount instead." + ); + return Err(Error::InvalidInvoice); + }, + Err(Bolt11PaymentError::SendingFailed(e)) => { log_error!(self.logger, "Failed to send payment: {:?}", e); match e { RetryableSendFailure::DuplicatePayment => Err(Error::DuplicatePayment), @@ -230,18 +186,17 @@ impl Bolt11Payment { /// This can be used to pay a so-called "zero-amount" invoice, i.e., an invoice that leaves the /// amount paid to be determined by the user. /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
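// (Illustrative aside, not part of the patch: `RouteParametersConfig` is the
// LDK type that replaces this crate's former `SendingParameters`, bundling
// `max_total_routing_fee_msat`, `max_total_cltv_expiry_delta`,
// `max_path_count`, and `max_channel_saturation_power_of_half`. A hedged
// usage sketch:
//
//     let mut route_params_config = RouteParametersConfig::default();
//     // Refuse routes whose total fees exceed 5_000 msat.
//     route_params_config.max_total_routing_fee_msat = Some(5_000);
//     let payment_id =
//         node.bolt11_payment().send(&invoice, Some(route_params_config))?;
//
// A config passed per call takes precedence over the node-wide
// `Config::route_parameters` default.)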
pub fn send_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, - sending_parameters: Option, + route_parameters: Option, ) -> Result { - let invoice = maybe_convert_invoice(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let invoice = maybe_deref(invoice); if let Some(invoice_amount_msat) = invoice.amount_milli_satoshis() { if amount_msat < invoice_amount_msat { log_error!( @@ -262,46 +217,16 @@ impl Bolt11Payment { } } - let payment_secret = invoice.payment_secret(); - let expiry_time = invoice.duration_since_epoch().saturating_add(invoice.expiry_time()); - let mut payment_params = PaymentParameters::from_node_id( - invoice.recover_payee_pub_key(), - invoice.min_final_cltv_expiry_delta() as u32, - ) - .with_expiry_time(expiry_time.as_secs()) - .with_route_hints(invoice.route_hints()) - .map_err(|_| Error::InvalidInvoice)?; - if let Some(features) = invoice.features() { - payment_params = payment_params - .with_bolt11_features(features.clone()) - .map_err(|_| Error::InvalidInvoice)?; - } - let mut route_params = - RouteParameters::from_payment_params_and_value(payment_params, amount_msat); - - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; - + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let recipient_fields = RecipientOnionFields::secret_only(*payment_secret); + let payment_secret = Some(*invoice.payment_secret()); - match self.channel_manager.send_payment( - payment_hash, - recipient_fields, + match self.channel_manager.pay_for_bolt11_invoice( + invoice, payment_id, - route_params, + Some(amount_msat), + route_parameters, retry_strategy, ) { Ok(()) => { @@ -316,8 +241,8 @@ impl Bolt11Payment { let kind = PaymentKind::Bolt11 { hash: payment_hash, preimage: None, - secret: Some(*payment_secret), bolt11_invoice: Some(invoice.to_string()), + secret: payment_secret, }; let payment = PaymentDetails::new( @@ -333,16 +258,22 @@ impl Bolt11Payment { Ok(payment_id) }, - Err(e) => { + Err(Bolt11PaymentError::InvalidAmount) => { + log_error!( + self.logger, + "Failed to send payment due to amount given being insufficient." 
+ ); + return Err(Error::InvalidInvoice); + }, + Err(Bolt11PaymentError::SendingFailed(e)) => { log_error!(self.logger, "Failed to send payment: {:?}", e); - match e { RetryableSendFailure::DuplicatePayment => Err(Error::DuplicatePayment), _ => { let kind = PaymentKind::Bolt11 { hash: payment_hash, preimage: None, - secret: Some(*payment_secret), + secret: payment_secret, bolt11_invoice: Some(invoice.to_string()), }; let payment = PaymentDetails::new( @@ -355,7 +286,6 @@ impl Bolt11Payment { ); self.payment_store.insert(payment)?; - Err(Error::PaymentSendingFailed) }, } @@ -396,8 +326,17 @@ impl Bolt11Payment { } if let Some(details) = self.payment_store.get(&payment_id) { - if let Some(expected_amount_msat) = details.amount_msat { - if claimable_amount_msat < expected_amount_msat { + // For payments requested via `receive*_via_jit_channel_for_hash()` + // `skimmed_fee_msat` held by LSP must be taken into account. + let skimmed_fee_msat = match details.kind { + PaymentKind::Bolt11Jit { + counterparty_skimmed_fee_msat: Some(skimmed_fee_msat), + .. + } => skimmed_fee_msat, + _ => 0, + }; + if let Some(invoice_amount_msat) = details.amount_msat { + if claimable_amount_msat < invoice_amount_msat - skimmed_fee_msat { log_error!( self.logger, "Failed to manually claim payment {} as the claimable amount is less than expected", @@ -472,9 +411,9 @@ impl Bolt11Payment { pub fn receive( &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, ) -> Result { - let description = maybe_convert_description!(description); - let invoice = self.receive_inner(Some(amount_msat), description, expiry_secs, None)?; - Ok(maybe_wrap_invoice(invoice)) + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_inner(Some(amount_msat), &description, expiry_secs, None)?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request a payment of the amount @@ -495,10 +434,10 @@ impl Bolt11Payment { &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, payment_hash: PaymentHash, ) -> Result { - let description = maybe_convert_description!(description); + let description = maybe_try_convert_enum(description)?; let invoice = - self.receive_inner(Some(amount_msat), description, expiry_secs, Some(payment_hash))?; - Ok(maybe_wrap_invoice(invoice)) + self.receive_inner(Some(amount_msat), &description, expiry_secs, Some(payment_hash))?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request and receive a payment for which the @@ -508,9 +447,9 @@ impl Bolt11Payment { pub fn receive_variable_amount( &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, ) -> Result { - let description = maybe_convert_description!(description); - let invoice = self.receive_inner(None, description, expiry_secs, None)?; - Ok(maybe_wrap_invoice(invoice)) + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_inner(None, &description, expiry_secs, None)?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request a payment for the given payment hash @@ -530,9 +469,9 @@ impl Bolt11Payment { pub fn receive_variable_amount_for_hash( &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, payment_hash: PaymentHash, ) -> Result { - let description = maybe_convert_description!(description); - let invoice = self.receive_inner(None, description, expiry_secs, Some(payment_hash))?; - Ok(maybe_wrap_invoice(invoice)) + let description = 
maybe_try_convert_enum(description)?; + let invoice = self.receive_inner(None, &description, expiry_secs, Some(payment_hash))?; + Ok(maybe_wrap(invoice)) } pub(crate) fn receive_inner( @@ -609,15 +548,55 @@ impl Bolt11Payment { &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, max_total_lsp_fee_limit_msat: Option, ) -> Result { - let description = maybe_convert_description!(description); + let description = maybe_try_convert_enum(description)?; let invoice = self.receive_via_jit_channel_inner( Some(amount_msat), - description, + &description, expiry_secs, max_total_lsp_fee_limit_msat, None, + None, )?; - Ok(maybe_wrap_invoice(invoice)) + Ok(maybe_wrap(invoice)) + } + + /// Returns a payable invoice that can be used to request a payment of the amount given and + /// receive it via a newly created just-in-time (JIT) channel. + /// + /// When the returned invoice is paid, the configured [LSPS2]-compliant LSP will open a channel + /// to us, supplying just-in-time inbound liquidity. + /// + /// If set, `max_total_lsp_fee_limit_msat` will limit how much fee we allow the LSP to take for opening the + /// channel to us. We'll use its cheapest offer otherwise. + /// + /// We will register the given payment hash and emit a [`PaymentClaimable`] event once + /// the inbound payment arrives. The check that [`counterparty_skimmed_fee_msat`] is within the limits + /// is performed *before* emitting the event. + /// + /// **Note:** users *MUST* handle this event and claim the payment manually via + /// [`claim_for_hash`] as soon as they have obtained access to the preimage of the given + /// payment hash. If they're unable to obtain the preimage, they *MUST* immediately fail the payment via + /// [`fail_for_hash`]. + /// + /// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md + /// [`PaymentClaimable`]: crate::Event::PaymentClaimable + /// [`claim_for_hash`]: Self::claim_for_hash + /// [`fail_for_hash`]: Self::fail_for_hash + /// [`counterparty_skimmed_fee_msat`]: crate::payment::PaymentKind::Bolt11Jit::counterparty_skimmed_fee_msat + pub fn receive_via_jit_channel_for_hash( + &self, amount_msat: u64, description: &Bolt11InvoiceDescription, expiry_secs: u32, + max_total_lsp_fee_limit_msat: Option, payment_hash: PaymentHash, + ) -> Result { + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_via_jit_channel_inner( + Some(amount_msat), + &description, + expiry_secs, + max_total_lsp_fee_limit_msat, + None, + Some(payment_hash), + )?; + Ok(maybe_wrap(invoice)) } /// Returns a payable invoice that can be used to request a variable amount payment (also known @@ -635,21 +614,62 @@ impl Bolt11Payment { &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, max_proportional_lsp_fee_limit_ppm_msat: Option, ) -> Result { - let description = maybe_convert_description!(description); + let description = maybe_try_convert_enum(description)?; let invoice = self.receive_via_jit_channel_inner( None, - description, + &description, expiry_secs, None, max_proportional_lsp_fee_limit_ppm_msat, + None, )?; - Ok(maybe_wrap_invoice(invoice)) + Ok(maybe_wrap(invoice)) + } + + /// Returns a payable invoice that can be used to request a variable amount payment (also known + /// as "zero-amount" invoice) and receive it via a newly created just-in-time (JIT) channel. 
+ /// + /// When the returned invoice is paid, the configured [LSPS2]-compliant LSP will open a channel + /// to us, supplying just-in-time inbound liquidity. + /// + /// If set, `max_proportional_lsp_fee_limit_ppm_msat` will limit how much proportional fee, in + /// parts-per-million millisatoshis, we allow the LSP to take for opening the channel to us. + /// We'll use its cheapest offer otherwise. + /// + /// We will register the given payment hash and emit a [`PaymentClaimable`] event once + /// the inbound payment arrives. The check that [`counterparty_skimmed_fee_msat`] is within the limits + /// is performed *before* emitting the event. + /// + /// **Note:** users *MUST* handle this event and claim the payment manually via + /// [`claim_for_hash`] as soon as they have obtained access to the preimage of the given + /// payment hash. If they're unable to obtain the preimage, they *MUST* immediately fail the payment via + /// [`fail_for_hash`]. + /// + /// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md + /// [`PaymentClaimable`]: crate::Event::PaymentClaimable + /// [`claim_for_hash`]: Self::claim_for_hash + /// [`fail_for_hash`]: Self::fail_for_hash + /// [`counterparty_skimmed_fee_msat`]: crate::payment::PaymentKind::Bolt11Jit::counterparty_skimmed_fee_msat + pub fn receive_variable_amount_via_jit_channel_for_hash( + &self, description: &Bolt11InvoiceDescription, expiry_secs: u32, + max_proportional_lsp_fee_limit_ppm_msat: Option, payment_hash: PaymentHash, + ) -> Result { + let description = maybe_try_convert_enum(description)?; + let invoice = self.receive_via_jit_channel_inner( + None, + &description, + expiry_secs, + None, + max_proportional_lsp_fee_limit_ppm_msat, + Some(payment_hash), + )?; + Ok(maybe_wrap(invoice)) } fn receive_via_jit_channel_inner( &self, amount_msat: Option, description: &LdkBolt11InvoiceDescription, expiry_secs: u32, max_total_lsp_fee_limit_msat: Option, - max_proportional_lsp_fee_limit_ppm_msat: Option, + max_proportional_lsp_fee_limit_ppm_msat: Option, payment_hash: Option, ) -> Result { let liquidity_source = self.liquidity_source.as_ref().ok_or(Error::LiquiditySourceUnavailable)?; @@ -657,9 +677,6 @@ impl Bolt11Payment { let (node_id, address) = liquidity_source.get_lsps2_lsp_details().ok_or(Error::LiquiditySourceUnavailable)?; - let rt_lock = self.runtime.read().unwrap(); - let runtime = rt_lock.as_ref().unwrap(); - let peer_info = PeerInfo { node_id, address }; let con_node_id = peer_info.node_id; @@ -668,39 +685,37 @@ impl Bolt11Payment { // We need to use our main runtime here as a local runtime might not be around to poll // connection futures going forward. - tokio::task::block_in_place(move || { - runtime.block_on(async move { - con_cm.connect_peer_if_necessary(con_node_id, con_addr).await - }) + self.runtime.block_on(async move { + con_cm.connect_peer_if_necessary(con_node_id, con_addr).await })?; log_info!(self.logger, "Connected to LSP {}@{}. 
", peer_info.node_id, peer_info.address); let liquidity_source = Arc::clone(&liquidity_source); let (invoice, lsp_total_opening_fee, lsp_prop_opening_fee) = - tokio::task::block_in_place(move || { - runtime.block_on(async move { - if let Some(amount_msat) = amount_msat { - liquidity_source - .lsps2_receive_to_jit_channel( - amount_msat, - description, - expiry_secs, - max_total_lsp_fee_limit_msat, - ) - .await - .map(|(invoice, total_fee)| (invoice, Some(total_fee), None)) - } else { - liquidity_source - .lsps2_receive_variable_amount_to_jit_channel( - description, - expiry_secs, - max_proportional_lsp_fee_limit_ppm_msat, - ) - .await - .map(|(invoice, prop_fee)| (invoice, None, Some(prop_fee))) - } - }) + self.runtime.block_on(async move { + if let Some(amount_msat) = amount_msat { + liquidity_source + .lsps2_receive_to_jit_channel( + amount_msat, + description, + expiry_secs, + max_total_lsp_fee_limit_msat, + payment_hash, + ) + .await + .map(|(invoice, total_fee)| (invoice, Some(total_fee), None)) + } else { + liquidity_source + .lsps2_receive_variable_amount_to_jit_channel( + description, + expiry_secs, + max_proportional_lsp_fee_limit_ppm_msat, + payment_hash, + ) + .await + .map(|(invoice, prop_fee)| (invoice, None, Some(prop_fee))) + } })?; // Register payment in payment store. @@ -750,18 +765,41 @@ impl Bolt11Payment { /// payment. To mitigate this issue, channels with available liquidity less than the required /// amount times [`Config::probing_liquidity_limit_multiplier`] won't be used to send /// pre-flight probes. - pub fn send_probes(&self, invoice: &Bolt11Invoice) -> Result<(), Error> { - let invoice = maybe_convert_invoice(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. + pub fn send_probes( + &self, invoice: &Bolt11Invoice, route_parameters: Option, + ) -> Result<(), Error> { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let (_payment_hash, _recipient_onion, route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let invoice = maybe_deref(invoice); + let payment_params = PaymentParameters::from_bolt11_invoice(invoice); + + let amount_msat = invoice.amount_milli_satoshis().ok_or_else(|| { log_error!(self.logger, "Failed to send probes due to the given invoice being \"zero-amount\". 
Please use send_probes_using_amount instead."); Error::InvalidInvoice })?; + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + + if let Some(RouteParametersConfig { + max_total_routing_fee_msat, + max_total_cltv_expiry_delta, + max_path_count, + max_channel_saturation_power_of_half, + }) = route_parameters.as_ref().or(self.config.route_parameters.as_ref()) + { + route_params.max_total_routing_fee_msat = *max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = *max_total_cltv_expiry_delta; + route_params.payment_params.max_path_count = *max_path_count; + route_params.payment_params.max_channel_saturation_power_of_half = + *max_channel_saturation_power_of_half; + } + let liquidity_limit_multiplier = Some(self.config.probing_liquidity_limit_multiplier); self.channel_manager @@ -780,36 +818,49 @@ impl Bolt11Payment { /// This can be used to send pre-flight probes for a so-called "zero-amount" invoice, i.e., an /// invoice that leaves the amount paid to be determined by the user. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. + /// /// See [`Self::send_probes`] for more information. pub fn send_probes_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, + route_parameters: Option, ) -> Result<(), Error> { - let invoice = maybe_convert_invoice(invoice); - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let (_payment_hash, _recipient_onion, route_params) = if let Some(invoice_amount_msat) = - invoice.amount_milli_satoshis() - { + let invoice = maybe_deref(invoice); + let payment_params = PaymentParameters::from_bolt11_invoice(invoice); + + if let Some(invoice_amount_msat) = invoice.amount_milli_satoshis() { if amount_msat < invoice_amount_msat { log_error!( self.logger, - "Failed to send probes as the given amount needs to be at least the invoice amount: required {}msat, gave {}msat.", invoice_amount_msat, amount_msat); + "Failed to send probes as the given amount needs to be at least the invoice amount: required {}msat, gave {}msat.", + invoice_amount_msat, + amount_msat + ); return Err(Error::InvalidAmount); } + } - bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { - log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being \"zero-amount\"."); - Error::InvalidInvoice - })? - } else { - bolt11_payment::payment_parameters_from_variable_amount_invoice(&invoice, amount_msat).map_err(|_| { - log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being not \"zero-amount\"."); - Error::InvalidInvoice - })? 
- }; + let mut route_params = + RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + + if let Some(RouteParametersConfig { + max_total_routing_fee_msat, + max_total_cltv_expiry_delta, + max_path_count, + max_channel_saturation_power_of_half, + }) = route_parameters.as_ref().or(self.config.route_parameters.as_ref()) + { + route_params.max_total_routing_fee_msat = *max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = *max_total_cltv_expiry_delta; + route_params.payment_params.max_path_count = *max_path_count; + route_params.payment_params.max_channel_saturation_power_of_half = + *max_channel_saturation_power_of_half; + } let liquidity_limit_multiplier = Some(self.config.probing_liquidity_limit_multiplier); diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 8006f4bb9..0dd38edca 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -9,24 +9,41 @@ //! //! [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -use crate::config::LDK_PAYMENT_RETRY_TIMEOUT; +use std::num::NonZeroU64; +use std::sync::{Arc, RwLock}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use lightning::blinded_path::message::BlindedMessagePath; +use lightning::ln::channelmanager::{OptionalOfferPaymentParams, PaymentId, Retry}; +use lightning::offers::offer::{Amount, Offer as LdkOffer, Quantity}; +use lightning::offers::parse::Bolt12SemanticError; +use lightning::routing::router::RouteParametersConfig; +#[cfg(feature = "uniffi")] +use lightning::util::ser::{Readable, Writeable}; +use lightning_types::string::UntrustedString; +use rand::RngCore; + +use crate::config::{AsyncPaymentsRole, Config, LDK_PAYMENT_RETRY_TIMEOUT}; use crate::error::Error; +use crate::ffi::{maybe_deref, maybe_wrap}; use crate::logger::{log_error, log_info, LdkLogger, Logger}; use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; use crate::types::{ChannelManager, PaymentStore}; -use lightning::ln::channelmanager::{PaymentId, Retry}; -use lightning::offers::invoice::Bolt12Invoice; -use lightning::offers::offer::{Amount, Offer, Quantity}; -use lightning::offers::parse::Bolt12SemanticError; -use lightning::offers::refund::Refund; -use lightning::util::string::UntrustedString; +#[cfg(not(feature = "uniffi"))] +type Bolt12Invoice = lightning::offers::invoice::Bolt12Invoice; +#[cfg(feature = "uniffi")] +type Bolt12Invoice = Arc; -use rand::RngCore; +#[cfg(not(feature = "uniffi"))] +type Offer = LdkOffer; +#[cfg(feature = "uniffi")] +type Offer = Arc; -use std::num::NonZeroU64; -use std::sync::{Arc, RwLock}; -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +#[cfg(not(feature = "uniffi"))] +type Refund = lightning::offers::refund::Refund; +#[cfg(feature = "uniffi")] +type Refund = Arc; /// A payment handler allowing to create and pay [BOLT 12] offers and refunds. 
/// @@ -35,19 +52,21 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [`Node::bolt12_payment`]: crate::Node::bolt12_payment pub struct Bolt12Payment { - runtime: Arc>>>, channel_manager: Arc, payment_store: Arc, + config: Arc, + is_running: Arc>, logger: Arc, + async_payments_role: Option, } impl Bolt12Payment { pub(crate) fn new( - runtime: Arc>>>, channel_manager: Arc, payment_store: Arc, - logger: Arc, + config: Arc, is_running: Arc>, logger: Arc, + async_payments_role: Option, ) -> Self { - Self { runtime, channel_manager, payment_store, logger } + Self { channel_manager, payment_store, config, is_running, logger, async_payments_role } } /// Send a payment given an offer. @@ -56,18 +75,25 @@ impl Bolt12Payment { /// response. /// /// If `quantity` is `Some` it represents the number of items requested. + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. pub fn send( &self, offer: &Offer, quantity: Option, payer_note: Option, + route_parameters: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + + let offer = maybe_deref(offer); + let mut random_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut random_bytes); + rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let max_total_routing_fee_msat = None; + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -81,15 +107,19 @@ impl Bolt12Payment { }, }; - match self.channel_manager.pay_for_offer( - &offer, - quantity, - None, - payer_note.clone(), - payment_id, + let params = OptionalOfferPaymentParams { + payer_note: payer_note.clone(), retry_strategy, - max_total_routing_fee_msat, - ) { + route_params_config: route_parameters, + }; + let res = if let Some(quantity) = quantity { + self.channel_manager + .pay_for_offer_with_quantity(&offer, None, payment_id, params, quantity) + } else { + self.channel_manager.pay_for_offer(&offer, None, payment_id, params) + }; + + match res { Ok(()) => { let payee_pubkey = offer.issuer_signing_pubkey(); log_info!( @@ -157,19 +187,25 @@ impl Bolt12Payment { /// /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
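As a minimal caller-side sketch of the `route_parameters` override introduced above: `node` and `offer` are assumed names (a started ldk-node instance and a parsed offer), and the snippet presumes a surrounding function returning `Result`. Since this diff destructures `RouteParametersConfig` into exactly four public fields and relies on its `Default` impl, struct-update syntax can pin a single knob:

```rust
use lightning::routing::router::RouteParametersConfig;

// Cap the total routing fee for this one payment at 5_000 msat; all other
// fields keep their `Default` values.
let route_params = RouteParametersConfig {
	max_total_routing_fee_msat: Some(5_000),
	..RouteParametersConfig::default()
};
let payment_id = node.bolt12_payment().send(&offer, None, None, Some(route_params))?;
```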
pub fn send_using_amount( &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, + route_parameters: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } + let offer = maybe_deref(offer); + let mut random_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut random_bytes); + rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let max_total_routing_fee_msat = None; + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let offer_amount_msat = match offer.amount() { Some(Amount::Bitcoin { amount_msats }) => amount_msats, @@ -187,15 +223,24 @@ impl Bolt12Payment { return Err(Error::InvalidAmount); } - match self.channel_manager.pay_for_offer( - &offer, - quantity, - Some(amount_msat), - payer_note.clone(), - payment_id, + let params = OptionalOfferPaymentParams { + payer_note: payer_note.clone(), retry_strategy, - max_total_routing_fee_msat, - ) { + route_params_config: route_parameters, + }; + let res = if let Some(quantity) = quantity { + self.channel_manager.pay_for_offer_with_quantity( + &offer, + Some(amount_msat), + payment_id, + params, + quantity, + ) + } else { + self.channel_manager.pay_for_offer(&offer, Some(amount_msat), payment_id, params) + }; + + match res { Ok(()) => { let payee_pubkey = offer.issuer_signing_pubkey(); log_info!( @@ -254,22 +299,20 @@ impl Bolt12Payment { } } - /// Returns a payable offer that can be used to request and receive a payment of the amount - /// given. - pub fn receive( + pub(crate) fn receive_inner( &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, - ) -> Result { - let absolute_expiry = expiry_secs.map(|secs| { - (SystemTime::now() + Duration::from_secs(secs as u64)) - .duration_since(UNIX_EPOCH) - .unwrap() - }); + ) -> Result { + let mut offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); + Error::OfferCreationFailed + })?; - let offer_builder = - self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; + if let Some(expiry_secs) = expiry_secs { + let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) + .duration_since(UNIX_EPOCH) + .unwrap(); + offer_builder = offer_builder.absolute_expiry(absolute_expiry); + } let mut offer = offer_builder.amount_msats(amount_msat).description(description.to_string()); @@ -291,36 +334,54 @@ impl Bolt12Payment { Ok(finalized_offer) } + /// Returns a payable offer that can be used to request and receive a payment of the amount + /// given. + pub fn receive( + &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, + ) -> Result { + let offer = self.receive_inner(amount_msat, description, expiry_secs, quantity)?; + Ok(maybe_wrap(offer)) + } + /// Returns a payable offer that can be used to request and receive a payment for which the /// amount is to be determined by the user, also known as a "zero-amount" offer. 
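For the receiving side shown above, a hedged usage sketch of `receive` (assuming `node` is a started ldk-node instance; in non-`uniffi` builds the returned `Offer` is LDK's offer type, which is `Display`-encodable):

```rust
// Create an offer for 25_000 msat with a one-hour expiry that lets the
// payer request up to 10 items.
let offer = node.bolt12_payment().receive(
	25_000,      // amount_msat
	"coffee",    // description
	Some(3_600), // expiry_secs
	Some(10),    // quantity
)?;
println!("offer: {}", offer);
```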
pub fn receive_variable_amount( &self, description: &str, expiry_secs: Option, ) -> Result { - let absolute_expiry = expiry_secs.map(|secs| { - (SystemTime::now() + Duration::from_secs(secs as u64)) + let mut offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); + Error::OfferCreationFailed + })?; + + if let Some(expiry_secs) = expiry_secs { + let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap() - }); + .unwrap(); + offer_builder = offer_builder.absolute_expiry(absolute_expiry); + } - let offer_builder = - self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; let offer = offer_builder.description(description.to_string()).build().map_err(|e| { log_error!(self.logger, "Failed to create offer: {:?}", e); Error::OfferCreationFailed })?; - Ok(offer) + Ok(maybe_wrap(offer)) } /// Requests a refund payment for the given [`Refund`]. /// /// The returned [`Bolt12Invoice`] is for informational purposes only (i.e., isn't needed to /// retrieve the refund). + /// + /// [`Refund`]: lightning::offers::refund::Refund + /// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice pub fn request_refund_payment(&self, refund: &Refund) -> Result { - let invoice = self.channel_manager.request_refund_payment(refund).map_err(|e| { + if !*self.is_running.read().unwrap() { + return Err(Error::NotRunning); + } + + let refund = maybe_deref(refund); + let invoice = self.channel_manager.request_refund_payment(&refund).map_err(|e| { log_error!(self.logger, "Failed to request refund payment: {:?}", e); Error::InvoiceRequestCreationFailed })?; @@ -347,23 +408,29 @@ impl Bolt12Payment { self.payment_store.insert(payment)?; - Ok(invoice) + Ok(maybe_wrap(invoice)) } /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. + /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. + /// + /// [`Refund`]: lightning::offers::refund::Refund pub fn initiate_refund( &self, amount_msat: u64, expiry_secs: u32, quantity: Option, - payer_note: Option, + payer_note: Option, route_parameters: Option, ) -> Result { let mut random_bytes = [0u8; 32]; - rand::thread_rng().fill_bytes(&mut random_bytes); + rand::rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) .unwrap(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); - let max_total_routing_fee_msat = None; + let route_parameters = + route_parameters.or(self.config.route_parameters).unwrap_or_default(); let mut refund_builder = self .channel_manager @@ -372,7 +439,7 @@ impl Bolt12Payment { absolute_expiry, payment_id, retry_strategy, - max_total_routing_fee_msat, + route_parameters, ) .map_err(|e| { log_error!(self.logger, "Failed to create refund builder: {:?}", e); @@ -412,6 +479,104 @@ impl Bolt12Payment { self.payment_store.insert(payment)?; - Ok(refund) + Ok(maybe_wrap(refund)) + } + + /// Retrieve an [`Offer`] for receiving async payments as an often-offline recipient. 
+ /// + /// Will only return an offer if [`Bolt12Payment::set_paths_to_static_invoice_server`] was called and we succeeded + /// in interactively building a [`StaticInvoice`] with the static invoice server. + /// + /// Useful for posting offers to receive payments later, such as posting an offer on a website. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + /// [`Offer`]: lightning::offers::offer::Offer + pub fn receive_async(&self) -> Result { + self.channel_manager + .get_async_receive_offer() + .map(maybe_wrap) + .or(Err(Error::OfferCreationFailed)) + } + + /// Sets the [`BlindedMessagePath`]s that we will use as an async recipient to interactively build [`Offer`]s with a + /// static invoice server, so the server can serve [`StaticInvoice`]s to payers on our behalf when we're offline. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(not(feature = "uniffi"))] + pub fn set_paths_to_static_invoice_server( + &self, paths: Vec, + ) -> Result<(), Error> { + self.channel_manager + .set_paths_to_static_invoice_server(paths) + .or(Err(Error::InvalidBlindedPaths)) + } + + /// Sets the [`BlindedMessagePath`]s that we will use as an async recipient to interactively build [`Offer`]s with a + /// static invoice server, so the server can serve [`StaticInvoice`]s to payers on our behalf when we're offline. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(feature = "uniffi")] + pub fn set_paths_to_static_invoice_server(&self, paths: Vec) -> Result<(), Error> { + let decoded_paths = as Readable>::read(&mut &paths[..]) + .or(Err(Error::InvalidBlindedPaths))?; + + self.channel_manager + .set_paths_to_static_invoice_server(decoded_paths) + .or(Err(Error::InvalidBlindedPaths)) + } + + /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively + /// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments. + /// + /// **Caution**: Async payments support is considered experimental. + /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(not(feature = "uniffi"))] + pub fn blinded_paths_for_async_recipient( + &self, recipient_id: Vec, + ) -> Result, Error> { + self.blinded_paths_for_async_recipient_internal(recipient_id) + } + + /// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively + /// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments. + /// + /// **Caution**: Async payments support is considered experimental. 
+ /// + /// [`Offer`]: lightning::offers::offer::Offer + /// [`StaticInvoice`]: lightning::offers::static_invoice::StaticInvoice + #[cfg(feature = "uniffi")] + pub fn blinded_paths_for_async_recipient( + &self, recipient_id: Vec, + ) -> Result, Error> { + let paths = self.blinded_paths_for_async_recipient_internal(recipient_id)?; + + let mut bytes = Vec::new(); + paths.write(&mut bytes).or(Err(Error::InvalidBlindedPaths))?; + Ok(bytes) + } + + fn blinded_paths_for_async_recipient_internal( + &self, recipient_id: Vec, + ) -> Result, Error> { + match self.async_payments_role { + Some(AsyncPaymentsRole::Server) => {}, + _ => { + return Err(Error::AsyncPaymentServicesDisabled); + }, + } + + self.channel_manager + .blinded_paths_for_async_recipient(recipient_id, None) + .or(Err(Error::InvalidBlindedPaths)) } } diff --git a/src/payment/mod.rs b/src/payment/mod.rs index b031e37fd..f629960e1 100644 --- a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -7,6 +7,7 @@ //! Objects for different types of payments. +pub(crate) mod asynchronous; mod bolt11; mod bolt12; mod onchain; @@ -22,87 +23,3 @@ pub use store::{ ConfirmationStatus, LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, }; pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; - -/// Represents information used to send a payment. -#[derive(Clone, Debug, PartialEq)] -pub struct SendingParameters { - /// The maximum total fees, in millisatoshi, that may accrue during route finding. - /// - /// This limit also applies to the total fees that may arise while retrying failed payment - /// paths. - /// - /// Note that values below a few sats may result in some paths being spuriously ignored. - #[cfg(not(feature = "uniffi"))] - pub max_total_routing_fee_msat: Option>, - /// The maximum total fees, in millisatoshi, that may accrue during route finding. - /// - /// This limit also applies to the total fees that may arise while retrying failed payment - /// paths. - /// - /// Note that values below a few sats may result in some paths being spuriously ignored. - #[cfg(feature = "uniffi")] - pub max_total_routing_fee_msat: Option, - /// The maximum total CLTV delta we accept for the route. - /// - /// Defaults to [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]. - /// - /// [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]: lightning::routing::router::DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA - pub max_total_cltv_expiry_delta: Option, - /// The maximum number of paths that may be used by (MPP) payments. - /// - /// Defaults to [`DEFAULT_MAX_PATH_COUNT`]. - /// - /// [`DEFAULT_MAX_PATH_COUNT`]: lightning::routing::router::DEFAULT_MAX_PATH_COUNT - pub max_path_count: Option, - /// Selects the maximum share of a channel's total capacity which will be sent over a channel, - /// as a power of 1/2. - /// - /// A higher value prefers to send the payment using more MPP parts whereas - /// a lower value prefers to send larger MPP parts, potentially saturating channels and - /// increasing failure probability for those paths. - /// - /// Note that this restriction will be relaxed during pathfinding after paths which meet this - /// restriction have been found. While paths which meet this criteria will be searched for, it - /// is ultimately up to the scorer to select them over other paths. 
- /// - /// Examples: - /// - /// | Value | Max Proportion of Channel Capacity Used | - /// |-------|-----------------------------------------| - /// | 0 | Up to 100% of the channel’s capacity | - /// | 1 | Up to 50% of the channel’s capacity | - /// | 2 | Up to 25% of the channel’s capacity | - /// | 3 | Up to 12.5% of the channel’s capacity | - /// - /// Default value: 2 - pub max_channel_saturation_power_of_half: Option, -} - -/// Represents the possible states of [`SendingParameters::max_total_routing_fee_msat`]. -// -// Required only in bindings as UniFFI can't expose `Option>`. -#[cfg(feature = "uniffi")] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum MaxTotalRoutingFeeLimit { - None, - Some { amount_msat: u64 }, -} - -#[cfg(feature = "uniffi")] -impl From for Option { - fn from(value: MaxTotalRoutingFeeLimit) -> Self { - match value { - MaxTotalRoutingFeeLimit::Some { amount_msat } => Some(amount_msat), - MaxTotalRoutingFeeLimit::None => None, - } - } -} - -#[cfg(feature = "uniffi")] -impl From> for MaxTotalRoutingFeeLimit { - fn from(value: Option) -> Self { - value.map_or(MaxTotalRoutingFeeLimit::None, |amount_msat| MaxTotalRoutingFeeLimit::Some { - amount_msat, - }) - } -} diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index 046d66c69..695f96d43 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -7,23 +7,23 @@ //! Holds a payment handler allowing to send and receive on-chain payments. +use std::sync::{Arc, RwLock}; + +use bitcoin::{Address, Txid}; + use crate::config::Config; use crate::error::Error; use crate::logger::{log_info, LdkLogger, Logger}; use crate::types::{ChannelManager, Wallet}; use crate::wallet::OnchainSendAmount; -use bitcoin::{Address, Txid}; - -use std::sync::{Arc, RwLock}; - #[cfg(not(feature = "uniffi"))] type FeeRate = bitcoin::FeeRate; #[cfg(feature = "uniffi")] type FeeRate = Arc; macro_rules! maybe_map_fee_rate_opt { - ($fee_rate_opt: expr) => {{ + ($fee_rate_opt:expr) => {{ #[cfg(not(feature = "uniffi"))] { $fee_rate_opt @@ -41,19 +41,19 @@ macro_rules! maybe_map_fee_rate_opt { /// /// [`Node::onchain_payment`]: crate::Node::onchain_payment pub struct OnchainPayment { - runtime: Arc>>>, wallet: Arc, channel_manager: Arc, config: Arc, + is_running: Arc>, logger: Arc, } impl OnchainPayment { pub(crate) fn new( - runtime: Arc>>>, wallet: Arc, - channel_manager: Arc, config: Arc, logger: Arc, + wallet: Arc, channel_manager: Arc, config: Arc, + is_running: Arc>, logger: Arc, ) -> Self { - Self { runtime, wallet, channel_manager, config, logger } + Self { wallet, channel_manager, config, is_running, logger } } /// Retrieve a new on-chain/funding address. @@ -75,8 +75,7 @@ impl OnchainPayment { pub fn send_to_address( &self, address: &bitcoin::Address, amount_sats: u64, fee_rate: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } @@ -106,8 +105,7 @@ impl OnchainPayment { pub fn send_all_to_address( &self, address: &bitcoin::Address, retain_reserves: bool, fee_rate: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 0f25ad1c5..5f1825dea 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -7,22 +7,19 @@ //! Holds a payment handler allowing to send spontaneous ("keysend") payments. 
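The on-chain handler changes above gate each call on `is_running` and accept an optional fee rate. A minimal sketch, assuming `node` is a started ldk-node instance, `address` is a network-checked `bitcoin::Address`, and the non-`uniffi` (plain Rust) API is in use:

```rust
use bitcoin::FeeRate;

// Send 10_000 sats at roughly 3 sat/vB. `from_sat_per_vb` returns an
// `Option<FeeRate>`, which matches the `Option` parameter; passing `None`
// instead falls back to the node's fee estimator.
let fee_rate = FeeRate::from_sat_per_vb(3);
let txid = node.onchain_payment().send_to_address(&address, 10_000, fee_rate)?;
```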
-use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; -use crate::error::Error; -use crate::logger::{log_error, log_info, LdkLogger, Logger}; -use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; -use crate::payment::SendingParameters; -use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore, TlvEntry}; +use std::sync::{Arc, RwLock}; +use bitcoin::secp256k1::PublicKey; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; -use lightning::routing::router::{PaymentParameters, RouteParameters}; +use lightning::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig}; use lightning::sign::EntropySource; - use lightning_types::payment::{PaymentHash, PaymentPreimage}; -use bitcoin::secp256k1::PublicKey; - -use std::sync::{Arc, RwLock}; +use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; +use crate::error::Error; +use crate::logger::{log_error, log_info, LdkLogger, Logger}; +use crate::payment::store::{PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; +use crate::types::{ChannelManager, CustomTlvRecord, KeysManager, PaymentStore}; // The default `final_cltv_expiry_delta` we apply when not set. const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; @@ -33,163 +30,70 @@ const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; /// /// [`Node::spontaneous_payment`]: crate::Node::spontaneous_payment pub struct SpontaneousPayment { - runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, payment_store: Arc, config: Arc, + is_running: Arc>, logger: Arc, } impl SpontaneousPayment { pub(crate) fn new( - runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, - payment_store: Arc, config: Arc, logger: Arc, + payment_store: Arc, config: Arc, is_running: Arc>, + logger: Arc, ) -> Self { - Self { runtime, channel_manager, keys_manager, payment_store, config, logger } - } - - // Alby: send a keysend payment with TLVs and preimage - /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. 
- pub fn send_with_tlvs_and_preimage( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Vec, preimage: Option, - ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { - return Err(Error::NotRunning); - } - - let payment_preimage = preimage - .unwrap_or_else(|| PaymentPreimage(self.keys_manager.get_secure_random_bytes())); - let payment_hash = PaymentHash::from(payment_preimage); - let payment_id = PaymentId(payment_hash.0); - - if let Some(payment) = self.payment_store.get(&payment_id) { - if payment.status == PaymentStatus::Pending - || payment.status == PaymentStatus::Succeeded - { - log_error!(self.logger, "Payment error: must not send duplicate payments."); - return Err(Error::DuplicatePayment); - } - } - - let mut route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::from_node_id(node_id, LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA), - amount_msat, - ); - - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; - - let recipient_fields = RecipientOnionFields::spontaneous_empty() - .with_custom_tlvs( - custom_tlvs.iter().map(|tlv| (tlv.r#type, tlv.value.clone())).collect(), - ) - .map_err(|_| { - log_error!(self.logger, "Payment error: invalid custom TLVs."); - Error::InvalidCustomTlv - })?; - - match self.channel_manager.send_spontaneous_payment( - Some(payment_preimage), - recipient_fields, - PaymentId(payment_hash.0), - route_params, - Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT), - ) { - Ok(_hash) => { - log_info!(self.logger, "Initiated sending {}msat to {}.", amount_msat, node_id); - - let kind = PaymentKind::Spontaneous { - hash: payment_hash, - preimage: Some(payment_preimage), - custom_tlvs, - }; - let payment = PaymentDetails::new( - payment_id, - kind, - Some(amount_msat), - None, - PaymentDirection::Outbound, - PaymentStatus::Pending, - ); - - self.payment_store.insert(payment)?; - - Ok(payment_id) - }, - Err(e) => { - log_error!(self.logger, "Failed to send payment: {:?}", e); - - match e { - RetryableSendFailure::DuplicatePayment => Err(Error::DuplicatePayment), - _ => { - let kind = PaymentKind::Spontaneous { - hash: payment_hash, - preimage: Some(payment_preimage), - custom_tlvs, - }; - let payment = PaymentDetails::new( - payment_id, - kind, - Some(amount_msat), - None, - PaymentDirection::Outbound, - PaymentStatus::Failed, - ); - - self.payment_store.insert(payment)?; - Err(Error::PaymentSendingFailed) - }, - } - }, - } + Self { channel_manager, keys_manager, payment_store, config, is_running, logger } } /// Send a spontaneous aka. "keysend", payment. /// - /// If `sending_parameters` are provided they will override the default as well as the - /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. 
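A short usage sketch of the reworked keysend API, under stated assumptions: `node` is a started ldk-node instance, `node_id` is the recipient's `PublicKey`, and `preimage` is a caller-generated `PaymentPreimage`. Passing `None` for the route parameters keeps whatever was configured node-wide:

```rust
// Plain keysend; no invoice required, only the recipient's node id.
let payment_id = node.spontaneous_payment().send(5_000, node_id, None)?;

// The variant added in this diff pins the preimage, useful when an external
// system must know the payment hash in advance.
let payment_id_2 =
	node.spontaneous_payment().send_with_preimage(5_000, node_id, preimage, None)?;
```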
pub fn send( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, None) + self.send_inner(amount_msat, node_id, route_parameters, None, None) } /// Send a spontaneous payment including a list of custom TLVs. pub fn send_with_custom_tlvs( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Vec, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, custom_tlvs: Vec, ) -> Result { - self.send_inner(amount_msat, node_id, sending_parameters, Some(custom_tlvs)) + self.send_inner(amount_msat, node_id, route_parameters, Some(custom_tlvs), None) + } + + /// Send a spontaneous payment with custom preimage + pub fn send_with_preimage( + &self, amount_msat: u64, node_id: PublicKey, preimage: PaymentPreimage, + route_parameters: Option, + ) -> Result { + self.send_inner(amount_msat, node_id, route_parameters, None, Some(preimage)) + } + + /// Send a spontaneous payment with custom preimage including a list of custom TLVs. + pub fn send_with_preimage_and_custom_tlvs( + &self, amount_msat: u64, node_id: PublicKey, custom_tlvs: Vec, + preimage: PaymentPreimage, route_parameters: Option, + ) -> Result { + self.send_inner(amount_msat, node_id, route_parameters, Some(custom_tlvs), Some(preimage)) } fn send_inner( - &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, - custom_tlvs: Option>, + &self, amount_msat: u64, node_id: PublicKey, + route_parameters: Option, custom_tlvs: Option>, + preimage: Option, ) -> Result { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } - let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); + let payment_preimage = preimage + .unwrap_or_else(|| PaymentPreimage(self.keys_manager.get_secure_random_bytes())); + let payment_hash = PaymentHash::from(payment_preimage); let payment_id = PaymentId(payment_hash.0); @@ -207,20 +111,19 @@ impl SpontaneousPayment { amount_msat, ); - let override_params = - sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); - if let Some(override_params) = override_params { - override_params - .max_total_routing_fee_msat - .map(|f| route_params.max_total_routing_fee_msat = f.into()); - override_params - .max_total_cltv_expiry_delta - .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); - override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); - override_params - .max_channel_saturation_power_of_half - .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); - }; + if let Some(RouteParametersConfig { + max_total_routing_fee_msat, + max_total_cltv_expiry_delta, + max_path_count, + max_channel_saturation_power_of_half, + }) = route_parameters.as_ref().or(self.config.route_parameters.as_ref()) + { + route_params.max_total_routing_fee_msat = *max_total_routing_fee_msat; + route_params.payment_params.max_total_cltv_expiry_delta = *max_total_cltv_expiry_delta; + route_params.payment_params.max_path_count = *max_path_count; + route_params.payment_params.max_channel_saturation_power_of_half = + *max_channel_saturation_power_of_half; + } let recipient_fields = match custom_tlvs { Some(tlvs) => RecipientOnionFields::spontaneous_empty() @@ -294,8 +197,7 @@ impl SpontaneousPayment { /// /// 
[`Bolt11Payment::send_probes`]: crate::payment::Bolt11Payment pub fn send_probes(&self, amount_msat: u64, node_id: PublicKey) -> Result<(), Error> { - let rt_lock = self.runtime.read().unwrap(); - if rt_lock.is_none() { + if !*self.is_running.read().unwrap() { return Err(Error::NotRunning); } diff --git a/src/payment/store.rs b/src/payment/store.rs index 5c5e829ef..8ab3d12a2 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -7,21 +7,19 @@ use crate::types::TlvEntry; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use bitcoin::{BlockHash, Txid}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::DecodeError; use lightning::offers::offer::OfferId; use lightning::util::ser::{Readable, Writeable}; -use lightning::util::string::UntrustedString; use lightning::{ _init_and_read_len_prefixed_tlv_fields, impl_writeable_tlv_based, impl_writeable_tlv_based_enum, write_tlv_fields, }; - use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; - -use bitcoin::{BlockHash, Txid}; - -use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use lightning_types::string::UntrustedString; use crate::data_store::{StorableObject, StorableObjectId, StorableObjectUpdate}; use crate::hex_utils; @@ -50,12 +48,6 @@ pub struct PaymentDetails { pub status: PaymentStatus, /// The timestamp, in seconds since start of the UNIX epoch, when this entry was last updated. pub latest_update_timestamp: u64, - - // Old Alby fields - duplicates of new LDK fields - /*/// Alby: Last update timestamp, as seconds since Unix epoch. TODO: remove and use latest_update_timestamp - pub last_update: u64, - /// Alby: Fee paid. TODO: remove and use fee_paid_msat - pub fee_msat: Option,*/ /// Alby: Payment creation timestamp, as seconds since Unix epoch. pub created_at: u64, } @@ -212,7 +204,7 @@ impl StorableObject for PaymentDetails { let mut updated = false; macro_rules! update_if_necessary { - ($val: expr, $update: expr) => { + ($val:expr, $update:expr) => { if $val != $update { $val = $update; updated = true; @@ -645,10 +637,11 @@ impl StorableObjectUpdate for PaymentDetailsUpdate { #[cfg(test)] mod tests { - use super::*; use bitcoin::io::Cursor; use lightning::util::ser::Readable; + use super::*; + /// We refactored `PaymentDetails` to hold a payment id and moved some required fields into /// `PaymentKind`. Here, we keep the old layout available in order test de/ser compatibility. #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs index 5971daead..6ebf25563 100644 --- a/src/payment/unified_qr.rs +++ b/src/payment/unified_qr.rs @@ -11,22 +11,23 @@ //! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki //! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md -use crate::error::Error; -use crate::logger::{log_error, LdkLogger, Logger}; -use crate::payment::{bolt11::maybe_wrap_invoice, Bolt11Payment, Bolt12Payment, OnchainPayment}; -use crate::Config; - -use lightning::ln::channelmanager::PaymentId; -use lightning::offers::offer::Offer; -use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; +use std::sync::Arc; +use std::vec::IntoIter; use bip21::de::ParamKind; use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; use bitcoin::address::{NetworkChecked, NetworkUnchecked}; use bitcoin::{Amount, Txid}; +use lightning::ln::channelmanager::PaymentId; +use lightning::offers::offer::Offer; +use lightning::routing::router::RouteParametersConfig; +use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; -use std::sync::Arc; -use std::vec::IntoIter; +use crate::error::Error; +use crate::ffi::maybe_wrap; +use crate::logger::{log_error, LdkLogger, Logger}; +use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::Config; type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; @@ -94,14 +95,14 @@ impl UnifiedQrPayment { let amount_msats = amount_sats * 1_000; - let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description, None, None) - { - Ok(offer) => Some(offer), - Err(e) => { - log_error!(self.logger, "Failed to create offer: {}", e); - None - }, - }; + let bolt12_offer = + match self.bolt12_payment.receive_inner(amount_msats, description, None, None) { + Ok(offer) => Some(offer), + Err(e) => { + log_error!(self.logger, "Failed to create offer: {}", e); + None + }, + }; let invoice_description = Bolt11InvoiceDescription::Direct( Description::new(description.to_string()).map_err(|_| Error::InvoiceCreationFailed)?, @@ -137,8 +138,13 @@ impl UnifiedQrPayment { /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error /// occurs, an `Error` is returned detailing the issue encountered. /// + /// If `route_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::route_parameters`] on a per-field basis. + /// /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki - pub fn send(&self, uri_str: &str) -> Result { + pub fn send( + &self, uri_str: &str, route_parameters: Option, + ) -> Result { let uri: bip21::Uri = uri_str.parse().map_err(|_| Error::InvalidUri)?; @@ -146,15 +152,16 @@ impl UnifiedQrPayment { uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; if let Some(offer) = uri_network_checked.extras.bolt12_offer { - match self.bolt12_payment.send(&offer, None, None) { + let offer = maybe_wrap(offer); + match self.bolt12_payment.send(&offer, None, None, route_parameters) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), } } if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { - let invoice = maybe_wrap_invoice(invoice); - match self.bolt11_invoice.send(&invoice, None) { + let invoice = maybe_wrap(invoice); + match self.bolt11_invoice.send(&invoice, route_parameters) { Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. 
This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), } @@ -301,10 +308,12 @@ impl DeserializationError for Extras { #[cfg(test)] mod tests { + use std::str::FromStr; + + use bitcoin::{Address, Network}; + use super::*; use crate::payment::unified_qr::Extras; - use bitcoin::{Address, Network}; - use std::str::FromStr; #[test] fn parse_uri() { diff --git a/src/peer_store.rs b/src/peer_store.rs index 4d1c65157..59cd3d94f 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -5,6 +5,15 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::collections::HashMap; +use std::ops::Deref; +use std::sync::{Arc, RwLock}; + +use bitcoin::secp256k1::PublicKey; +use lightning::impl_writeable_tlv_based; +use lightning::util::persist::KVStoreSync; +use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; + use crate::io::{ PEER_INFO_PERSISTENCE_KEY, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, @@ -13,15 +22,6 @@ use crate::logger::{log_error, LdkLogger}; use crate::types::DynStore; use crate::{Error, SocketAddress}; -use lightning::impl_writeable_tlv_based; -use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; - -use bitcoin::secp256k1::PublicKey; - -use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, RwLock}; - pub struct PeerStore where L::Target: LdkLogger, @@ -68,24 +68,24 @@ where fn persist_peers(&self, locked_peers: &HashMap) -> Result<(), Error> { let data = PeerStoreSerWrapper(&*locked_peers).encode(); - self.kv_store - .write( + KVStoreSync::write( + &*self.kv_store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + data, + ) + .map_err(|e| { + log_error!( + self.logger, + "Write for key {}/{}/{} failed due to: {}", PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, PEER_INFO_PERSISTENCE_KEY, - &data, - ) - .map_err(|e| { - log_error!( - self.logger, - "Write for key {}/{}/{} failed due to: {}", - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - e - ); - Error::PersistenceFailed - })?; + e + ); + Error::PersistenceFailed + })?; Ok(()) } } @@ -149,15 +149,17 @@ impl_writeable_tlv_based!(PeerInfo, { #[cfg(test)] mod tests { - use super::*; - use lightning::util::test_utils::{TestLogger, TestStore}; - use std::str::FromStr; use std::sync::Arc; + use lightning::util::test_utils::TestLogger; + + use super::*; + use crate::io::test_utils::InMemoryStore; + #[test] fn peer_info_persistence() { - let store: Arc = Arc::new(TestStore::new(false)); + let store: Arc = Arc::new(InMemoryStore::new()); let logger = Arc::new(TestLogger::new()); let peer_store = PeerStore::new(Arc::clone(&store), Arc::clone(&logger)); @@ -167,23 +169,23 @@ mod tests { .unwrap(); let address = SocketAddress::from_str("127.0.0.1:9738").unwrap(); let expected_peer_info = PeerInfo { node_id, address }; - assert!(store - .read( - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - ) - .is_err()); + assert!(KVStoreSync::read( + &*store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .is_err()); peer_store.add_peer(expected_peer_info.clone()).unwrap(); // Check we can read back what we persisted. 
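The switch to the fully qualified `KVStoreSync::read(&*store, ...)` form in the test above is not cosmetic: once a store implements both the async `KVStore` and the sync `KVStoreSync`, a plain method call no longer resolves. A self-contained toy illustration (names are invented for the sketch):

```rust
trait AsyncStyle {
	fn read(&self) -> &'static str {
		"async"
	}
}
trait SyncStyle {
	fn read(&self) -> &'static str {
		"sync"
	}
}

struct Store;
impl AsyncStyle for Store {}
impl SyncStyle for Store {}

fn demo(store: &Store) -> &'static str {
	// `store.read()` would fail to compile here (E0034: multiple applicable
	// items in scope); naming the trait resolves the ambiguity.
	SyncStyle::read(store)
}
```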
- let persisted_bytes = store - .read( - PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - PEER_INFO_PERSISTENCE_KEY, - ) - .unwrap(); + let persisted_bytes = KVStoreSync::read( + &*store, + PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + PEER_INFO_PERSISTENCE_KEY, + ) + .unwrap(); let deser_peer_store = PeerStore::read(&mut &persisted_bytes[..], (Arc::clone(&store), logger)).unwrap(); diff --git a/src/runtime.rs b/src/runtime.rs new file mode 100644 index 000000000..1e9883ae4 --- /dev/null +++ b/src/runtime.rs @@ -0,0 +1,221 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use std::future::Future; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use tokio::task::{JoinHandle, JoinSet}; + +use crate::config::{ + BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS, LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS, +}; +use crate::logger::{log_debug, log_error, log_trace, LdkLogger, Logger}; + +pub(crate) struct Runtime { + mode: RuntimeMode, + background_tasks: Mutex>, + cancellable_background_tasks: Mutex>, + background_processor_task: Mutex>>, + logger: Arc, +} + +impl Runtime { + pub fn new(logger: Arc) -> Result { + let mode = match tokio::runtime::Handle::try_current() { + Ok(handle) => RuntimeMode::Handle(handle), + Err(_) => { + let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; + RuntimeMode::Owned(rt) + }, + }; + let background_tasks = Mutex::new(JoinSet::new()); + let cancellable_background_tasks = Mutex::new(JoinSet::new()); + let background_processor_task = Mutex::new(None); + + Ok(Self { + mode, + background_tasks, + cancellable_background_tasks, + background_processor_task, + logger, + }) + } + + pub fn with_handle(handle: tokio::runtime::Handle, logger: Arc) -> Self { + let mode = RuntimeMode::Handle(handle); + let background_tasks = Mutex::new(JoinSet::new()); + let cancellable_background_tasks = Mutex::new(JoinSet::new()); + let background_processor_task = Mutex::new(None); + + Self { + mode, + background_tasks, + cancellable_background_tasks, + background_processor_task, + logger, + } + } + + pub fn spawn_background_task(&self, future: F) + where + F: Future + Send + 'static, + { + let mut background_tasks = self.background_tasks.lock().unwrap(); + let runtime_handle = self.handle(); + // Since it seems to make a difference to `tokio` (see + // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures + // are always put in an `async` / `.await` closure. + background_tasks.spawn_on(async { future.await }, runtime_handle); + } + + pub fn spawn_cancellable_background_task(&self, future: F) + where + F: Future + Send + 'static, + { + let mut cancellable_background_tasks = self.cancellable_background_tasks.lock().unwrap(); + let runtime_handle = self.handle(); + // Since it seems to make a difference to `tokio` (see + // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures + // are always put in an `async` / `.await` closure. 
+		cancellable_background_tasks.spawn_on(async { future.await }, runtime_handle);
+	}
+
+	pub fn spawn_background_processor_task<F>(&self, future: F)
+	where
+		F: Future<Output = ()> + Send + 'static,
+	{
+		let mut background_processor_task = self.background_processor_task.lock().unwrap();
+		debug_assert!(background_processor_task.is_none(), "Expected no background processor task");
+
+		let runtime_handle = self.handle();
+		let handle = runtime_handle.spawn(future);
+		*background_processor_task = Some(handle);
+	}
+
+	pub fn spawn_blocking<F, R>(&self, func: F) -> JoinHandle<R>
+	where
+		F: FnOnce() -> R + Send + 'static,
+		R: Send + 'static,
+	{
+		let handle = self.handle();
+		handle.spawn_blocking(func)
+	}
+
+	pub fn block_on<F: Future>(&self, future: F) -> F::Output {
+		// While we generally decided not to overthink via which call graph users would enter our
+		// runtime context, we'd still try to reuse whatever current context would be present
+		// during `block_on`, as this is the context `block_in_place` would operate on. So we try
+		// to detect the outer context here, and otherwise use whatever was set during
+		// initialization.
+		let handle = tokio::runtime::Handle::try_current().unwrap_or(self.handle().clone());
+		// Since it seems to make a difference to `tokio` (see
+		// https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures
+		// are always put in an `async` / `.await` closure.
+		tokio::task::block_in_place(move || handle.block_on(async { future.await }))
+	}
+
+	pub fn abort_cancellable_background_tasks(&self) {
+		let mut tasks = core::mem::take(&mut *self.cancellable_background_tasks.lock().unwrap());
+		debug_assert!(tasks.len() > 0, "Expected some cancellable background tasks");
+		tasks.abort_all();
+		self.block_on(async { while let Some(_) = tasks.join_next().await {} })
+	}
+
+	pub fn wait_on_background_tasks(&self) {
+		let mut tasks = core::mem::take(&mut *self.background_tasks.lock().unwrap());
+		debug_assert!(tasks.len() > 0, "Expected some background tasks");
+		self.block_on(async {
+			loop {
+				let timeout_fut = tokio::time::timeout(
+					Duration::from_secs(BACKGROUND_TASK_SHUTDOWN_TIMEOUT_SECS),
+					tasks.join_next_with_id(),
+				);
+				match timeout_fut.await {
+					Ok(Some(Ok((id, _)))) => {
+						log_trace!(self.logger, "Stopped background task with id {}", id);
+					},
+					Ok(Some(Err(e))) => {
+						tasks.abort_all();
+						log_trace!(self.logger, "Stopping background task failed: {}", e);
+						break;
+					},
+					Ok(None) => {
+						log_debug!(self.logger, "Stopped all background tasks");
+						break;
+					},
+					Err(e) => {
+						tasks.abort_all();
+						log_error!(self.logger, "Stopping background task timed out: {}", e);
+						break;
+					},
+				}
+			}
+		})
+	}
+
+	pub fn wait_on_background_processor_task(&self) {
+		if let Some(background_processor_task) =
+			self.background_processor_task.lock().unwrap().take()
+		{
+			let abort_handle = background_processor_task.abort_handle();
+			// Since it seems to make a difference to `tokio` (see
+			// https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the
+			// futures are always put in an `async` / `.await` closure.
+			let timeout_res = self.block_on(async {
+				tokio::time::timeout(
+					Duration::from_secs(LDK_EVENT_HANDLER_SHUTDOWN_TIMEOUT_SECS),
+					background_processor_task,
+				)
+				.await
+			});
+
+			match timeout_res {
+				Ok(stop_res) => match stop_res {
+					Ok(()) => log_debug!(self.logger, "Stopped background processing of events."),
+					Err(e) => {
+						abort_handle.abort();
+						log_error!(
+							self.logger,
+							"Stopping event handling failed. This should never happen: {}",
+							e
+						);
+						panic!("Stopping event handling failed. This should never happen.");
+					},
+				},
+				Err(e) => {
+					abort_handle.abort();
+					log_error!(self.logger, "Stopping event handling timed out: {}", e);
+				},
+			}
+		} else {
+			debug_assert!(false, "Expected a background processing task");
+		};
+	}
+
+	#[cfg(tokio_unstable)]
+	pub fn log_metrics(&self) {
+		let runtime_handle = self.handle();
+		log_trace!(
+			self.logger,
+			"Active runtime tasks left prior to shutdown: {}",
+			runtime_handle.metrics().active_tasks_count()
+		);
+	}
+
+	fn handle(&self) -> &tokio::runtime::Handle {
+		match &self.mode {
+			RuntimeMode::Owned(rt) => rt.handle(),
+			RuntimeMode::Handle(handle) => handle,
+		}
+	}
+}
+
+enum RuntimeMode {
+	Owned(tokio::runtime::Runtime),
+	Handle(tokio::runtime::Handle),
+}
diff --git a/src/scoring.rs b/src/scoring.rs
new file mode 100644
index 000000000..e85abade3
--- /dev/null
+++ b/src/scoring.rs
@@ -0,0 +1,112 @@
+use std::io::Cursor;
+use std::sync::{Arc, Mutex, RwLock};
+use std::time::{Duration, SystemTime};
+
+use lightning::routing::scoring::ChannelLiquidities;
+use lightning::util::ser::Readable;
+use lightning::{log_error, log_info, log_trace};
+
+use crate::config::{
+	EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL, EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS,
+};
+use crate::io::utils::write_external_pathfinding_scores_to_cache;
+use crate::logger::LdkLogger;
+use crate::runtime::Runtime;
+use crate::{write_node_metrics, DynStore, Logger, NodeMetrics, Scorer};
+
+/// Start a background task that periodically downloads scores from an external URL and merges
+/// them into the local pathfinding scores.
+pub fn setup_background_pathfinding_scores_sync(
+	url: String, scorer: Arc<Mutex<Scorer>>, node_metrics: Arc<RwLock<NodeMetrics>>,
+	kv_store: Arc<DynStore>, logger: Arc<Logger>, runtime: Arc<Runtime>,
+	mut stop_receiver: tokio::sync::watch::Receiver<()>,
+) {
+	log_info!(logger, "External scores background syncing enabled from {}", url);
+
+	let logger = Arc::clone(&logger);
+
+	runtime.spawn_background_processor_task(async move {
+		let mut interval = tokio::time::interval(EXTERNAL_PATHFINDING_SCORES_SYNC_INTERVAL);
+		loop {
+			tokio::select!
{ + _ = stop_receiver.changed() => { + log_trace!( + logger, + "Stopping background syncing external scores.", + ); + return; + } + _ = interval.tick() => { + log_trace!( + logger, + "Background sync of external scores started.", + ); + + sync_external_scores(logger.as_ref(), scorer.as_ref(), node_metrics.as_ref(), Arc::clone(&kv_store), &url).await; + } + } + } + }); +} + +async fn sync_external_scores( + logger: &Logger, scorer: &Mutex, node_metrics: &RwLock, + kv_store: Arc, url: &String, +) -> () { + let response = tokio::time::timeout( + Duration::from_secs(EXTERNAL_PATHFINDING_SCORES_SYNC_TIMEOUT_SECS), + reqwest::get(url), + ) + .await; + + let response = match response { + Ok(resp) => resp, + Err(e) => { + log_error!(logger, "Retrieving external scores timed out: {}", e); + return; + }, + }; + let response = match response { + Ok(resp) => resp, + Err(e) => { + log_error!(logger, "Failed to retrieve external scores update: {}", e); + return; + }, + }; + let body = match response.bytes().await { + Ok(bytes) => bytes, + Err(e) => { + log_error!(logger, "Failed to read external scores update: {}", e); + return; + }, + }; + let mut reader = Cursor::new(body); + match ChannelLiquidities::read(&mut reader) { + Ok(liquidities) => { + if let Err(e) = write_external_pathfinding_scores_to_cache( + Arc::clone(&kv_store), + &liquidities, + logger, + ) + .await + { + log_error!(logger, "Failed to persist external scores to cache: {}", e); + } + + let duration_since_epoch = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); + scorer.lock().unwrap().merge(liquidities, duration_since_epoch); + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_pathfinding_scores_sync_timestamp = + Some(duration_since_epoch.as_secs()); + write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), logger) + .unwrap_or_else(|e| { + log_error!(logger, "Persisting node metrics failed: {}", e); + }); + log_trace!(logger, "External scores merged successfully"); + }, + Err(e) => { + log_error!(logger, "Failed to parse external scores update: {}", e); + }, + } +} diff --git a/src/sweep.rs b/src/sweep.rs deleted file mode 100644 index ba10869b8..000000000 --- a/src/sweep.rs +++ /dev/null @@ -1,47 +0,0 @@ -// This file is Copyright its original authors, visible in version control history. -// -// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in -// accordance with one or both of these licenses. - -//! The output sweeper used to live here before we upstreamed it to `rust-lightning` and migrated -//! to the upstreamed version with LDK Node v0.3.0 (May 2024). We should drop this module entirely -//! once sufficient time has passed for us to be confident any users completed the migration. 
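The happy path of `sync_external_scores` above boils down to decoding a serialized `ChannelLiquidities` snapshot and merging it into the combined scorer at the current time. A minimal sketch of that core step, assuming it lives inside the crate where `Scorer` is the `CombinedScorer` alias introduced in this diff:

```rust
use std::io::Cursor;
use std::sync::Mutex;
use std::time::SystemTime;

use lightning::routing::scoring::ChannelLiquidities;
use lightning::util::ser::Readable;

// Decode the downloaded bytes and merge them at the current timestamp,
// mirroring the `scorer.lock().unwrap().merge(...)` call above.
fn merge_scores(body: Vec<u8>, scorer: &Mutex<Scorer>) {
	let mut reader = Cursor::new(body);
	if let Ok(liquidities) = ChannelLiquidities::read(&mut reader) {
		let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
		scorer.lock().unwrap().merge(liquidities, now);
	}
}
```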
-
-use lightning::impl_writeable_tlv_based;
-use lightning::ln::types::ChannelId;
-use lightning::sign::SpendableOutputDescriptor;
-
-use bitcoin::{Amount, BlockHash, Transaction};
-
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub(crate) struct DeprecatedSpendableOutputInfo {
-	pub(crate) id: [u8; 32],
-	pub(crate) descriptor: SpendableOutputDescriptor,
-	pub(crate) channel_id: Option<ChannelId>,
-	pub(crate) first_broadcast_hash: Option<BlockHash>,
-	pub(crate) latest_broadcast_height: Option<u32>,
-	pub(crate) latest_spending_tx: Option<Transaction>,
-	pub(crate) confirmation_height: Option<u32>,
-	pub(crate) confirmation_hash: Option<BlockHash>,
-}
-
-impl_writeable_tlv_based!(DeprecatedSpendableOutputInfo, {
-	(0, id, required),
-	(2, descriptor, required),
-	(4, channel_id, option),
-	(6, first_broadcast_hash, option),
-	(8, latest_broadcast_height, option),
-	(10, latest_spending_tx, option),
-	(12, confirmation_height, option),
-	(14, confirmation_hash, option),
-});
-
-pub(crate) fn value_from_descriptor(descriptor: &SpendableOutputDescriptor) -> Amount {
-	match &descriptor {
-		SpendableOutputDescriptor::StaticOutput { output, .. } => output.value,
-		SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.output.value,
-		SpendableOutputDescriptor::StaticPaymentOutput(output) => output.output.value,
-	}
-}
diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs
index 4d9397a61..12a1fe650 100644
--- a/src/tx_broadcaster.rs
+++ b/src/tx_broadcaster.rs
@@ -5,16 +5,13 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
 
-use crate::logger::{log_error, LdkLogger};
-
-use lightning::chain::chaininterface::BroadcasterInterface;
+use std::ops::Deref;
 
 use bitcoin::Transaction;
+use lightning::chain::chaininterface::BroadcasterInterface;
+use tokio::sync::{mpsc, Mutex, MutexGuard};
 
-use tokio::sync::mpsc;
-use tokio::sync::{Mutex, MutexGuard};
-
-use std::ops::Deref;
+use crate::logger::{log_error, LdkLogger};
 
 const BCAST_PACKAGE_QUEUE_SIZE: usize = 50;
diff --git a/src/types.rs b/src/types.rs
index 32ea3c984..c0ae1f466 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -5,40 +5,87 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
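The deprecated sweep struct removed above relied on LDK's TLV persistence macro, which is what keeps old on-disk blobs readable across refactors: fields carry stable numeric types, and `option` fields may simply be absent on the wire. A self-contained toy sketch of the same pattern (the struct name and fields are invented):

```rust
use lightning::impl_writeable_tlv_based;

#[derive(Clone, Debug, PartialEq, Eq)]
struct ExampleRecord {
	id: [u8; 32],
	confirmation_height: Option<u32>,
}

// Even TLV type numbers with `required`/`option` semantics, matching the
// convention used by `DeprecatedSpendableOutputInfo` above.
impl_writeable_tlv_based!(ExampleRecord, {
	(0, id, required),
	(2, confirmation_height, option),
});
```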
-use crate::chain::ChainSource; -use crate::config::ChannelConfig; -use crate::data_store::DataStore; -use crate::fee_estimator::OnchainFeeEstimator; -use crate::gossip::RuntimeSpawner; -use crate::logger::Logger; -use crate::message_handler::NodeCustomMessageHandler; -use crate::payment::PaymentDetails; +use std::fmt; +use std::sync::{Arc, Mutex}; +use bitcoin::secp256k1::PublicKey; +use bitcoin::OutPoint; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; -use lightning::ln::msgs::RoutingMessageHandler; -use lightning::ln::msgs::SocketAddress; +use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::types::ChannelId; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; -use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; +use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::persist::KVStore; +use lightning::util::persist::{KVStore, KVStoreSync, MonitorUpdatingPersister}; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; - use lightning_block_sync::gossip::{GossipVerifier, UtxoSource}; - +use lightning_liquidity::utils::time::DefaultTimeProvider; use lightning_net_tokio::SocketDescriptor; -use bitcoin::secp256k1::PublicKey; -use bitcoin::OutPoint; +use crate::chain::ChainSource; +use crate::config::ChannelConfig; +use crate::data_store::DataStore; +use crate::fee_estimator::OnchainFeeEstimator; +use crate::gossip::RuntimeSpawner; +use crate::logger::Logger; +use crate::message_handler::NodeCustomMessageHandler; +use crate::payment::PaymentDetails; -use std::sync::{Arc, Mutex}; +/// Supported BIP39 mnemonic word counts for entropy generation. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum WordCount { + /// 12-word mnemonic (128-bit entropy) + Words12, + /// 15-word mnemonic (160-bit entropy) + Words15, + /// 18-word mnemonic (192-bit entropy) + Words18, + /// 21-word mnemonic (224-bit entropy) + Words21, + /// 24-word mnemonic (256-bit entropy) + Words24, +} + +impl WordCount { + /// Returns the word count as a usize value. + pub fn word_count(&self) -> usize { + match self { + WordCount::Words12 => 12, + WordCount::Words15 => 15, + WordCount::Words18 => 18, + WordCount::Words21 => 21, + WordCount::Words24 => 24, + } + } +} -pub(crate) type DynStore = dyn KVStore + Sync + Send; +/// A supertrait that requires that a type implements both [`KVStore`] and [`KVStoreSync`] at the +/// same time. 
+pub trait SyncAndAsyncKVStore: KVStore + KVStoreSync {}
+
+impl<T> SyncAndAsyncKVStore for T
+where
+	T: KVStore,
+	T: KVStoreSync,
+{
+}
+
+/// A type alias for [`SyncAndAsyncKVStore`] with `Sync`/`Send` markers.
+pub type DynStore = dyn SyncAndAsyncKVStore + Sync + Send;
+
+pub type Persister = MonitorUpdatingPersister<
+	Arc<DynStore>,
+	Arc<Logger>,
+	Arc<KeysManager>,
+	Arc<KeysManager>,
+	Arc<Broadcaster>,
+	Arc<OnchainFeeEstimator>,
+>;
 
 pub(crate) type ChainMonitor = chainmonitor::ChainMonitor<
 	InMemorySigner,
@@ -46,7 +93,8 @@ pub(crate) type ChainMonitor = chainmonitor::ChainMonitor<
 	Arc<Broadcaster>,
 	Arc<OnchainFeeEstimator>,
 	Arc<Logger>,
-	Arc<DynStore>,
+	Arc<Persister>,
+	Arc<KeysManager>,
 >;
 
 pub(crate) type PeerManager = lightning::ln::peer_handler::PeerManager<
@@ -57,10 +105,18 @@ pub(crate) type PeerManager = lightning::ln::peer_handler::PeerManager<
 	Arc<OnionMessenger>,
 	Arc<Logger>,
 	Arc<NodeCustomMessageHandler<Arc<Logger>>>,
 	Arc<KeysManager>,
+	Arc,
 >;
 
-pub(crate) type LiquidityManager =
-	lightning_liquidity::LiquidityManager<Arc<KeysManager>, Arc<ChannelManager>, Arc<ChainSource>>;
+pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager<
+	Arc,
+	Arc,
+	Arc,
+	Arc,
+	Arc,
+	DefaultTimeProvider,
+	Arc,
+>;
 
 pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager<
 	Arc<ChainMonitor>,
@@ -76,11 +132,8 @@ pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager<
 
 pub(crate) type Broadcaster = crate::tx_broadcaster::TransactionBroadcaster<Arc<Logger>>;
 
-pub(crate) type Wallet =
-	crate::wallet::Wallet<Arc<Broadcaster>, Arc<OnchainFeeEstimator>, Arc<Logger>>;
-
-pub(crate) type KeysManager =
-	crate::wallet::WalletKeysManager<Arc<Broadcaster>, Arc<OnchainFeeEstimator>, Arc<Logger>>;
+pub(crate) type Wallet = crate::wallet::Wallet;
+pub(crate) type KeysManager = crate::wallet::WalletKeysManager;
 
 pub(crate) type Router = DefaultRouter<
 	Arc<Graph>,
@@ -90,7 +143,7 @@ pub(crate) type Router = DefaultRouter<
 	ProbabilisticScoringFeeParameters,
 	Scorer,
 >;
-pub(crate) type Scorer = ProbabilisticScorer<Arc<Graph>, Arc<Logger>>;
+pub(crate) type Scorer = CombinedScorer<Arc<Graph>, Arc<Logger>>;
 
 pub(crate) type Graph = gossip::NetworkGraph<Arc<Logger>>;
 
@@ -116,7 +169,7 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMessenger<
 	Arc,
 	Arc,
 	Arc,
-	IgnoringMessageHandler,
+	Arc,
 	IgnoringMessageHandler,
 	IgnoringMessageHandler,
 >;
@@ -167,18 +220,11 @@ impl Readable for UserChannelId {
 	}
 }
 
-/// The type of a channel, as negotiated during channel opening.
-///
-/// See [`BOLT 2`] for more information.
-///
-/// [`BOLT 2`]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md#defined-channel-types
-// #[derive(Debug, Clone, PartialEq, Eq)]
-// pub enum ChannelType {
-// 	/// A channel of type `option_static_remotekey`.
-// 	StaticRemoteKey,
-// 	/// A channel of type `option_anchors_zero_fee_htlc_tx`.
-// 	Anchors,
-// }
+impl fmt::Display for UserChannelId {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		write!(f, "UserChannelId({})", self.0)
+	}
+}
 
 /// Details of a channel as returned by [`Node::list_channels`].
 ///
@@ -337,14 +383,6 @@ pub struct ChannelDetails {
 
 impl From<LdkChannelDetails> for ChannelDetails {
 	fn from(value: LdkChannelDetails) -> Self {
-		// let channel_type = value.channel_type.map(|t| {
-		// 	if t.requires_anchors_zero_fee_htlc_tx() {
-		// 		ChannelType::Anchors
-		// 	} else {
-		// 		ChannelType::StaticRemoteKey
-		// 	}
-		// });
-
 		ChannelDetails {
 			channel_id: value.channel_id,
 			counterparty_node_id: value.counterparty.node_id,
diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs
deleted file mode 100644
index 5a62623b3..000000000
--- a/src/uniffi_types.rs
+++ /dev/null
@@ -1,790 +0,0 @@
-// This file is Copyright its original authors, visible in version control history.
-//
-// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
-// http://opensource.org/licenses/MIT>, at your option.
You may not use this file except in -// accordance with one or both of these licenses. - -// Importing these items ensures they are accessible in the uniffi bindings -// without introducing unused import warnings in lib.rs. -// -// Make sure to add any re-exported items that need to be used in uniffi below. - -pub use crate::config::{ - default_config, AnchorChannelsConfig, BackgroundSyncConfig, ElectrumSyncConfig, - EsploraSyncConfig, MaxDustHTLCExposure, -}; -pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; -pub use crate::liquidity::{LSPS1OrderStatus, LSPS2ServiceConfig, OnchainPaymentInfo, PaymentInfo}; -pub use crate::logger::{LogLevel, LogRecord, LogWriter}; -pub use crate::payment::store::{ - ConfirmationStatus, LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus, -}; -pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; - -pub use lightning::chain::channelmonitor::BalanceSource; -pub use lightning::events::{ClosureReason, PaymentFailureReason}; -pub use lightning::ln::types::ChannelId; -pub use lightning::offers::invoice::Bolt12Invoice; -pub use lightning::offers::offer::{Offer, OfferId}; -pub use lightning::offers::refund::Refund; -pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; -pub use lightning::util::string::UntrustedString; - -pub use lightning_types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; - -pub use lightning_invoice::{Description, SignedRawBolt11Invoice}; - -pub use lightning_liquidity::lsps1::msgs::ChannelInfo as ChannelOrderInfo; -pub use lightning_liquidity::lsps1::msgs::{OrderId, OrderParameters, PaymentState}; - -pub use bitcoin::{Address, BlockHash, FeeRate, Network, OutPoint, Txid}; - -pub use bip39::Mnemonic; - -pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; - -pub type DateTime = chrono::DateTime; - -use crate::UniffiCustomTypeConverter; - -use crate::builder::sanitize_alias; -use crate::error::Error; -use crate::hex_utils; -use crate::{SocketAddress, UserChannelId}; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; -use bitcoin::secp256k1::PublicKey; -use lightning::ln::channelmanager::PaymentId; -use lightning::util::ser::Writeable; -use lightning_invoice::{Bolt11Invoice as LdkBolt11Invoice, Bolt11InvoiceDescriptionRef}; - -use std::convert::TryInto; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; - -impl UniffiCustomTypeConverter for PublicKey { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Ok(key) = PublicKey::from_str(&val) { - return Ok(key); - } - - Err(Error::InvalidPublicKey.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for NodeId { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Ok(key) = NodeId::from_str(&val) { - return Ok(key); - } - - Err(Error::InvalidNodeId.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for Address { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Ok(addr) = Address::from_str(&val) { - return Ok(addr.assume_checked()); - } - - Err(Error::InvalidAddress.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for Offer { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result 
{ - Offer::from_str(&val).map_err(|_| Error::InvalidOffer.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for Refund { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Refund::from_str(&val).map_err(|_| Error::InvalidRefund.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for Bolt12Invoice { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(bytes_vec) = hex_utils::to_vec(&val) { - if let Ok(invoice) = Bolt12Invoice::try_from(bytes_vec) { - return Ok(invoice); - } - } - Err(Error::InvalidInvoice.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.encode()) - } -} - -impl UniffiCustomTypeConverter for OfferId { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(bytes_vec) = hex_utils::to_vec(&val) { - let bytes_res = bytes_vec.try_into(); - if let Ok(bytes) = bytes_res { - return Ok(OfferId(bytes)); - } - } - Err(Error::InvalidOfferId.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.0) - } -} - -impl UniffiCustomTypeConverter for PaymentId { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(bytes_vec) = hex_utils::to_vec(&val) { - let bytes_res = bytes_vec.try_into(); - if let Ok(bytes) = bytes_res { - return Ok(PaymentId(bytes)); - } - } - Err(Error::InvalidPaymentId.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.0) - } -} - -impl UniffiCustomTypeConverter for PaymentHash { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Ok(hash) = Sha256::from_str(&val) { - Ok(PaymentHash(hash.to_byte_array())) - } else { - Err(Error::InvalidPaymentHash.into()) - } - } - - fn from_custom(obj: Self) -> Self::Builtin { - Sha256::from_slice(&obj.0).unwrap().to_string() - } -} - -impl UniffiCustomTypeConverter for PaymentPreimage { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(bytes_vec) = hex_utils::to_vec(&val) { - let bytes_res = bytes_vec.try_into(); - if let Ok(bytes) = bytes_res { - return Ok(PaymentPreimage(bytes)); - } - } - Err(Error::InvalidPaymentPreimage.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.0) - } -} - -impl UniffiCustomTypeConverter for PaymentSecret { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(bytes_vec) = hex_utils::to_vec(&val) { - let bytes_res = bytes_vec.try_into(); - if let Ok(bytes) = bytes_res { - return Ok(PaymentSecret(bytes)); - } - } - Err(Error::InvalidPaymentSecret.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.0) - } -} - -impl UniffiCustomTypeConverter for ChannelId { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - if let Some(hex_vec) = hex_utils::to_vec(&val) { - if hex_vec.len() == 32 { - let mut channel_id = [0u8; 32]; - channel_id.copy_from_slice(&hex_vec[..]); - return Ok(Self(channel_id)); - } - } - Err(Error::InvalidChannelId.into()) - } - - fn from_custom(obj: Self) -> Self::Builtin { - hex_utils::to_string(&obj.0) - } -} - -impl UniffiCustomTypeConverter for UserChannelId { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - 
Ok(UserChannelId(u128::from_str(&val).map_err(|_| Error::InvalidChannelId)?)) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.0.to_string() - } -} - -impl UniffiCustomTypeConverter for Txid { - type Builtin = String; - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(Txid::from_str(&val)?) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for BlockHash { - type Builtin = String; - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(BlockHash::from_str(&val)?) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for Mnemonic { - type Builtin = String; - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(Mnemonic::from_str(&val).map_err(|_| Error::InvalidSecretKey)?) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for SocketAddress { - type Builtin = String; - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(SocketAddress::from_str(&val).map_err(|_| Error::InvalidSocketAddress)?) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for UntrustedString { - type Builtin = String; - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(UntrustedString(val)) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for Network { - type Builtin = String; - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(Network::from_str(&val).map_err(|_| Error::InvalidNetwork)?) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -impl UniffiCustomTypeConverter for NodeAlias { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(sanitize_alias(&val).map_err(|_| Error::InvalidNodeAlias)?) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_string() - } -} - -/// Represents the description of an invoice which has to be either a directly included string or -/// a hash of a description provided out of band. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Bolt11InvoiceDescription { - /// Contains a full description. - Direct { - /// Description of what the invoice is for - description: String, - }, - /// Contains a hash. 
- Hash { - /// Hash of the description of what the invoice is for - hash: String, - }, -} - -impl TryFrom<&Bolt11InvoiceDescription> for lightning_invoice::Bolt11InvoiceDescription { - type Error = Error; - - fn try_from(value: &Bolt11InvoiceDescription) -> Result { - match value { - Bolt11InvoiceDescription::Direct { description } => { - Description::new(description.clone()) - .map(lightning_invoice::Bolt11InvoiceDescription::Direct) - .map_err(|_| Error::InvoiceCreationFailed) - }, - Bolt11InvoiceDescription::Hash { hash } => Sha256::from_str(&hash) - .map(lightning_invoice::Sha256) - .map(lightning_invoice::Bolt11InvoiceDescription::Hash) - .map_err(|_| Error::InvoiceCreationFailed), - } - } -} - -impl From for Bolt11InvoiceDescription { - fn from(value: lightning_invoice::Bolt11InvoiceDescription) -> Self { - match value { - lightning_invoice::Bolt11InvoiceDescription::Direct(description) => { - Bolt11InvoiceDescription::Direct { description: description.to_string() } - }, - lightning_invoice::Bolt11InvoiceDescription::Hash(hash) => { - Bolt11InvoiceDescription::Hash { hash: hex_utils::to_string(hash.0.as_ref()) } - }, - } - } -} - -impl<'a> From> for Bolt11InvoiceDescription { - fn from(value: Bolt11InvoiceDescriptionRef<'a>) -> Self { - match value { - lightning_invoice::Bolt11InvoiceDescriptionRef::Direct(description) => { - Bolt11InvoiceDescription::Direct { description: description.to_string() } - }, - lightning_invoice::Bolt11InvoiceDescriptionRef::Hash(hash) => { - Bolt11InvoiceDescription::Hash { hash: hex_utils::to_string(hash.0.as_ref()) } - }, - } - } -} - -/// Enum representing the crypto currencies (or networks) supported by this library -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum Currency { - /// Bitcoin mainnet - Bitcoin, - - /// Bitcoin testnet - BitcoinTestnet, - - /// Bitcoin regtest - Regtest, - - /// Bitcoin simnet - Simnet, - - /// Bitcoin signet - Signet, -} - -impl From for Currency { - fn from(currency: lightning_invoice::Currency) -> Self { - match currency { - lightning_invoice::Currency::Bitcoin => Currency::Bitcoin, - lightning_invoice::Currency::BitcoinTestnet => Currency::BitcoinTestnet, - lightning_invoice::Currency::Regtest => Currency::Regtest, - lightning_invoice::Currency::Simnet => Currency::Simnet, - lightning_invoice::Currency::Signet => Currency::Signet, - } - } -} - -/// A channel descriptor for a hop along a payment path. -/// -/// While this generally comes from BOLT 11's `r` field, this struct includes more fields than are -/// available in BOLT 11. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct RouteHintHop { - /// The node_id of the non-target end of the route - pub src_node_id: PublicKey, - /// The short_channel_id of this channel - pub short_channel_id: u64, - /// The fees which must be paid to use this channel - pub fees: RoutingFees, - /// The difference in CLTV values between this node and the next node. - pub cltv_expiry_delta: u16, - /// The minimum value, in msat, which must be relayed to the next hop. - pub htlc_minimum_msat: Option, - /// The maximum value in msat available for routing with a single HTLC. 
- pub htlc_maximum_msat: Option, -} - -impl From for RouteHintHop { - fn from(hop: lightning::routing::router::RouteHintHop) -> Self { - Self { - src_node_id: hop.src_node_id, - short_channel_id: hop.short_channel_id, - cltv_expiry_delta: hop.cltv_expiry_delta, - htlc_minimum_msat: hop.htlc_minimum_msat, - htlc_maximum_msat: hop.htlc_maximum_msat, - fees: hop.fees, - } - } -} - -/// Represents a syntactically and semantically correct lightning BOLT11 invoice. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Bolt11Invoice { - pub(crate) inner: LdkBolt11Invoice, -} - -impl Bolt11Invoice { - pub fn from_str(invoice_str: &str) -> Result { - invoice_str.parse() - } - - /// Returns the underlying invoice [`LdkBolt11Invoice`] - pub fn into_inner(self) -> LdkBolt11Invoice { - self.inner - } - - /// The hash of the [`RawBolt11Invoice`] that was signed. - /// - /// [`RawBolt11Invoice`]: lightning_invoice::RawBolt11Invoice - pub fn signable_hash(&self) -> Vec { - self.inner.signable_hash().to_vec() - } - - /// Returns the hash to which we will receive the preimage on completion of the payment - pub fn payment_hash(&self) -> PaymentHash { - PaymentHash(self.inner.payment_hash().to_byte_array()) - } - - /// Get the payment secret if one was included in the invoice - pub fn payment_secret(&self) -> PaymentSecret { - PaymentSecret(self.inner.payment_secret().0) - } - - /// Returns the amount if specified in the invoice as millisatoshis. - pub fn amount_milli_satoshis(&self) -> Option { - self.inner.amount_milli_satoshis() - } - - /// Returns the invoice's expiry time (in seconds), if present, otherwise [`DEFAULT_EXPIRY_TIME`]. - /// - /// [`DEFAULT_EXPIRY_TIME`]: lightning_invoice::DEFAULT_EXPIRY_TIME - pub fn expiry_time_seconds(&self) -> u64 { - self.inner.expiry_time().as_secs() - } - - /// Returns the `Bolt11Invoice`'s timestamp as seconds since the Unix epoch - pub fn seconds_since_epoch(&self) -> u64 { - self.inner.duration_since_epoch().as_secs() - } - - /// Returns the seconds remaining until the invoice expires. - pub fn seconds_until_expiry(&self) -> u64 { - self.inner.duration_until_expiry().as_secs() - } - - /// Returns whether the invoice has expired. - pub fn is_expired(&self) -> bool { - self.inner.is_expired() - } - - /// Returns whether the expiry time would pass at the given point in time. - /// `at_time_seconds` is the timestamp as seconds since the Unix epoch. - pub fn would_expire(&self, at_time_seconds: u64) -> bool { - self.inner.would_expire(Duration::from_secs(at_time_seconds)) - } - - /// Return the description or a hash of it for longer ones - pub fn invoice_description(&self) -> Bolt11InvoiceDescription { - self.inner.description().into() - } - - /// Returns the invoice's `min_final_cltv_expiry_delta` time, if present, otherwise - /// [`DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA`]. - /// - /// [`DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA`]: lightning_invoice::DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA - pub fn min_final_cltv_expiry_delta(&self) -> u64 { - self.inner.min_final_cltv_expiry_delta() - } - - /// Returns the network for which the invoice was issued - pub fn network(&self) -> Network { - self.inner.network() - } - - /// Returns the currency for which the invoice was issued - pub fn currency(&self) -> Currency { - self.inner.currency().into() - } - - /// Returns a list of all fallback addresses as [`Address`]es - pub fn fallback_addresses(&self) -> Vec
{ - self.inner.fallback_addresses() - } - - /// Returns a list of all routes included in the invoice as the underlying hints - pub fn route_hints(&self) -> Vec> { - self.inner - .route_hints() - .iter() - .map(|route| route.0.iter().map(|hop| RouteHintHop::from(hop.clone())).collect()) - .collect() - } - - /// Recover the payee's public key (only to be used if none was included in the invoice) - pub fn recover_payee_pub_key(&self) -> PublicKey { - self.inner.recover_payee_pub_key() - } -} - -impl std::str::FromStr for Bolt11Invoice { - type Err = Error; - - fn from_str(invoice_str: &str) -> Result { - match invoice_str.parse::() { - Ok(signed) => match LdkBolt11Invoice::from_signed(signed) { - Ok(invoice) => Ok(Bolt11Invoice { inner: invoice }), - Err(_) => Err(Error::InvalidInvoice), - }, - Err(_) => Err(Error::InvalidInvoice), - } - } -} - -impl From for Bolt11Invoice { - fn from(invoice: LdkBolt11Invoice) -> Self { - Bolt11Invoice { inner: invoice } - } -} - -impl From for LdkBolt11Invoice { - fn from(wrapper: Bolt11Invoice) -> Self { - wrapper.into_inner() - } -} - -impl std::fmt::Display for Bolt11Invoice { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.inner) - } -} - -/// A Lightning payment using BOLT 11. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Bolt11PaymentInfo { - /// Indicates the current state of the payment. - pub state: PaymentState, - /// The datetime when the payment option expires. - pub expires_at: chrono::DateTime, - /// The total fee the LSP will charge to open this channel in satoshi. - pub fee_total_sat: u64, - /// The amount the client needs to pay to have the requested channel openend. - pub order_total_sat: u64, - /// A BOLT11 invoice the client can pay to have to channel opened. - pub invoice: Arc, -} - -impl From for Bolt11PaymentInfo { - fn from(info: lightning_liquidity::lsps1::msgs::Bolt11PaymentInfo) -> Self { - Self { - state: info.state, - expires_at: info.expires_at, - fee_total_sat: info.fee_total_sat, - order_total_sat: info.order_total_sat, - invoice: Arc::new(info.invoice.into()), - } - } -} - -impl UniffiCustomTypeConverter for OrderId { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(Self(val)) - } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.0 - } -} - -impl UniffiCustomTypeConverter for DateTime { - type Builtin = String; - - fn into_custom(val: Self::Builtin) -> uniffi::Result { - Ok(DateTime::from_str(&val).map_err(|_| Error::InvalidDateTime)?) 
- } - - fn from_custom(obj: Self) -> Self::Builtin { - obj.to_rfc3339() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn create_test_invoice() -> (LdkBolt11Invoice, Bolt11Invoice) { - let invoice_string = "lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; - let ldk_invoice: LdkBolt11Invoice = invoice_string.parse().unwrap(); - let wrapped_invoice = Bolt11Invoice::from(ldk_invoice.clone()); - (ldk_invoice, wrapped_invoice) - } - - #[test] - fn test_invoice_description_conversion() { - let hash = "09d08d4865e8af9266f6cc7c0ae23a1d6bf868207cf8f7c5979b9f6ed850dfb0".to_string(); - let description = Bolt11InvoiceDescription::Hash { hash }; - let converted_description = - lightning_invoice::Bolt11InvoiceDescription::try_from(&description).unwrap(); - let reconverted_description: Bolt11InvoiceDescription = converted_description.into(); - assert_eq!(description, reconverted_description); - } - - #[test] - fn test_bolt11_invoice_basic_properties() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); - - assert_eq!( - ldk_invoice.payment_hash().to_string(), - wrapped_invoice.payment_hash().to_string() - ); - assert_eq!(ldk_invoice.amount_milli_satoshis(), wrapped_invoice.amount_milli_satoshis()); - - assert_eq!( - ldk_invoice.min_final_cltv_expiry_delta(), - wrapped_invoice.min_final_cltv_expiry_delta() - ); - assert_eq!( - ldk_invoice.payment_secret().0.to_vec(), - wrapped_invoice.payment_secret().0.to_vec() - ); - - assert_eq!(ldk_invoice.network(), wrapped_invoice.network()); - assert_eq!( - format!("{:?}", ldk_invoice.currency()), - format!("{:?}", wrapped_invoice.currency()) - ); - } - - #[test] - fn test_bolt11_invoice_time_related_fields() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); - - assert_eq!(ldk_invoice.expiry_time().as_secs(), wrapped_invoice.expiry_time_seconds()); - assert_eq!( - ldk_invoice.duration_until_expiry().as_secs(), - wrapped_invoice.seconds_until_expiry() - ); - assert_eq!( - ldk_invoice.duration_since_epoch().as_secs(), - wrapped_invoice.seconds_since_epoch() - ); - - let future_time = Duration::from_secs(wrapped_invoice.seconds_since_epoch() + 10000); - assert!(!ldk_invoice.would_expire(future_time)); - assert!(!wrapped_invoice.would_expire(future_time.as_secs())); - } - - #[test] - fn test_bolt11_invoice_description() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); - - let ldk_description = ldk_invoice.description(); - let wrapped_description = wrapped_invoice.invoice_description(); - - match (ldk_description, &wrapped_description) { - ( - lightning_invoice::Bolt11InvoiceDescriptionRef::Direct(ldk_description), - Bolt11InvoiceDescription::Direct { description }, - ) => { - assert_eq!(ldk_description.to_string(), *description) - }, - ( - lightning_invoice::Bolt11InvoiceDescriptionRef::Hash(ldk_hash), - Bolt11InvoiceDescription::Hash { hash }, - ) => { - assert_eq!(hex_utils::to_string(ldk_hash.0.as_ref()), *hash) - }, - _ => panic!("Description types don't match"), - } - } - - #[test] - fn test_bolt11_invoice_route_hints() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); - - let wrapped_route_hints = wrapped_invoice.route_hints(); - let ldk_route_hints = ldk_invoice.route_hints(); - assert_eq!(ldk_route_hints.len(), 
wrapped_route_hints.len()); - - let ldk_hop = &ldk_route_hints[0].0[0]; - let wrapped_hop = &wrapped_route_hints[0][0]; - assert_eq!(ldk_hop.src_node_id, wrapped_hop.src_node_id); - assert_eq!(ldk_hop.short_channel_id, wrapped_hop.short_channel_id); - assert_eq!(ldk_hop.cltv_expiry_delta, wrapped_hop.cltv_expiry_delta); - assert_eq!(ldk_hop.htlc_minimum_msat, wrapped_hop.htlc_minimum_msat); - assert_eq!(ldk_hop.htlc_maximum_msat, wrapped_hop.htlc_maximum_msat); - assert_eq!(ldk_hop.fees.base_msat, wrapped_hop.fees.base_msat); - assert_eq!(ldk_hop.fees.proportional_millionths, wrapped_hop.fees.proportional_millionths); - } - - #[test] - fn test_bolt11_invoice_roundtrip() { - let (ldk_invoice, wrapped_invoice) = create_test_invoice(); - - let invoice_str = wrapped_invoice.to_string(); - let parsed_invoice: LdkBolt11Invoice = invoice_str.parse().unwrap(); - assert_eq!( - ldk_invoice.payment_hash().to_byte_array().to_vec(), - parsed_invoice.payment_hash().to_byte_array().to_vec() - ); - } -} diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index fbac1d1b6..2f8daa500 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -5,54 +5,54 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. -use persist::KVStoreWalletPersister; - -use crate::config::Config; -use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger}; - -use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; -use crate::payment::store::ConfirmationStatus; -use crate::payment::{PaymentDetails, PaymentDirection, PaymentStatus}; -use crate::types::PaymentStore; -use crate::Error; - -use lightning::chain::chaininterface::BroadcasterInterface; -use lightning::chain::channelmonitor::ANTI_REORG_DELAY; -use lightning::chain::{BestBlock, Listen}; - -use lightning::events::bump_transaction::{Utxo, WalletSource}; -use lightning::ln::channelmanager::PaymentId; -use lightning::ln::inbound_payment::ExpandedKey; -use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage}; -use lightning::ln::script::ShutdownScript; -use lightning::sign::{ - ChangeDestinationSource, EntropySource, InMemorySigner, KeysManager, NodeSigner, OutputSpender, - Recipient, SignerProvider, SpendableOutputDescriptor, -}; - -use lightning::util::message_signing; -use lightning_invoice::RawBolt11Invoice; +use std::future::Future; +use std::ops::Deref; +use std::pin::Pin; +use std::str::FromStr; +use std::sync::{Arc, Mutex}; use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; -use bdk_wallet::{Balance, KeychainKind, PersistedWallet, SignOptions, Update}; - +use bdk_wallet::descriptor::ExtendedDescriptor; +#[allow(deprecated)] +use bdk_wallet::SignOptions; +use bdk_wallet::{Balance, KeychainKind, PersistedWallet, Update}; use bitcoin::address::NetworkUnchecked; use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::blockdata::locktime::absolute::LockTime; use bitcoin::hashes::Hash; use bitcoin::key::XOnlyPublicKey; -use bitcoin::psbt::Psbt; +use bitcoin::psbt::{self, Psbt}; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; -use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, Signing}; +use bitcoin::secp256k1::{All, PublicKey, Scalar, Secp256k1, SecretKey}; use bitcoin::{ - Address, Amount, FeeRate, Network, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, + Address, Amount, FeeRate, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, Weight, WitnessProgram, 
WitnessVersion,
 };
+use lightning::chain::chaininterface::BroadcasterInterface;
+use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
+use lightning::chain::{BestBlock, Listen};
+use lightning::events::bump_transaction::{Input, Utxo, WalletSource};
+use lightning::ln::channelmanager::PaymentId;
+use lightning::ln::funding::FundingTxInput;
+use lightning::ln::inbound_payment::ExpandedKey;
+use lightning::ln::msgs::UnsignedGossipMessage;
+use lightning::ln::script::ShutdownScript;
+use lightning::sign::{
+	ChangeDestinationSource, EntropySource, InMemorySigner, KeysManager, NodeSigner, OutputSpender,
+	PeerStorageKey, Recipient, SignerProvider, SpendableOutputDescriptor,
+};
+use lightning::util::message_signing;
+use lightning_invoice::RawBolt11Invoice;
+use persist::KVStoreWalletPersister;
 
-use std::ops::Deref;
-use std::str::FromStr;
-use std::sync::{Arc, Mutex};
+use crate::config::Config;
+use crate::fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator};
+use crate::logger::{log_debug, log_error, log_info, log_trace, LdkLogger, Logger};
+use crate::payment::store::ConfirmationStatus;
+use crate::payment::{PaymentDetails, PaymentDirection, PaymentStatus};
+use crate::types::{Broadcaster, PaymentStore};
+use crate::Error;
 
 pub(crate) enum OnchainSendAmount {
 	ExactRetainingReserve { amount_sats: u64, cur_anchor_reserve_sats: u64 },
@@ -63,32 +63,23 @@ pub(crate) enum OnchainSendAmount {
 pub(crate) mod persist;
 pub(crate) mod ser;
 
-pub(crate) struct Wallet<B: Deref, E: Deref, L: Deref>
-where
-	B::Target: BroadcasterInterface,
-	E::Target: FeeEstimator,
-	L::Target: LdkLogger,
-{
+pub(crate) struct Wallet {
 	// A BDK on-chain wallet.
 	inner: Mutex<PersistedWallet<KVStoreWalletPersister>>,
 	persister: Mutex<KVStoreWalletPersister>,
-	broadcaster: B,
-	fee_estimator: E,
+	broadcaster: Arc<Broadcaster>,
+	fee_estimator: Arc<OnchainFeeEstimator>,
 	payment_store: Arc<PaymentStore>,
 	config: Arc<Config>,
-	logger: L,
+	logger: Arc<Logger>,
 }
 
-impl<B: Deref, E: Deref, L: Deref> Wallet<B, E, L>
-where
-	B::Target: BroadcasterInterface,
-	E::Target: FeeEstimator,
-	L::Target: LdkLogger,
-{
+impl Wallet {
 	pub(crate) fn new(
 		wallet: bdk_wallet::PersistedWallet<KVStoreWalletPersister>,
-		wallet_persister: KVStoreWalletPersister, broadcaster: B, fee_estimator: E,
-		payment_store: Arc<PaymentStore>, config: Arc<Config>, logger: L,
+		wallet_persister: KVStoreWalletPersister, broadcaster: Arc<Broadcaster>,
+		fee_estimator: Arc<OnchainFeeEstimator>, payment_store: Arc<PaymentStore>,
+		config: Arc<Config>, logger: Arc<Logger>,
 	) -> Self {
 		let inner = Mutex::new(wallet);
 		let persister = Mutex::new(wallet_persister);
@@ -236,6 +227,7 @@
 		Ok(())
 	}
 
+	#[allow(deprecated)]
 	pub(crate) fn create_funding_transaction(
 		&self, output_script: ScriptBuf, amount: Amount, confirmation_target: ConfirmationTarget,
 		locktime: LockTime,
@@ -296,7 +288,7 @@
 		Ok(address_info.address)
 	}
 
-	fn get_new_internal_address(&self) -> Result<Address, Error> {
+	pub(crate) fn get_new_internal_address(&self) -> Result<Address, Error> {
 		let mut locked_wallet = self.inner.lock().unwrap();
 		let mut locked_persister = self.persister.lock().unwrap();
 
@@ -308,6 +300,19 @@
 		Ok(address_info.address)
 	}
 
+	pub(crate) fn cancel_tx(&self, tx: &Transaction) -> Result<(), Error> {
+		let mut locked_wallet = self.inner.lock().unwrap();
+		let mut locked_persister = self.persister.lock().unwrap();
+
+		locked_wallet.cancel_tx(tx);
+		locked_wallet.persist(&mut locked_persister).map_err(|e| {
+			log_error!(self.logger, "Failed to persist wallet: {}", e);
+			Error::PersistenceFailed
+		})?;
+
+		Ok(())
+	}
+
 	pub(crate) fn get_balances(
 		&self, total_anchor_channels_reserve_sats: u64,
 	) -> Result<(u64, u64), Error> {
@@ -318,7 +323,7 @@
 		#[cfg(debug_assertions)]
 		if balance.confirmed != Amount::ZERO {
 			debug_assert!(
-				self.list_confirmed_utxos().map_or(false, |v|
!v.is_empty()), + self.list_confirmed_utxos_inner().map_or(false, |v| !v.is_empty()), "Confirmed amounts should always be available for Anchor spending" ); } @@ -343,20 +348,19 @@ where self.get_balances(total_anchor_channels_reserve_sats).map(|(_, s)| s) } - fn parse_and_validate_address( - &self, network: Network, address: &Address, - ) -> Result { + pub(crate) fn parse_and_validate_address(&self, address: &Address) -> Result { Address::::from_str(address.to_string().as_str()) .map_err(|_| Error::InvalidAddress)? - .require_network(network) + .require_network(self.config.network) .map_err(|_| Error::InvalidAddress) } + #[allow(deprecated)] pub(crate) fn send_to_address( &self, address: &bitcoin::Address, send_amount: OnchainSendAmount, fee_rate: Option, ) -> Result { - self.parse_and_validate_address(self.config.network, &address)?; + self.parse_and_validate_address(&address)?; // Use the set fee_rate or default to fee estimation. let confirmation_target = ConfirmationTarget::OnchainPayment; @@ -568,80 +572,58 @@ where Ok(txid) } -} - -impl Listen for Wallet -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ - fn filtered_block_connected( - &self, _header: &bitcoin::block::Header, - _txdata: &lightning::chain::transaction::TransactionData, _height: u32, - ) { - debug_assert!(false, "Syncing filtered blocks is currently not supported"); - // As far as we can tell this would be a no-op anyways as we don't have to tell BDK about - // the header chain of intermediate blocks. According to the BDK team, it's sufficient to - // only connect full blocks starting from the last point of disagreement. - } - fn block_connected(&self, block: &bitcoin::Block, height: u32) { + pub(crate) fn select_confirmed_utxos( + &self, must_spend: Vec, must_pay_to: &[TxOut], fee_rate: FeeRate, + ) -> Result, ()> { let mut locked_wallet = self.inner.lock().unwrap(); + debug_assert!(matches!( + locked_wallet.public_descriptor(KeychainKind::External), + ExtendedDescriptor::Wpkh(_) + )); + debug_assert!(matches!( + locked_wallet.public_descriptor(KeychainKind::Internal), + ExtendedDescriptor::Wpkh(_) + )); - let pre_checkpoint = locked_wallet.latest_checkpoint(); - if pre_checkpoint.height() != height - 1 - || pre_checkpoint.hash() != block.header.prev_blockhash - { - log_debug!( - self.logger, - "Detected reorg while applying a connected block to on-chain wallet: new block with hash {} at height {}", - block.header.block_hash(), - height - ); + let mut tx_builder = locked_wallet.build_tx(); + tx_builder.only_witness_utxo(); + + for input in &must_spend { + let psbt_input = psbt::Input { + witness_utxo: Some(input.previous_utxo.clone()), + ..Default::default() + }; + let weight = Weight::from_wu(input.satisfaction_weight); + tx_builder.add_foreign_utxo(input.outpoint, psbt_input, weight).map_err(|_| ())?; } - match locked_wallet.apply_block(block, height) { - Ok(()) => { - if let Err(e) = self.update_payment_store(&mut *locked_wallet) { - log_error!(self.logger, "Failed to update payment store: {}", e); - return; - } - }, - Err(e) => { - log_error!( - self.logger, - "Failed to apply connected block to on-chain wallet: {}", - e - ); - return; - }, - }; + for output in must_pay_to { + tx_builder.add_recipient(output.script_pubkey.clone(), output.value); + } - let mut locked_persister = self.persister.lock().unwrap(); - match locked_wallet.persist(&mut locked_persister) { - Ok(_) => (), - Err(e) => { - log_error!(self.logger, "Failed to persist on-chain wallet: {}", e); - 
return; - }, - }; - } + tx_builder.fee_rate(fee_rate); + tx_builder.exclude_unconfirmed(); - fn block_disconnected(&self, _header: &bitcoin::block::Header, _height: u32) { - // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK - // team, it's sufficient in case of a reorg to always connect blocks starting from the last - // point of disagreement. + tx_builder + .finish() + .map_err(|e| { + log_error!(self.logger, "Failed to select confirmed UTXOs: {}", e); + })? + .unsigned_tx + .input + .iter() + .filter(|txin| must_spend.iter().all(|input| input.outpoint != txin.previous_output)) + .filter_map(|txin| { + locked_wallet + .tx_details(txin.previous_output.txid) + .map(|tx_details| tx_details.tx.deref().clone()) + .map(|prevtx| FundingTxInput::new_p2wpkh(prevtx, txin.previous_output.vout)) + }) + .collect::, ()>>() } -} -impl WalletSource for Wallet -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ - fn list_confirmed_utxos(&self) -> Result, ()> { + fn list_confirmed_utxos_inner(&self) -> Result, ()> { let locked_wallet = self.inner.lock().unwrap(); let mut utxos = Vec::new(); let confirmed_txs: Vec = locked_wallet @@ -713,7 +695,7 @@ where script_pubkey: ScriptBuf::new_witness_program(&witness_program), }, satisfaction_weight: 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + - 1 /* witness items */ + 1 /* schnorr sig len */ + 64, /* schnorr sig */ + 1 /* witness items */ + 1 /* schnorr sig len */ + 64, // schnorr sig }; utxos.push(utxo); }, @@ -733,7 +715,8 @@ where Ok(utxos) } - fn get_change_script(&self) -> Result { + #[allow(deprecated)] + fn get_change_script_inner(&self) -> Result { let mut locked_wallet = self.inner.lock().unwrap(); let mut locked_persister = self.persister.lock().unwrap(); @@ -745,7 +728,45 @@ where Ok(address_info.address.script_pubkey()) } - fn sign_psbt(&self, mut psbt: Psbt) -> Result { + #[allow(deprecated)] + pub(crate) fn sign_owned_inputs(&self, unsigned_tx: Transaction) -> Result { + let locked_wallet = self.inner.lock().unwrap(); + + let mut psbt = Psbt::from_unsigned_tx(unsigned_tx).map_err(|e| { + log_error!(self.logger, "Failed to construct PSBT: {}", e); + })?; + for (i, txin) in psbt.unsigned_tx.input.iter().enumerate() { + if let Some(utxo) = locked_wallet.get_utxo(txin.previous_output) { + debug_assert!(!utxo.is_spent); + psbt.inputs[i] = locked_wallet.get_psbt_input(utxo, None, true).map_err(|e| { + log_error!(self.logger, "Failed to construct PSBT input: {}", e); + })?; + } + } + + let mut sign_options = SignOptions::default(); + sign_options.trust_witness_utxo = true; + + match locked_wallet.sign(&mut psbt, sign_options) { + Ok(finalized) => debug_assert!(!finalized), + Err(e) => { + log_error!(self.logger, "Failed to sign owned inputs: {}", e); + return Err(()); + }, + } + + match psbt.extract_tx() { + Ok(tx) => Ok(tx), + Err(bitcoin::psbt::ExtractTxError::MissingInputValue { tx }) => Ok(tx), + Err(e) => { + log_error!(self.logger, "Failed to extract transaction: {}", e); + Err(()) + }, + } + } + + #[allow(deprecated)] + fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { let locked_wallet = self.inner.lock().unwrap(); // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. 
As @@ -775,34 +796,104 @@ where } } +impl Listen for Wallet { + fn filtered_block_connected( + &self, _header: &bitcoin::block::Header, + _txdata: &lightning::chain::transaction::TransactionData, _height: u32, + ) { + debug_assert!(false, "Syncing filtered blocks is currently not supported"); + // As far as we can tell this would be a no-op anyways as we don't have to tell BDK about + // the header chain of intermediate blocks. According to the BDK team, it's sufficient to + // only connect full blocks starting from the last point of disagreement. + } + + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + let mut locked_wallet = self.inner.lock().unwrap(); + + let pre_checkpoint = locked_wallet.latest_checkpoint(); + if pre_checkpoint.height() != height - 1 + || pre_checkpoint.hash() != block.header.prev_blockhash + { + log_debug!( + self.logger, + "Detected reorg while applying a connected block to on-chain wallet: new block with hash {} at height {}", + block.header.block_hash(), + height + ); + } + + match locked_wallet.apply_block(block, height) { + Ok(()) => { + if let Err(e) = self.update_payment_store(&mut *locked_wallet) { + log_error!(self.logger, "Failed to update payment store: {}", e); + return; + } + }, + Err(e) => { + log_error!( + self.logger, + "Failed to apply connected block to on-chain wallet: {}", + e + ); + return; + }, + }; + + let mut locked_persister = self.persister.lock().unwrap(); + match locked_wallet.persist(&mut locked_persister) { + Ok(_) => (), + Err(e) => { + log_error!(self.logger, "Failed to persist on-chain wallet: {}", e); + return; + }, + }; + } + + fn blocks_disconnected(&self, _fork_point_block: BestBlock) { + // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK + // team, it's sufficient in case of a reorg to always connect blocks starting from the last + // point of disagreement. + } +} + +impl WalletSource for Wallet { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> Pin, ()>> + Send + 'a>> { + Box::pin(async move { self.list_confirmed_utxos_inner() }) + } + + fn get_change_script<'a>( + &'a self, + ) -> Pin> + Send + 'a>> { + Box::pin(async move { self.get_change_script_inner() }) + } + + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> Pin> + Send + 'a>> { + Box::pin(async move { self.sign_psbt_inner(psbt) }) + } +} + /// Similar to [`KeysManager`], but overrides the destination and shutdown scripts so they are /// directly spendable by the BDK wallet. -pub(crate) struct WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +pub(crate) struct WalletKeysManager { inner: KeysManager, - wallet: Arc>, - logger: L, + wallet: Arc, + logger: Arc, } -impl WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl WalletKeysManager { /// Constructs a `WalletKeysManager` that overrides the destination and shutdown scripts. /// /// See [`KeysManager::new`] for more information on `seed`, `starting_time_secs`, and /// `starting_time_nanos`. 
pub fn new( - seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, - wallet: Arc>, logger: L, + seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, wallet: Arc, + logger: Arc, ) -> Self { - let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos); + let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos, true); Self { inner, wallet, logger } } @@ -819,12 +910,7 @@ where } } -impl NodeSigner for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl NodeSigner for WalletKeysManager { fn get_node_id(&self, recipient: Recipient) -> Result { self.inner.get_node_id(recipient) } @@ -835,8 +921,16 @@ where self.inner.ecdh(recipient, other_key, tweak) } - fn get_inbound_payment_key(&self) -> ExpandedKey { - self.inner.get_inbound_payment_key() + fn get_expanded_key(&self) -> ExpandedKey { + self.inner.get_expanded_key() + } + + fn get_peer_storage_key(&self) -> PeerStorageKey { + self.inner.get_peer_storage_key() + } + + fn get_receive_auth_key(&self) -> lightning::sign::ReceiveAuthKey { + self.inner.get_receive_auth_key() } fn sign_invoice( @@ -854,19 +948,17 @@ where ) -> Result { self.inner.sign_bolt12_invoice(invoice) } + fn sign_message(&self, msg: &[u8]) -> Result { + self.inner.sign_message(msg) + } } -impl OutputSpender for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl OutputSpender for WalletKeysManager { /// See [`KeysManager::spend_spendable_outputs`] for documentation on this method. - fn spend_spendable_outputs( + fn spend_spendable_outputs( &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec, change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, - locktime: Option, secp_ctx: &Secp256k1, + locktime: Option, secp_ctx: &Secp256k1, ) -> Result { self.inner.spend_spendable_outputs( descriptors, @@ -879,39 +971,21 @@ where } } -impl EntropySource for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl EntropySource for WalletKeysManager { fn get_secure_random_bytes(&self) -> [u8; 32] { self.inner.get_secure_random_bytes() } } -impl SignerProvider for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ +impl SignerProvider for WalletKeysManager { type EcdsaSigner = InMemorySigner; - fn generate_channel_keys_id( - &self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128, - ) -> [u8; 32] { - self.inner.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id) - } - - fn derive_channel_signer( - &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32], - ) -> Self::EcdsaSigner { - self.inner.derive_channel_signer(channel_value_satoshis, channel_keys_id) + fn generate_channel_keys_id(&self, inbound: bool, user_channel_id: u128) -> [u8; 32] { + self.inner.generate_channel_keys_id(inbound, user_channel_id) } - fn read_chan_signer(&self, reader: &[u8]) -> Result { - self.inner.read_chan_signer(reader) + fn derive_channel_signer(&self, channel_keys_id: [u8; 32]) -> Self::EcdsaSigner { + self.inner.derive_channel_signer(channel_keys_id) } fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { @@ -941,16 +1015,20 @@ where } } -impl ChangeDestinationSource for WalletKeysManager -where - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: LdkLogger, -{ - fn 
get_change_destination_script(&self) -> Result { - let address = self.wallet.get_new_internal_address().map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; - Ok(address.script_pubkey()) +impl ChangeDestinationSource for WalletKeysManager { + fn get_change_destination_script<'a>( + &'a self, + ) -> Pin> + Send + 'a>> { + let wallet = Arc::clone(&self.wallet); + let logger = Arc::clone(&self.logger); + Box::pin(async move { + wallet + .get_new_internal_address() + .map_err(|e| { + log_error!(logger, "Failed to retrieve new address from wallet: {}", e); + }) + .map(|addr| addr.script_pubkey()) + .map_err(|_| ()) + }) } } diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs index d9e4e7135..5c8668937 100644 --- a/src/wallet/persist.rs +++ b/src/wallet/persist.rs @@ -5,6 +5,11 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. +use std::sync::Arc; + +use bdk_chain::Merge; +use bdk_wallet::{ChangeSet, WalletPersister}; + use crate::io::utils::{ read_bdk_wallet_change_set, write_bdk_wallet_change_descriptor, write_bdk_wallet_descriptor, write_bdk_wallet_indexer, write_bdk_wallet_local_chain, write_bdk_wallet_network, @@ -12,11 +17,6 @@ use crate::io::utils::{ }; use crate::logger::{log_error, LdkLogger, Logger}; use crate::types::DynStore; - -use bdk_chain::Merge; -use bdk_wallet::{ChangeSet, WalletPersister}; - -use std::sync::Arc; pub(crate) struct KVStoreWalletPersister { latest_change_set: Option, kv_store: Arc, diff --git a/src/wallet/ser.rs b/src/wallet/ser.rs index ae1509bdf..c1ad984e6 100644 --- a/src/wallet/ser.rs +++ b/src/wallet/ser.rs @@ -5,26 +5,23 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
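[Editor's aside] The wallet traits above (`WalletSource`, `ChangeDestinationSource`) now expose async behavior through methods that return `Pin<Box<dyn Future + Send>>`, which lets an ordinary `impl` block satisfy an async interface without `async fn` in traits. A minimal self-contained sketch of the pattern, with hypothetical names:

```rust
use std::future::Future;
use std::pin::Pin;

// A hypothetical stand-in for traits like `ChangeDestinationSource`.
trait AddressSource {
	fn next_address<'a>(
		&'a self,
	) -> Pin<Box<dyn Future<Output = Result<String, ()>> + Send + 'a>>;
}

struct StaticSource;

impl AddressSource for StaticSource {
	fn next_address<'a>(
		&'a self,
	) -> Pin<Box<dyn Future<Output = Result<String, ()>> + Send + 'a>> {
		// Even fully synchronous logic satisfies the async interface by
		// returning an immediately-ready boxed future.
		Box::pin(async move { Ok("bcrt1qexampleaddress".to_string()) })
	}
}
```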
-use lightning::ln::msgs::DecodeError; -use lightning::util::ser::{BigSize, Readable, RequiredWrapper, Writeable, Writer}; -use lightning::{decode_tlv_stream, encode_tlv_stream, read_tlv_fields, write_tlv_fields}; +use std::collections::{BTreeMap, BTreeSet}; +use std::str::FromStr; +use std::sync::Arc; use bdk_chain::bdk_core::{BlockId, ConfirmationBlockTime}; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; use bdk_chain::DescriptorId; - use bdk_wallet::descriptor::Descriptor; use bdk_wallet::keys::DescriptorPublicKey; - use bitcoin::hashes::sha256::Hash as Sha256Hash; use bitcoin::p2p::Magic; use bitcoin::{BlockHash, Network, OutPoint, Transaction, TxOut, Txid}; - -use std::collections::{BTreeMap, BTreeSet}; -use std::str::FromStr; -use std::sync::Arc; +use lightning::ln::msgs::DecodeError; +use lightning::util::ser::{BigSize, Readable, RequiredWrapper, Writeable, Writer}; +use lightning::{decode_tlv_stream, encode_tlv_stream, read_tlv_fields, write_tlv_fields}; const CHANGESET_SERIALIZATION_VERSION: u8 = 1; diff --git a/tests/common/logging.rs b/tests/common/logging.rs index 6bceac29a..3ff24d34d 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -1,11 +1,10 @@ +use std::sync::{Arc, Mutex}; + use chrono::Utc; -#[cfg(not(feature = "uniffi"))] -use ldk_node::logger::LogRecord; -use ldk_node::logger::{LogLevel, LogWriter}; +use ldk_node::logger::{LogLevel, LogRecord, LogWriter}; #[cfg(not(feature = "uniffi"))] use log::Record as LogFacadeRecord; use log::{Level as LogFacadeLevel, LevelFilter as LogFacadeLevelFilter, Log as LogFacadeLog}; -use std::sync::{Arc, Mutex}; #[derive(Clone)] pub(crate) enum TestLogWriter { @@ -143,3 +142,29 @@ pub(crate) fn validate_log_entry(entry: &String) { let msg = &path_and_msg[msg_start_index..]; assert!(!msg.is_empty()); } + +pub(crate) struct MultiNodeLogger { + node_id: String, +} + +impl MultiNodeLogger { + pub(crate) fn new(node_id: String) -> Self { + Self { node_id } + } +} + +impl LogWriter for MultiNodeLogger { + fn log(&self, record: LogRecord) { + let log = format!( + "[{}] {} {:<5} [{}:{}] {}\n", + self.node_id, + Utc::now().format("%Y-%m-%d %H:%M:%S%.3f"), + record.level.to_string(), + record.module_path, + record.line, + record.args + ); + + print!("{}", log); + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 337d034d7..4dc0b110c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -10,45 +10,46 @@ pub(crate) mod logging; -use logging::TestLogWriter; +use std::boxed::Box; +use std::collections::{HashMap, HashSet}; +use std::env; +use std::future::Future; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::{Arc, RwLock}; +use std::time::Duration; -use ldk_node::config::{Config, ElectrumSyncConfig, EsploraSyncConfig}; +use bitcoin::hashes::hex::FromHex; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; +use bitcoin::{ + Address, Amount, Network, OutPoint, ScriptBuf, Sequence, Transaction, Txid, Witness, +}; +use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD}; +use electrsd::{corepc_node, ElectrsD}; +use electrum_client::ElectrumApi; +use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ Builder, CustomTlvRecord, Event, LightningBalance, 
Node, NodeError, PendingSweepBalance, }; - +use lightning::io; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; -use lightning::util::persist::KVStore; +use lightning::util::persist::{KVStore, KVStoreSync}; use lightning::util::test_utils::TestStore; - use lightning_invoice::{Bolt11InvoiceDescription, Description}; -use lightning_types::payment::{PaymentHash, PaymentPreimage}; - use lightning_persister::fs_store::FilesystemStore; - -use bitcoin::hashes::sha256::Hash as Sha256; -use bitcoin::hashes::Hash; -use bitcoin::{Address, Amount, Network, OutPoint, Txid}; - -use electrsd::corepc_node::Client as BitcoindClient; -use electrsd::corepc_node::Node as BitcoinD; -use electrsd::{corepc_node, ElectrsD}; -use electrum_client::ElectrumApi; - -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; - -use std::env; -use std::path::PathBuf; -use std::sync::{Arc, RwLock}; -use std::time::Duration; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; +use logging::TestLogWriter; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; +use serde_json::{json, Value}; macro_rules! expect_event { - ($node: expr, $event_type: ident) => {{ - match $node.wait_next_event() { + ($node:expr, $event_type:ident) => {{ + match $node.next_event_async().await { ref e @ Event::$event_type { .. } => { println!("{} got event {:?}", $node.node_id(), e); $node.event_handled().unwrap(); @@ -63,8 +64,8 @@ macro_rules! expect_event { pub(crate) use expect_event; macro_rules! expect_channel_pending_event { - ($node: expr, $counterparty_node_id: expr) => {{ - match $node.wait_next_event() { + ($node:expr, $counterparty_node_id:expr) => {{ + match $node.next_event_async().await { ref e @ Event::ChannelPending { funding_txo, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, $counterparty_node_id); @@ -81,8 +82,8 @@ macro_rules! expect_channel_pending_event { pub(crate) use expect_channel_pending_event; macro_rules! expect_channel_ready_event { - ($node: expr, $counterparty_node_id: expr) => {{ - match $node.wait_next_event() { + ($node:expr, $counterparty_node_id:expr) => {{ + match $node.next_event_async().await { ref e @ Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, Some($counterparty_node_id)); @@ -98,9 +99,27 @@ macro_rules! expect_channel_ready_event { pub(crate) use expect_channel_ready_event; +macro_rules! expect_splice_pending_event { + ($node: expr, $counterparty_node_id: expr) => {{ + match $node.next_event_async().await { + ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. } => { + println!("{} got event {:?}", $node.node_id(), e); + assert_eq!(counterparty_node_id, $counterparty_node_id); + $node.event_handled().unwrap(); + new_funding_txo + }, + ref e => { + panic!("{} got unexpected event!: {:?}", std::stringify!($node), e); + }, + } + }}; +} + +pub(crate) use expect_splice_pending_event; + macro_rules! expect_payment_received_event { - ($node: expr, $amount_msat: expr) => {{ - match $node.wait_next_event() { + ($node:expr, $amount_msat:expr) => {{ + match $node.next_event_async().await { ref e @ Event::PaymentReceived { payment_id, amount_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(amount_msat, $amount_msat); @@ -121,8 +140,8 @@ macro_rules! 
expect_payment_received_event { pub(crate) use expect_payment_received_event; macro_rules! expect_payment_claimable_event { - ($node: expr, $payment_id: expr, $payment_hash: expr, $claimable_amount_msat: expr) => {{ - match $node.wait_next_event() { + ($node:expr, $payment_id:expr, $payment_hash:expr, $claimable_amount_msat:expr) => {{ + match $node.next_event_async().await { ref e @ Event::PaymentClaimable { payment_id, payment_hash, @@ -146,8 +165,8 @@ macro_rules! expect_payment_claimable_event { pub(crate) use expect_payment_claimable_event; macro_rules! expect_payment_successful_event { - ($node: expr, $payment_id: expr, $fee_paid_msat: expr) => {{ - match $node.wait_next_event() { + ($node:expr, $payment_id:expr, $fee_paid_msat:expr) => {{ + match $node.next_event_async().await { ref e @ Event::PaymentSuccessful { payment_id, fee_paid_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); if let Some(fee_msat) = $fee_paid_msat { @@ -174,6 +193,7 @@ pub(crate) fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) { ); let mut bitcoind_conf = corepc_node::Conf::default(); bitcoind_conf.network = "regtest"; + bitcoind_conf.args.push("-rest"); let bitcoind = BitcoinD::with_conf(bitcoind_exe, &bitcoind_conf).unwrap(); let electrs_exe = env::var("ELECTRS_EXE") @@ -189,15 +209,15 @@ pub(crate) fn setup_bitcoind_and_electrsd() -> (BitcoinD, ElectrsD) { pub(crate) fn random_storage_path() -> PathBuf { let mut temp_path = std::env::temp_dir(); - let mut rng = thread_rng(); + let mut rng = rng(); let rand_dir: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); temp_path.push(rand_dir); temp_path } pub(crate) fn random_port() -> u16 { - let mut rng = thread_rng(); - rng.gen_range(5000..65535) + let mut rng = rng(); + rng.random_range(5000..32768) } pub(crate) fn random_listening_addresses() -> Vec<SocketAddress> { @@ -214,8 +234,8 @@ } pub(crate) fn random_node_alias() -> Option<NodeAlias> { - let mut rng = thread_rng(); - let rand_val = rng.gen_range(0..1000); + let mut rng = rng(); + let rand_val = rng.random_range(0..1000); let alias = format!("ldk-node-{}", rand_val); let mut bytes = [0u8; 32]; bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); @@ -256,17 +276,31 @@ type TestNode = Node; pub(crate) enum TestChainSource<'a> { Esplora(&'a ElectrsD), Electrum(&'a ElectrsD), - BitcoindRpc(&'a BitcoinD), + BitcoindRpcSync(&'a BitcoinD), + BitcoindRestSync(&'a BitcoinD), +} + +#[derive(Clone, Copy)] +pub(crate) enum TestStoreType { + TestSyncStore, + Sqlite, +} + +impl Default for TestStoreType { + fn default() -> Self { + TestStoreType::TestSyncStore + } } #[derive(Clone, Default)] pub(crate) struct TestConfig { pub node_config: Config, pub log_writer: TestLogWriter, + pub store_type: TestStoreType, } macro_rules!
setup_builder { - ($builder: ident, $config: expr) => { + ($builder:ident, $config:expr) => { #[cfg(feature = "uniffi")] let $builder = Builder::from_config($config.clone()); #[cfg(not(feature = "uniffi"))] @@ -279,13 +313,28 @@ pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, +) -> (TestNode, TestNode) { + setup_two_nodes_with_store( + chain_source, + allow_0conf, + anchor_channels, + anchors_trusted_no_reserve, + TestStoreType::TestSyncStore, + ) +} + +pub(crate) fn setup_two_nodes_with_store( + chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, + anchors_trusted_no_reserve: bool, store_type: TestStoreType, ) -> (TestNode, TestNode) { println!("== Node A =="); - let config_a = random_config(anchor_channels); + let mut config_a = random_config(anchor_channels); + config_a.store_type = store_type; let node_a = setup_node(chain_source, config_a, None); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); + config_b.store_type = store_type; if allow_0conf { config_b.node_config.trusted_peers_0conf.push(node_a.node_id()); } @@ -304,6 +353,13 @@ pub(crate) fn setup_two_nodes( pub(crate) fn setup_node( chain_source: &TestChainSource, config: TestConfig, seed_bytes: Option<Vec<u8>>, +) -> TestNode { + setup_node_for_async_payments(chain_source, config, seed_bytes, None) +} + +pub(crate) fn setup_node_for_async_payments( + chain_source: &TestChainSource, config: TestConfig, seed_bytes: Option<Vec<u8>>, + async_payments_role: Option<AsyncPaymentsRole>, ) -> TestNode { setup_builder!(builder, config.node_config); match chain_source { @@ -317,7 +373,7 @@ pub(crate) fn setup_node( let sync_config = ElectrumSyncConfig { background_sync_config: None }; builder.set_chain_source_electrum(electrum_url.clone(), Some(sync_config)); }, - TestChainSource::BitcoindRpc(bitcoind) => { + TestChainSource::BitcoindRpcSync(bitcoind) => { let rpc_host = bitcoind.params.rpc_socket.ip().to_string(); let rpc_port = bitcoind.params.rpc_socket.port(); let values = bitcoind.params.get_cookie_values().unwrap().unwrap(); @@ -325,6 +381,23 @@ pub(crate) fn setup_node( let rpc_user = values.user; let rpc_password = values.password; builder.set_chain_source_bitcoind_rpc(rpc_host, rpc_port, rpc_user, rpc_password); }, + TestChainSource::BitcoindRestSync(bitcoind) => { + let rpc_host = bitcoind.params.rpc_socket.ip().to_string(); + let rpc_port = bitcoind.params.rpc_socket.port(); + let values = bitcoind.params.get_cookie_values().unwrap().unwrap(); + let rpc_user = values.user; + let rpc_password = values.password; + let rest_host = bitcoind.params.rpc_socket.ip().to_string(); + let rest_port = bitcoind.params.rpc_socket.port(); + builder.set_chain_source_bitcoind_rest( + rest_host, + rest_port, + rpc_host, + rpc_port, + rpc_user, + rpc_password, + ); + }, } match &config.log_writer { @@ -352,15 +425,23 @@ pub(crate) fn setup_node( } } - let test_sync_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); - let node = builder.build_with_store(test_sync_store).unwrap(); + builder.set_async_payments_role(async_payments_role).unwrap(); + + let node = match config.store_type { + TestStoreType::TestSyncStore => { + let kv_store = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.into())); + builder.build_with_store(kv_store).unwrap() + }, + TestStoreType::Sqlite => builder.build().unwrap(), + }; + node.start().unwrap(); assert!(node.status().is_running);
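+ // Starting the node also runs an initial fee-rate cache update, which the status assertions here rely on.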
assert!(node.status().latest_fee_rate_cache_update_timestamp.is_some()); node } -pub(crate) fn generate_blocks_and_wait<E: ElectrumApi>( +pub(crate) async fn generate_blocks_and_wait<E: ElectrumApi>( bitcoind: &BitcoindClient, electrs: &E, num: usize, ) { let _ = bitcoind.create_wallet("ldk_node_test"); @@ -371,19 +452,34 @@ pub(crate) fn generate_blocks_and_wait<E: ElectrumApi>( let address = bitcoind.new_address().expect("failed to get new address"); // TODO: expect this Result once the WouldBlock issue is resolved upstream. let _block_hashes_res = bitcoind.generate_to_address(num, &address); - wait_for_block(electrs, cur_height as usize + num); + wait_for_block(electrs, cur_height as usize + num).await; print!(" Done!"); println!("\n"); } -pub(crate) fn wait_for_block<E: ElectrumApi>(electrs: &E, min_height: usize) { +pub(crate) fn invalidate_blocks(bitcoind: &BitcoindClient, num_blocks: usize) { + let blockchain_info = bitcoind.get_blockchain_info().expect("failed to get blockchain info"); + let cur_height = blockchain_info.blocks as usize; + let target_height = cur_height - num_blocks + 1; + let block_hash = bitcoind + .get_block_hash(target_height as u64) + .expect("failed to get block hash") + .block_hash() + .expect("block hash should be present"); + bitcoind.invalidate_block(block_hash).expect("failed to invalidate block"); + let blockchain_info = bitcoind.get_blockchain_info().expect("failed to get blockchain info"); + let new_cur_height = blockchain_info.blocks as usize; + assert!(new_cur_height + num_blocks == cur_height); +} + +pub(crate) async fn wait_for_block<E: ElectrumApi>(electrs: &E, min_height: usize) { let mut header = match electrs.block_headers_subscribe() { Ok(header) => header, Err(_) => { // While subscribing should succeed the first time around, we ran into some cases where // it didn't. Since we can't proceed without subscribing, we try again after a delay // and panic if it still fails.
- std::thread::sleep(Duration::from_secs(3)); + tokio::time::sleep(Duration::from_secs(3)).await; electrs.block_headers_subscribe().expect("failed to subscribe to block headers") }, }; @@ -394,40 +490,42 @@ pub(crate) fn wait_for_block<E: ElectrumApi>(electrs: &E, min_height: usize) { header = exponential_backoff_poll(|| { electrs.ping().expect("failed to ping electrs"); electrs.block_headers_pop().expect("failed to pop block header") - }); + }) + .await; } } -pub(crate) fn wait_for_tx<E: ElectrumApi>(electrs: &E, txid: Txid) { - let mut tx_res = electrs.transaction_get(&txid); - loop { - if tx_res.is_ok() { - break; - } - tx_res = exponential_backoff_poll(|| { - electrs.ping().unwrap(); - Some(electrs.transaction_get(&txid)) - }); +pub(crate) async fn wait_for_tx<E: ElectrumApi>(electrs: &E, txid: Txid) { + if electrs.transaction_get(&txid).is_ok() { + return; } + + exponential_backoff_poll(|| { + electrs.ping().unwrap(); + electrs.transaction_get(&txid).ok() + }) + .await; } -pub(crate) fn wait_for_outpoint_spend<E: ElectrumApi>(electrs: &E, outpoint: OutPoint) { +pub(crate) async fn wait_for_outpoint_spend<E: ElectrumApi>(electrs: &E, outpoint: OutPoint) { let tx = electrs.transaction_get(&outpoint.txid).unwrap(); let txout_script = tx.output.get(outpoint.vout as usize).unwrap().clone().script_pubkey; - let mut is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); - loop { - if is_spent { - break; - } - is_spent = exponential_backoff_poll(|| { - electrs.ping().unwrap(); - Some(!electrs.script_get_history(&txout_script).unwrap().is_empty()) - }); + let is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); + if is_spent { + return; } + + exponential_backoff_poll(|| { + electrs.ping().unwrap(); + + let is_spent = !electrs.script_get_history(&txout_script).unwrap().is_empty(); + is_spent.then_some(()) + }) + .await; } -pub(crate) fn exponential_backoff_poll<T, F>(mut poll: F) -> T +pub(crate) async fn exponential_backoff_poll<T, F>(mut poll: F) -> T where F: FnMut() -> Option<T>, { @@ -444,36 +542,128 @@ where } assert!(tries < 20, "Reached max tries."); tries += 1; - std::thread::sleep(delay); + tokio::time::sleep(delay).await; } } -pub(crate) fn premine_and_distribute_funds<E: ElectrumApi>( +pub(crate) async fn premine_and_distribute_funds<E: ElectrumApi>( bitcoind: &BitcoindClient, electrs: &E, addrs: Vec<Address>, amount: Amount, ) { + premine_blocks(bitcoind, electrs).await; + + distribute_funds_unconfirmed(bitcoind, electrs, addrs, amount).await; + generate_blocks_and_wait(bitcoind, electrs, 1).await; +} + +pub(crate) async fn premine_blocks<E: ElectrumApi>(bitcoind: &BitcoindClient, electrs: &E) { let _ = bitcoind.create_wallet("ldk_node_test"); let _ = bitcoind.load_wallet("ldk_node_test"); - generate_blocks_and_wait(bitcoind, electrs, 101); + generate_blocks_and_wait(bitcoind, electrs, 101).await; +} + +pub(crate) async fn distribute_funds_unconfirmed<E: ElectrumApi>( + bitcoind: &BitcoindClient, electrs: &E, addrs: Vec<Address>, amount: Amount, +) -> Txid { + let mut amounts = HashMap::<String, f64>::new(); + for addr in &addrs { + amounts.insert(addr.to_string(), amount.to_btc()); + } + + let empty_account = json!(""); + let amounts_json = json!(amounts); + let txid = bitcoind + .call::<Value>("sendmany", &[empty_account, amounts_json]) + .unwrap() + .as_str() + .unwrap() + .parse() + .unwrap(); + + wait_for_tx(electrs, txid).await; + + txid +} + +pub(crate) fn prepare_rbf<E: ElectrumApi>( + electrs: &E, txid: Txid, scripts_buf: &HashSet<ScriptBuf>, +) -> (Transaction, usize) { + let tx = electrs.transaction_get(&txid).unwrap(); + + let fee_output_index = tx + .output + .iter() + .position(|output| !scripts_buf.contains(&output.script_pubkey)) + .expect("No output available for fee bumping"); + + (tx, fee_output_index) +} + +pub(crate) async fn bump_fee_and_broadcast<E: ElectrumApi>( + bitcoind: &BitcoindClient, electrs: &E, mut tx: Transaction, fee_output_index: usize, + is_insert_block: bool, +) -> Transaction { + let mut bump_fee_amount_sat = tx.vsize() as u64; + let attempts = 5; + + for _ in 0..attempts { + let fee_output = &mut tx.output[fee_output_index]; + let new_fee_value = fee_output.value.to_sat().saturating_sub(bump_fee_amount_sat); + if new_fee_value < 546 { + panic!("Fee output would fall below the dust limit ({} sats)", new_fee_value); + } + fee_output.value = Amount::from_sat(new_fee_value); - for addr in addrs { - let txid = bitcoind.send_to_address(&addr, amount).unwrap().0.parse().unwrap(); - wait_for_tx(electrs, txid); + for input in &mut tx.input { + input.sequence = Sequence::ENABLE_RBF_NO_LOCKTIME; + input.script_sig = ScriptBuf::new(); + input.witness = Witness::new(); + } + + let signed_result = bitcoind.sign_raw_transaction_with_wallet(&tx).unwrap(); + assert!(signed_result.complete, "Failed to sign RBF transaction"); + + let tx_bytes = Vec::<u8>::from_hex(&signed_result.hex).unwrap(); + tx = bitcoin::consensus::encode::deserialize::<Transaction>(&tx_bytes).unwrap(); + + match bitcoind.send_raw_transaction(&tx) { + Ok(res) => { + if is_insert_block { + generate_blocks_and_wait(bitcoind, electrs, 1).await; + } + let new_txid: Txid = res.0.parse().unwrap(); + wait_for_tx(electrs, new_txid).await; + return tx; + }, + Err(_) => { + bump_fee_amount_sat += bump_fee_amount_sat * 5; + if tx.output[fee_output_index].value.to_sat() < bump_fee_amount_sat { + panic!("Insufficient funds to increase fee"); + } + }, + } } - generate_blocks_and_wait(bitcoind, electrs, 1); + panic!("Failed to bump fee after {} attempts", attempts); } -pub fn open_channel( +pub async fn open_channel( node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, electrsd: &ElectrsD, -) { +) -> OutPoint { + open_channel_push_amt(node_a, node_b, funding_amount_sat, None, should_announce, electrsd).await +} + +pub async fn open_channel_push_amt( + node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, push_amount_msat: Option<u64>, + should_announce: bool, electrsd: &ElectrsD, +) -> OutPoint { if should_announce { node_a .open_announced_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, - None, + push_amount_msat, None, ) .unwrap(); @@ -483,7 +673,7 @@ pub fn open_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, - None, + push_amount_msat, None, ) .unwrap(); @@ -493,10 +683,12 @@ let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); let funding_txo_b = expect_channel_pending_event!(node_b, node_a.node_id());
assert_eq!(funding_txo_a, funding_txo_b); - wait_for_tx(&electrsd.client, funding_txo_a.txid); + wait_for_tx(&electrsd.client, funding_txo_a.txid).await; + + funding_txo_a } -pub(crate) fn do_channel_full_cycle<E: ElectrumApi>( +pub(crate) async fn do_channel_full_cycle<E: ElectrumApi>( node_a: TestNode, node_b: TestNode, bitcoind: &BitcoindClient, electrsd: &E, allow_0conf: bool, expect_anchor_channel: bool, force_close: bool, ) { @@ -510,7 +702,8 @@ pub(crate) fn do_channel_full_cycle<E: ElectrumApi>( electrsd, vec![addr_a, addr_b], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); @@ -569,10 +762,10 @@ let funding_txo_b = expect_channel_pending_event!(node_b, node_a.node_id()); assert_eq!(funding_txo_a, funding_txo_b); - wait_for_tx(electrsd, funding_txo_a.txid); + wait_for_tx(electrsd, funding_txo_a.txid).await; if !allow_0conf { - generate_blocks_and_wait(&bitcoind, electrsd, 6); + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; } node_a.sync_wallets().unwrap(); @@ -620,8 +813,8 @@ node_b_anchor_reserve_sat ); - let user_channel_id = expect_channel_ready_event!(node_a, node_b.node_id()); - expect_channel_ready_event!(node_b, node_a.node_id()); + let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id()); + let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id()); println!("\nB receive"); let invoice_amount_1_msat = 2500_000; @@ -702,7 +895,7 @@ let payment_id = node_a.bolt11_payment().send_using_amount(&invoice, overpaid_amount_msat, None).unwrap(); expect_event!(node_a, PaymentSuccessful); - let received_amount = match node_b.wait_next_event() { + let received_amount = match node_b.next_event_async().await { ref e @ Event::PaymentReceived { amount_msat, .. } => { println!("{} got event {:?}", std::stringify!(node_b), e); node_b.event_handled().unwrap(); @@ -740,7 +933,7 @@ .unwrap(); expect_event!(node_a, PaymentSuccessful); - let received_amount = match node_b.wait_next_event() { + let received_amount = match node_b.next_event_async().await { ref e @ Event::PaymentReceived { amount_msat, .. } => { println!("{} got event {:?}", std::stringify!(node_b), e); node_b.event_handled().unwrap(); @@ -877,7 +1070,7 @@ .unwrap(); expect_event!(node_a, PaymentSuccessful); - let next_event = node_b.wait_next_event(); + let next_event = node_b.next_event_async().await; let (received_keysend_amount, received_custom_records) = match next_event { ref e @ Event::PaymentReceived { amount_msat, ref custom_records, ..
} => { println!("{} got event {:?}", std::stringify!(node_b), e); @@ -925,20 +1118,68 @@ pub(crate) fn do_channel_full_cycle( 1 ); + // Mine a block to give time for the HTLC to resolve + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; + + println!("\nB splices out to pay A"); + let addr_a = node_a.onchain_payment().new_address().unwrap(); + let splice_out_sat = funding_amount_sat / 2; + node_b.splice_out(&user_channel_id_b, node_a.node_id(), &addr_a, splice_out_sat).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + assert_eq!( + node_a + .list_payments_with_filter(|p| p.direction == PaymentDirection::Inbound + && matches!(p.kind, PaymentKind::Onchain { .. })) + .len(), + 2 + ); + + println!("\nA splices in the splice-out payment from B"); + let splice_in_sat = splice_out_sat; + node_a.splice_in(&user_channel_id_a, node_b.node_id(), splice_in_sat).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind, electrsd, 6).await; + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + assert_eq!( + node_a + .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound + && matches!(p.kind, PaymentKind::Onchain { .. })) + .len(), + 2 + ); + println!("\nB close_channel (force: {})", force_close); if force_close { - std::thread::sleep(Duration::from_secs(1)); - node_a.force_close_channel(&user_channel_id, node_b.node_id(), None).unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + node_a.force_close_channel(&user_channel_id_a, node_b.node_id(), None).unwrap(); } else { - node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap(); + node_a.close_channel(&user_channel_id_a, node_b.node_id()).unwrap(); } expect_event!(node_a, ChannelClosed); expect_event!(node_b, ChannelClosed); - wait_for_outpoint_spend(electrsd, funding_txo_b); + wait_for_outpoint_spend(electrsd, funding_txo_b).await; - generate_blocks_and_wait(&bitcoind, electrsd, 1); + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -954,7 +1195,7 @@ pub(crate) fn do_channel_full_cycle( assert_eq!(counterparty_node_id, node_a.node_id()); let cur_height = node_b.status().current_best_block.height; let blocks_to_go = confirmation_height - cur_height; - generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize); + generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize).await; node_b.sync_wallets().unwrap(); node_a.sync_wallets().unwrap(); }, @@ -967,7 +1208,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::BroadcastAwaitingConfirmation { .. } => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 1); + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; node_b.sync_wallets().unwrap(); node_a.sync_wallets().unwrap(); @@ -977,7 +1218,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::AwaitingThresholdConfirmations { .. 
} => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 5); + generate_blocks_and_wait(&bitcoind, electrsd, 5).await; node_b.sync_wallets().unwrap(); node_a.sync_wallets().unwrap(); @@ -995,7 +1236,7 @@ pub(crate) fn do_channel_full_cycle( assert_eq!(counterparty_node_id, node_b.node_id()); let cur_height = node_a.status().current_best_block.height; let blocks_to_go = confirmation_height - cur_height; - generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize); + generate_blocks_and_wait(&bitcoind, electrsd, blocks_to_go as usize).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); }, @@ -1008,7 +1249,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::BroadcastAwaitingConfirmation { .. } => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 1); + generate_blocks_and_wait(&bitcoind, electrsd, 1).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1018,7 +1259,7 @@ pub(crate) fn do_channel_full_cycle( PendingSweepBalance::AwaitingThresholdConfirmations { .. } => {}, _ => panic!("Unexpected balance state!"), } - generate_blocks_and_wait(&bitcoind, electrsd, 5); + generate_blocks_and_wait(&bitcoind, electrsd, 5).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); } @@ -1029,7 +1270,7 @@ pub(crate) fn do_channel_full_cycle( + invoice_amount_3_msat + determined_amount_msat + keysend_amount_msat) - / 1000; + / 1000 - splice_out_sat; let node_a_upper_bound_sat = (premine_amount_sat - funding_amount_sat) + (funding_amount_sat - sum_of_all_payments_sat); let node_a_lower_bound_sat = node_a_upper_bound_sat - onchain_fee_buffer_sat; @@ -1050,7 +1291,7 @@ pub(crate) fn do_channel_full_cycle( .list_payments_with_filter(|p| p.direction == PaymentDirection::Inbound && matches!(p.kind, PaymentKind::Onchain { .. })) .len(), - 2 + 3 ); assert_eq!( node_b @@ -1072,14 +1313,121 @@ pub(crate) fn do_channel_full_cycle( // A `KVStore` impl for testing purposes that wraps all our `KVStore`s and asserts their synchronicity. 
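+// The async `KVStore` implementation below offloads every call to the blocking `TestSyncStoreInner` via `tokio::task::spawn_blocking`, while the `KVStoreSync` implementation calls directly into the same inner store, so both interfaces hit the identical serialized test stores.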
pub(crate) struct TestSyncStore { + inner: Arc<TestSyncStoreInner>, +} + +impl TestSyncStore { + pub(crate) fn new(dest_dir: PathBuf) -> Self { + let inner = Arc::new(TestSyncStoreInner::new(dest_dir)); + Self { inner } + } +} + +impl KVStore for TestSyncStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Pin<Box<dyn Future<Output = Result<Vec<u8>, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.read_internal(&primary_namespace, &secondary_namespace, &key) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("IO operation failed due to join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>, + ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.write_internal(&primary_namespace, &secondary_namespace, &key, buf) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("IO operation failed due to join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> Pin<Box<dyn Future<Output = Result<(), io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("IO operation failed due to join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> Pin<Box<dyn Future<Output = Result<Vec<String>, io::Error>> + Send>> { + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let inner = Arc::clone(&self.inner); + let fut = tokio::task::spawn_blocking(move || { + inner.list_internal(&primary_namespace, &secondary_namespace) + }); + Box::pin(async move { + fut.await.unwrap_or_else(|e| { + let msg = format!("IO operation failed due to join error: {}", e); + Err(io::Error::new(io::ErrorKind::Other, msg)) + }) + }) + } +} + +impl KVStoreSync for TestSyncStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> lightning::io::Result<Vec<u8>> { + self.inner.read_internal(primary_namespace, secondary_namespace, key) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>, + ) -> lightning::io::Result<()> { + self.inner.write_internal(primary_namespace, secondary_namespace, key, buf) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> lightning::io::Result<()> { + self.inner.remove_internal(primary_namespace, secondary_namespace, key, lazy) + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> lightning::io::Result<Vec<String>> { + self.inner.list_internal(primary_namespace, secondary_namespace) + } +} + +struct
TestSyncStoreInner { serializer: RwLock<()>, test_store: TestStore, fs_store: FilesystemStore, sqlite_store: SqliteStore, } -impl TestSyncStore { - pub(crate) fn new(dest_dir: PathBuf) -> Self { +impl TestSyncStoreInner { + fn new(dest_dir: PathBuf) -> Self { let serializer = RwLock::new(()); let mut fs_dir = dest_dir.clone(); fs_dir.push("fs_store"); @@ -1090,6 +1438,7 @@ impl TestSyncStore { sql_dir, Some("test_sync_db".to_string()), Some("test_sync_table".to_string()), + None, ) .unwrap(); let test_store = TestStore::new(false); @@ -1099,9 +1448,10 @@ impl TestSyncStore { fn do_list( &self, primary_namespace: &str, secondary_namespace: &str, ) -> lightning::io::Result<Vec<String>> { - let fs_res = self.fs_store.list(primary_namespace, secondary_namespace); - let sqlite_res = self.sqlite_store.list(primary_namespace, secondary_namespace); - let test_res = self.test_store.list(primary_namespace, secondary_namespace); + let fs_res = KVStoreSync::list(&self.fs_store, primary_namespace, secondary_namespace); + let sqlite_res = + KVStoreSync::list(&self.sqlite_store, primary_namespace, secondary_namespace); + let test_res = KVStoreSync::list(&self.test_store, primary_namespace, secondary_namespace); match fs_res { Ok(mut list) => { @@ -1124,17 +1474,17 @@ }, } } -} -impl KVStore for TestSyncStore { - fn read( + fn read_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> lightning::io::Result<Vec<u8>> { let _guard = self.serializer.read().unwrap(); - let fs_res = self.fs_store.read(primary_namespace, secondary_namespace, key); - let sqlite_res = self.sqlite_store.read(primary_namespace, secondary_namespace, key); - let test_res = self.test_store.read(primary_namespace, secondary_namespace, key); + let fs_res = KVStoreSync::read(&self.fs_store, primary_namespace, secondary_namespace, key); + let sqlite_res = + KVStoreSync::read(&self.sqlite_store, primary_namespace, secondary_namespace, key); + let test_res = + KVStoreSync::read(&self.test_store, primary_namespace, secondary_namespace, key); match fs_res { Ok(read) => { @@ -1152,13 +1502,31 @@ } } - fn write( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + fn write_internal( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec<u8>, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); - let fs_res = self.fs_store.write(primary_namespace, secondary_namespace, key, buf); - let sqlite_res = self.sqlite_store.write(primary_namespace, secondary_namespace, key, buf); - let test_res = self.test_store.write(primary_namespace, secondary_namespace, key, buf); + let fs_res = KVStoreSync::write( + &self.fs_store, + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); + let sqlite_res = KVStoreSync::write( + &self.sqlite_store, + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); + let test_res = KVStoreSync::write( + &self.test_store, + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); assert!(self .do_list(primary_namespace, secondary_namespace) @@ -1179,14 +1547,26 @@ } } - fn remove( + fn remove_internal( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); - let fs_res = self.fs_store.remove(primary_namespace, secondary_namespace, key, lazy); - let sqlite_res = - self.sqlite_store.remove(primary_namespace,
secondary_namespace, key, lazy); - let test_res = self.test_store.remove(primary_namespace, secondary_namespace, key, lazy); + let fs_res = + KVStoreSync::remove(&self.fs_store, primary_namespace, secondary_namespace, key, lazy); + let sqlite_res = KVStoreSync::remove( + &self.sqlite_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); + let test_res = KVStoreSync::remove( + &self.test_store, + primary_namespace, + secondary_namespace, + key, + lazy, + ); assert!(!self .do_list(primary_namespace, secondary_namespace) @@ -1207,7 +1587,7 @@ } } - fn list( + fn list_internal( &self, primary_namespace: &str, secondary_namespace: &str, ) -> lightning::io::Result<Vec<String>> { let _guard = self.serializer.read().unwrap(); diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index f77311fb2..e8eb72a1d 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -9,29 +9,24 @@ mod common; -use ldk_node::bitcoin::secp256k1::PublicKey; -use ldk_node::bitcoin::Amount; -use ldk_node::lightning::ln::msgs::SocketAddress; -use ldk_node::{Builder, Event}; -use lightning_invoice::{Bolt11InvoiceDescription, Description}; +use std::default::Default; +use std::str::FromStr; use clightningrpc::lightningrpc::LightningRPC; use clightningrpc::responses::NetworkAddress; - use electrsd::corepc_client::client_sync::Auth; use electrsd::corepc_node::Client as BitcoindClient; - use electrum_client::Client as ElectrumClient; -use lightning_invoice::Bolt11Invoice; - -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; - -use std::default::Default; -use std::str::FromStr; +use ldk_node::bitcoin::secp256k1::PublicKey; +use ldk_node::bitcoin::Amount; +use ldk_node::lightning::ln::msgs::SocketAddress; +use ldk_node::{Builder, Event}; +use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; -#[test] -fn test_cln() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_cln() { // Setup bitcoind / electrs clients let bitcoind_client = BitcoindClient::new_with_auth( "http://127.0.0.1:18443", @@ -41,7 +36,7 @@ fn test_cln() { let electrs_client = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); // Give electrs a kick.
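+ // Mining a block and waiting for electrs to index it ensures the Electrum endpoint is actually serving data before the test proceeds.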
- common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1); + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1).await; // Setup LDK Node let config = common::random_config(true); @@ -59,7 +54,8 @@ fn test_cln() { &electrs_client, vec![address], premine_amount, - ); + ) + .await; // Setup CLN let sock = "/tmp/lightning-rpc"; @@ -72,7 +68,7 @@ fn test_cln() { if info.blockheight > 0 { break info; } - std::thread::sleep(std::time::Duration::from_millis(250)); + tokio::time::sleep(std::time::Duration::from_millis(250)).await; } }; let cln_node_id = PublicKey::from_str(&cln_info.id).unwrap(); @@ -97,13 +93,13 @@ fn test_cln() { .unwrap(); let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); - common::wait_for_tx(&electrs_client, funding_txo.txid); - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6); + common::wait_for_tx(&electrs_client, funding_txo.txid).await; + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6).await; node.sync_wallets().unwrap(); let user_channel_id = common::expect_channel_ready_event!(node, cln_node_id); // Send a payment to CLN - let mut rng = thread_rng(); + let mut rng = rng(); let rand_label: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); let cln_invoice = cln_client.invoice(Some(10_000_000), &rand_label, &rand_label, None, None, None).unwrap(); diff --git a/tests/integration_tests_lnd.rs b/tests/integration_tests_lnd.rs index 0232e8f2e..311a11c3c 100755 --- a/tests/integration_tests_lnd.rs +++ b/tests/integration_tests_lnd.rs @@ -2,29 +2,25 @@ mod common; +use std::default::Default; +use std::str::FromStr; + +use bitcoin::hex::DisplayHex; +use electrsd::corepc_client::client_sync::Auth; +use electrsd::corepc_node::Client as BitcoindClient; +use electrum_client::Client as ElectrumClient; use ldk_node::bitcoin::secp256k1::PublicKey; use ldk_node::bitcoin::Amount; use ldk_node::lightning::ln::msgs::SocketAddress; use ldk_node::{Builder, Event}; - +use lightning_invoice::{Bolt11InvoiceDescription, Description}; +use lnd_grpc_rust::lnrpc::invoice::InvoiceState::Settled as LndInvoiceStateSettled; use lnd_grpc_rust::lnrpc::{ - invoice::InvoiceState::Settled as LndInvoiceStateSettled, GetInfoRequest as LndGetInfoRequest, - GetInfoResponse as LndGetInfoResponse, Invoice as LndInvoice, - ListInvoiceRequest as LndListInvoiceRequest, QueryRoutesRequest as LndQueryRoutesRequest, - Route as LndRoute, SendRequest as LndSendRequest, + GetInfoRequest as LndGetInfoRequest, GetInfoResponse as LndGetInfoResponse, + Invoice as LndInvoice, ListInvoiceRequest as LndListInvoiceRequest, + QueryRoutesRequest as LndQueryRoutesRequest, Route as LndRoute, SendRequest as LndSendRequest, }; use lnd_grpc_rust::{connect, LndClient}; - -use electrsd::corepc_client::client_sync::Auth; -use electrsd::corepc_node::Client as BitcoindClient; - -use electrum_client::Client as ElectrumClient; -use lightning_invoice::{Bolt11InvoiceDescription, Description}; - -use bitcoin::hex::DisplayHex; - -use std::default::Default; -use std::str::FromStr; use tokio::fs; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -38,7 +34,7 @@ async fn test_lnd() { let electrs_client = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); // Give electrs a kick. 
- common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1); + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1).await; // Setup LDK Node let config = common::random_config(true); @@ -56,7 +52,8 @@ async fn test_lnd() { &electrs_client, vec![address], premine_amount, - ); + ) + .await; // Setup LND let endpoint = "127.0.0.1:8081"; @@ -77,8 +74,8 @@ async fn test_lnd() { .unwrap(); let funding_txo = common::expect_channel_pending_event!(node, lnd_node_id); - common::wait_for_tx(&electrs_client, funding_txo.txid); - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6); + common::wait_for_tx(&electrs_client, funding_txo.txid).await; + common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6).await; node.sync_wallets().unwrap(); let user_channel_id = common::expect_channel_ready_event!(node, lnd_node_id); diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index b21387521..d6c7c9447 100644 --- a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -7,96 +7,112 @@ mod common; +use std::collections::HashSet; +use std::str::FromStr; +use std::sync::Arc; + +use bitcoin::address::NetworkUnchecked; +use bitcoin::hashes::sha256::Hash as Sha256Hash; +use bitcoin::hashes::Hash; +use bitcoin::{Address, Amount, ScriptBuf}; +use common::logging::{init_log_logger, validate_log_entry, MultiNodeLogger, TestLogWriter}; use common::{ - do_channel_full_cycle, expect_channel_pending_event, expect_channel_ready_event, expect_event, - expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, - logging::{init_log_logger, validate_log_entry, TestLogWriter}, - open_channel, premine_and_distribute_funds, random_config, random_listening_addresses, - setup_bitcoind_and_electrsd, setup_builder, setup_node, setup_two_nodes, wait_for_tx, - TestChainSource, TestSyncStore, + bump_fee_and_broadcast, distribute_funds_unconfirmed, do_channel_full_cycle, + expect_channel_pending_event, expect_channel_ready_event, expect_event, + expect_payment_claimable_event, expect_payment_received_event, expect_payment_successful_event, + expect_splice_pending_event, generate_blocks_and_wait, open_channel, open_channel_push_amt, + premine_and_distribute_funds, premine_blocks, prepare_rbf, random_config, + random_listening_addresses, setup_bitcoind_and_electrsd, setup_builder, setup_node, + setup_node_for_async_payments, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; - -use ldk_node::config::EsploraSyncConfig; +use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig}; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ - ConfirmationStatus, PaymentDirection, PaymentKind, PaymentStatus, QrPaymentResult, - SendingParameters, + ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, + QrPaymentResult, }; -use ldk_node::{Builder, Event, NodeError}; - +use ldk_node::{Builder, DynStore, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; -use lightning::util::persist::KVStore; - +use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11InvoiceDescription, Description}; - -use bitcoin::address::NetworkUnchecked; -use bitcoin::hashes::Hash; -use bitcoin::Address; -use bitcoin::Amount; +use lightning_types::payment::{PaymentHash, PaymentPreimage}; use log::LevelFilter; -use std::str::FromStr; -use std::sync::Arc; - -#[test] -fn channel_full_cycle() { 
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_electrum() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_electrum() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Electrum(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_bitcoind_rpc_sync() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::BitcoindRpcSync(&bitcoind); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_bitcoind() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_bitcoind_rest_sync() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let chain_source = TestChainSource::BitcoindRpc(&bitcoind); + let chain_source = TestChainSource::BitcoindRestSync(&bitcoind); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) + .await; } -#[test] -fn channel_full_cycle_force_close() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) + .await; } -#[test] -fn channel_full_cycle_force_close_trusted_no_reserve() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) + .await; } -#[test] -fn channel_full_cycle_0conf() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); 
do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) + .await; } -#[test] -fn channel_full_cycle_legacy_staticremotekey() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false) + .await; } -#[test] -fn channel_open_fails_when_funds_insufficient() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -111,7 +127,8 @@ fn channel_open_fails_when_funds_insufficient() { &electrsd.client, vec![addr_a, addr_b], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); @@ -130,8 +147,8 @@ fn channel_open_fails_when_funds_insufficient() { ); } -#[test] -fn multi_hop_sending() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn multi_hop_sending() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); @@ -154,7 +171,8 @@ fn multi_hop_sending() { &electrsd.client, addresses, Amount::from_sat(premine_amount_sat), - ); + ) + .await; for n in &nodes { n.sync_wallets().unwrap(); @@ -169,18 +187,18 @@ fn multi_hop_sending() { // \ / // (1M:0)- N3 -(1M:0) - open_channel(&nodes[0], &nodes[1], 100_000, true, &electrsd); - open_channel(&nodes[1], &nodes[2], 1_000_000, true, &electrsd); + open_channel(&nodes[0], &nodes[1], 100_000, true, &electrsd).await; + open_channel(&nodes[1], &nodes[2], 1_000_000, true, &electrsd).await; // We need to sync wallets in-between back-to-back channel opens from the same node so BDK // wallet picks up on the broadcast funding tx and doesn't double-spend itself. // // TODO: Remove once fixed in BDK. nodes[1].sync_wallets().unwrap(); - open_channel(&nodes[1], &nodes[3], 1_000_000, true, &electrsd); - open_channel(&nodes[2], &nodes[4], 1_000_000, true, &electrsd); - open_channel(&nodes[3], &nodes[4], 1_000_000, true, &electrsd); + open_channel(&nodes[1], &nodes[3], 1_000_000, true, &electrsd).await; + open_channel(&nodes[2], &nodes[4], 1_000_000, true, &electrsd).await; + open_channel(&nodes[3], &nodes[4], 1_000_000, true, &electrsd).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; for n in &nodes { n.sync_wallets().unwrap(); @@ -198,13 +216,13 @@ fn multi_hop_sending() { expect_event!(nodes[4], ChannelReady); // Sleep a bit for gossip to propagate. 
- std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; - let sending_params = SendingParameters { - max_total_routing_fee_msat: Some(Some(75_000).into()), - max_total_cltv_expiry_delta: Some(1000), - max_path_count: Some(10), - max_channel_saturation_power_of_half: Some(2), + let route_params = RouteParametersConfig { + max_total_routing_fee_msat: Some(75_000), + max_total_cltv_expiry_delta: 1000, + max_path_count: 10, + max_channel_saturation_power_of_half: 2, }; let invoice_description = @@ -213,7 +231,7 @@ fn multi_hop_sending() { .bolt11_payment() .receive(2_500_000, &invoice_description.clone().into(), 9217) .unwrap(); - nodes[0].bolt11_payment().send(&invoice, Some(sending_params)).unwrap(); + nodes[0].bolt11_payment().send(&invoice, Some(route_params)).unwrap(); expect_event!(nodes[1], PaymentForwarded); @@ -227,14 +245,14 @@ fn multi_hop_sending() { expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); } -#[test] -fn start_stop_reinit() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn start_stop_reinit() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config = random_config(true); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); - let test_sync_store: Arc<dyn KVStore + Sync + Send> = + let test_sync_store: Arc<DynStore> = Arc::new(TestSyncStore::new(config.node_config.storage_dir_path.clone().into())); let sync_config = EsploraSyncConfig { background_sync_config: None }; @@ -257,7 +275,8 @@ fn start_stop_reinit() { &electrsd.client, vec![funding_address], expected_amount, - ); + ) + .await; node.sync_wallets().unwrap(); assert_eq!(node.list_balances().spendable_onchain_balance_sats, expected_amount.to_sat()); @@ -296,8 +315,8 @@ fn start_stop_reinit() { reinitialized_node.stop().unwrap(); } -#[test] -fn onchain_send_receive() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn onchain_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -315,7 +334,8 @@ fn onchain_send_receive() { &electrsd.client, vec![addr_a.clone(), addr_b.clone()], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -342,8 +362,8 @@ fn onchain_send_receive() { let channel_amount_sat = 1_000_000; let reserve_amount_sat = 25_000; - open_channel(&node_b, &node_a, channel_amount_sat, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_b, &node_a, channel_amount_sat, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -385,7 +405,7 @@ fn onchain_send_receive() { let amount_to_send_sats = 54321; let txid = node_b.onchain_payment().send_to_address(&addr_a, amount_to_send_sats, None).unwrap(); - wait_for_tx(&electrsd.client, txid); + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -412,7 +432,7 @@ fn onchain_send_receive() { assert_eq!(payment_a.amount_msat, payment_b.amount_msat); assert_eq!(payment_a.fee_paid_msat, payment_b.fee_paid_msat); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap();
node_b.sync_wallets().unwrap(); @@ -450,8 +470,8 @@ fn onchain_send_receive() { let addr_b = node_b.onchain_payment().new_address().unwrap(); let txid = node_a.onchain_payment().send_all_to_address(&addr_b, true, None).unwrap(); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -473,8 +493,8 @@ fn onchain_send_receive() { let addr_b = node_b.onchain_payment().new_address().unwrap(); let txid = node_a.onchain_payment().send_all_to_address(&addr_b, false, None).unwrap(); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -496,8 +516,8 @@ fn onchain_send_receive() { assert_eq!(node_b_payments.len(), 5); } -#[test] -fn onchain_send_all_retains_reserve() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn onchain_send_all_retains_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); @@ -514,7 +534,8 @@ fn onchain_send_all_retains_reserve() { &electrsd.client, vec![addr_a.clone(), addr_b.clone()], Amount::from_sat(premine_amount_sat), - ); + ) + .await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -524,8 +545,8 @@ fn onchain_send_all_retains_reserve() { // Send all over, with 0 reserve as we don't have any channels open. let txid = node_a.onchain_payment().send_all_to_address(&addr_b, true, None).unwrap(); - wait_for_tx(&electrsd.client, txid); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -542,15 +563,15 @@ fn onchain_send_all_retains_reserve() { .0 .parse() .unwrap(); - wait_for_tx(&electrsd.client, txid); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, reserve_amount_sat); // Open a channel. 
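+ // The channel is funded by node B with the full premined amount, so the send-all checks below exercise the reserve handling.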
- open_channel(&node_b, &node_a, premine_amount_sat, false, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_b, &node_a, premine_amount_sat, false, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); expect_channel_ready_event!(node_a, node_b.node_id()); @@ -565,8 +586,8 @@ fn onchain_send_all_retains_reserve() { // Send all over again, this time ensuring the reserve is accounted for let txid = node_b.onchain_payment().send_all_to_address(&addr_a, true, None).unwrap(); - wait_for_tx(&electrsd.client, txid); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -579,8 +600,8 @@ fn onchain_send_all_retains_reserve() { .contains(&node_a.list_balances().spendable_onchain_balance_sats)); } -#[test] -fn onchain_wallet_recovery() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn onchain_wallet_recovery() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -599,7 +620,8 @@ fn onchain_wallet_recovery() { &electrsd.client, vec![addr_1], Amount::from_sat(premine_amount_sat), - ); + ) + .await; original_node.sync_wallets().unwrap(); assert_eq!(original_node.list_balances().spendable_onchain_balance_sats, premine_amount_sat); @@ -612,9 +634,9 @@ fn onchain_wallet_recovery() { .0 .parse() .unwrap(); - wait_for_tx(&electrsd.client, txid); + wait_for_tx(&electrsd.client, txid).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; original_node.sync_wallets().unwrap(); assert_eq!( @@ -648,9 +670,9 @@ fn onchain_wallet_recovery() { .0 .parse() .unwrap(); - wait_for_tx(&electrsd.client, txid); + wait_for_tx(&electrsd.client, txid).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; recovered_node.sync_wallets().unwrap(); assert_eq!( @@ -659,8 +681,144 @@ fn onchain_wallet_recovery() { ); } -#[test] -fn sign_verify_msg() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_rbf_via_mempool() { + run_rbf_test(false).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_rbf_via_direct_block_insertion() { + run_rbf_test(true).await; +} + +// `is_insert_block`: +// - `true`: transaction is mined immediately (no mempool), testing confirmed-Tx handling. +// - `false`: transaction stays in mempool until confirmation, testing unconfirmed-Tx handling. +async fn run_rbf_test(is_insert_block: bool) { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind); + let chain_source_electrsd = TestChainSource::Electrum(&electrsd); + let chain_source_esplora = TestChainSource::Esplora(&electrsd); + + macro_rules! 
config_node { + ($chain_source:expr, $anchor_channels:expr) => {{ + let config_a = random_config($anchor_channels); + let node = setup_node(&$chain_source, config_a, None); + node + }}; + } + let anchor_channels = false; + let nodes = vec![ + config_node!(chain_source_electrsd, anchor_channels), + config_node!(chain_source_bitcoind, anchor_channels), + config_node!(chain_source_esplora, anchor_channels), + ]; + + let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client); + premine_blocks(bitcoind, electrs).await; + + // Helper declarations before starting the test + let all_addrs = + nodes.iter().map(|node| node.onchain_payment().new_address().unwrap()).collect::<Vec<_>>(); + let amount_sat = 2_100_000; + let mut txid; + macro_rules! distribute_funds_all_nodes { + () => { + txid = distribute_funds_unconfirmed( + bitcoind, + electrs, + all_addrs.clone(), + Amount::from_sat(amount_sat), + ) + .await; + }; + } + macro_rules! validate_balances { + ($expected_balance_sat:expr, $is_spendable:expr) => { + let spend_balance = if $is_spendable { $expected_balance_sat } else { 0 }; + for node in &nodes { + node.sync_wallets().unwrap(); + let balances = node.list_balances(); + assert_eq!(balances.spendable_onchain_balance_sats, spend_balance); + assert_eq!(balances.total_onchain_balance_sats, $expected_balance_sat); + } + }; + } + + let scripts_buf: HashSet<ScriptBuf> = + all_addrs.iter().map(|addr| addr.script_pubkey()).collect(); + let mut tx; + let mut fee_output_index; + + // Redirect the outputs paying the nodes to fresh non-node addresses + distribute_funds_all_nodes!(); + validate_balances!(amount_sat, false); + (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf); + tx.output.iter_mut().for_each(|output| { + if scripts_buf.contains(&output.script_pubkey) { + let new_addr = bitcoind.new_address().unwrap(); + output.script_pubkey = new_addr.script_pubkey(); + } + }); + bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await; + validate_balances!(0, is_insert_block); + + // Not modifying the output scripts, but still bumping the fee.
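+ // The recipient scripts stay unchanged here, so the nodes should still see the full `amount_sat` once the replacement lands.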
+ distribute_funds_all_nodes!();
+ validate_balances!(amount_sat, false);
+ (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf);
+ bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await;
+ validate_balances!(amount_sat, is_insert_block);
+
+ let mut final_amount_sat = amount_sat * 2;
+ let value_sat = 21_000;
+
+ // Increase the value of the nodes' outputs
+ distribute_funds_all_nodes!();
+ (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf);
+ tx.output.iter_mut().for_each(|output| {
+ if scripts_buf.contains(&output.script_pubkey) {
+ output.value = Amount::from_sat(output.value.to_sat() + value_sat);
+ }
+ });
+ bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await;
+ final_amount_sat += value_sat;
+ validate_balances!(final_amount_sat, is_insert_block);
+
+ // Decrease the value of the nodes' outputs
+ distribute_funds_all_nodes!();
+ final_amount_sat += amount_sat;
+ (tx, fee_output_index) = prepare_rbf(electrs, txid, &scripts_buf);
+ tx.output.iter_mut().for_each(|output| {
+ if scripts_buf.contains(&output.script_pubkey) {
+ output.value = Amount::from_sat(output.value.to_sat() - value_sat);
+ }
+ });
+ bump_fee_and_broadcast(bitcoind, electrs, tx, fee_output_index, is_insert_block).await;
+ final_amount_sat -= value_sat;
+ validate_balances!(final_amount_sat, is_insert_block);
+
+ if !is_insert_block {
+ generate_blocks_and_wait(bitcoind, electrs, 1).await;
+ validate_balances!(final_amount_sat, true);
+ }
+
+ // Check if it is possible to send all funds from the nodes
+ let mut txids = Vec::new();
+ let addr = bitcoind.new_address().unwrap();
+ nodes.iter().for_each(|node| {
+ let txid = node.onchain_payment().send_all_to_address(&addr, true, None).unwrap();
+ txids.push(txid);
+ });
+ for txid in txids {
+ wait_for_tx(electrs, txid).await;
+ }
+ generate_blocks_and_wait(bitcoind, electrs, 6).await;
+ validate_balances!(0, true);
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn sign_verify_msg() {
 let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let config = random_config(true);
 let chain_source = TestChainSource::Esplora(&electrsd);
@@ -673,13 +831,28 @@ fn sign_verify_msg() {
 assert!(node.verify_signature(msg, sig.as_str(), &pkey));
 }
-#[test]
-fn connection_restart_behavior() {
- do_connection_restart_behavior(true);
- do_connection_restart_behavior(false);
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn connection_multi_listen() {
+ let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+ let chain_source = TestChainSource::Esplora(&electrsd);
+ let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false);
+
+ let node_id_b = node_b.node_id();
+
+ let node_addrs_b = node_b.listening_addresses().unwrap();
+ for node_addr_b in &node_addrs_b {
+ node_a.connect(node_id_b, node_addr_b.clone(), false).unwrap();
+ node_a.disconnect(node_id_b).unwrap();
+ }
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn connection_restart_behavior() {
+ do_connection_restart_behavior(true).await;
+ do_connection_restart_behavior(false).await;
 }
-fn do_connection_restart_behavior(persist: bool) {
+async fn do_connection_restart_behavior(persist: bool) {
 let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let chain_source = TestChainSource::Esplora(&electrsd);
 let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false);
@@ -688,11 +861,6 @@ fn do_connection_restart_behavior(persist: bool) {
 let node_id_b = node_b.node_id();
 let node_addr_b = node_b.listening_addresses().unwrap().first().unwrap().clone();
-
- while !node_b.status().is_listening {
- std::thread::sleep(std::time::Duration::from_millis(10));
- }
-
 node_a.connect(node_id_b, node_addr_b, persist).unwrap();
 let peer_details_a = node_a.list_peers().first().unwrap().clone();
@@ -712,7 +880,7 @@ fn do_connection_restart_behavior(persist: bool) {
 node_a.start().unwrap();
 // Sleep a bit to allow for the reconnect to happen.
- std::thread::sleep(std::time::Duration::from_secs(5));
+ tokio::time::sleep(std::time::Duration::from_secs(5)).await;
 if persist {
 let peer_details_a = node_a.list_peers().first().unwrap().clone();
@@ -730,8 +898,8 @@ fn do_connection_restart_behavior(persist: bool) {
 }
 }
-#[test]
-fn concurrent_connections_succeed() {
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn concurrent_connections_succeed() {
 let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let chain_source = TestChainSource::Esplora(&electrsd);
 let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false);
@@ -742,10 +910,6 @@ fn concurrent_connections_succeed() {
 let node_id_b = node_b.node_id();
 let node_addr_b = node_b.listening_addresses().unwrap().first().unwrap().clone();
-
- while !node_b.status().is_listening {
- std::thread::sleep(std::time::Duration::from_millis(10));
- }
-
 let mut handles = Vec::new();
 for _ in 0..10 {
 let thread_node = Arc::clone(&node_a);
@@ -761,25 +925,155 @@ fn concurrent_connections_succeed() {
 }
 }
-#[test]
-fn simple_bolt12_send_receive() {
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn splice_channel() {
 let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let chain_source = TestChainSource::Esplora(&electrsd);
 let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false);
 let address_a = node_a.onchain_payment().new_address().unwrap();
+ let address_b = node_b.onchain_payment().new_address().unwrap();
 let premine_amount_sat = 5_000_000;
 premine_and_distribute_funds(
 &bitcoind.client,
 &electrsd.client,
- vec![address_a],
+ vec![address_a, address_b],
 Amount::from_sat(premine_amount_sat),
+ )
+ .await;
+
+ node_a.sync_wallets().unwrap();
+ node_b.sync_wallets().unwrap();
+
+ assert_eq!(node_a.list_balances().total_onchain_balance_sats, premine_amount_sat);
+ assert_eq!(node_b.list_balances().total_onchain_balance_sats, premine_amount_sat);
+
+ // Open a channel with Node A contributing the funding
+ open_channel(&node_a, &node_b, 4_000_000, false, &electrsd).await;
+
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+
+ node_a.sync_wallets().unwrap();
+ node_b.sync_wallets().unwrap();
+
+ let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id());
+ let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id());
+
+ let opening_transaction_fee_sat = 156;
+ let closing_transaction_fee_sat = 614;
+ let anchor_output_sat = 330;
+
+ assert_eq!(
+ node_a.list_balances().total_onchain_balance_sats,
+ premine_amount_sat - 4_000_000 - opening_transaction_fee_sat
+ );
+ assert_eq!(
+ node_a.list_balances().total_lightning_balance_sats,
+ 4_000_000 - closing_transaction_fee_sat - anchor_output_sat
+ );
+ assert_eq!(node_b.list_balances().total_lightning_balance_sats, 0);
+
+ // Test that splicing and payments fail when there are insufficient funds
+ let address = node_b.onchain_payment().new_address().unwrap();
+ let amount_msat = 400_000_000;
+
+ assert_eq!(
node_b.splice_in(&user_channel_id_b, node_b.node_id(), 5_000_000), + Err(NodeError::ChannelSplicingFailed), + ); + assert_eq!( + node_b.splice_out(&user_channel_id_b, node_b.node_id(), &address, amount_msat / 1000), + Err(NodeError::ChannelSplicingFailed), ); + assert_eq!( + node_b.spontaneous_payment().send(amount_msat, node_a.node_id(), None), + Err(NodeError::PaymentSendingFailed) + ); + + // Splice-in funds for Node B so that it has outbound liquidity to make a payment + node_b.splice_in(&user_channel_id_b, node_a.node_id(), 4_000_000).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let splice_in_fee_sat = 252; + + assert_eq!( + node_b.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - splice_in_fee_sat + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000); + + let payment_id = + node_b.spontaneous_payment().send(amount_msat, node_a.node_id(), None).unwrap(); + + expect_payment_successful_event!(node_b, Some(payment_id), None); + expect_payment_received_event!(node_a, amount_msat); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + // Mine a block to give time for the HTLC to resolve + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 1).await; + + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat + amount_msat / 1000 + ); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 4_000_000 - amount_msat / 1000); + + // Splice-out funds for Node A from the payment sent by Node B + let address = node_a.onchain_payment().new_address().unwrap(); + node_a.splice_out(&user_channel_id_a, node_b.node_id(), &address, amount_msat / 1000).unwrap(); + + expect_splice_pending_event!(node_a, node_b.node_id()); + expect_splice_pending_event!(node_b, node_a.node_id()); + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let splice_out_fee_sat = 183; + + assert_eq!( + node_a.list_balances().total_onchain_balance_sats, + premine_amount_sat - 4_000_000 - opening_transaction_fee_sat + amount_msat / 1000 + ); + assert_eq!( + node_a.list_balances().total_lightning_balance_sats, + 4_000_000 - closing_transaction_fee_sat - anchor_output_sat - splice_out_fee_sat + ); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn simple_bolt12_send_receive() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premine_amount_sat = 5_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premine_amount_sat), + ) + .await; + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + + 
generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -789,11 +1083,11 @@ fn simple_bolt12_send_receive() { // Sleep until we broadcasted a node announcement. while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + tokio::time::sleep(std::time::Duration::from_millis(10)).await; } // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let expected_amount_msat = 100_000_000; let offer = @@ -802,7 +1096,7 @@ fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); let payment_id = node_a .bolt12_payment() - .send(&offer, expected_quantity, expected_payer_note.clone()) + .send(&offer, expected_quantity, expected_payer_note.clone(), None) .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); @@ -823,8 +1117,8 @@ fn simple_bolt12_send_receive() { assert_eq!(offer_id, offer.id()); assert_eq!(&expected_quantity, qty); assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. + // TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + // API currently doesn't allow to do that. }, _ => { panic!("Unexpected payment kind"); @@ -858,7 +1152,7 @@ fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); assert!(node_a .bolt12_payment() - .send_using_amount(&offer, less_than_offer_amount, None, None) + .send_using_amount(&offer, less_than_offer_amount, None, None, None) .is_err()); let payment_id = node_a .bolt12_payment() @@ -867,6 +1161,7 @@ fn simple_bolt12_send_receive() { expected_amount_msat, expected_quantity, expected_payer_note.clone(), + None, ) .unwrap(); @@ -889,8 +1184,8 @@ fn simple_bolt12_send_receive() { assert_eq!(offer_id, offer.id()); assert_eq!(&expected_quantity, qty); assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. + // TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + // API currently doesn't allow to do that. hash.unwrap() }, _ => { @@ -924,7 +1219,13 @@ fn simple_bolt12_send_receive() { let expected_payer_note = Some("Test".to_string()); let refund = node_b .bolt12_payment() - .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .initiate_refund( + overpaid_amount, + 3600, + expected_quantity, + expected_payer_note.clone(), + None, + ) .unwrap(); let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); expect_payment_received_event!(node_a, overpaid_amount); @@ -955,8 +1256,8 @@ fn simple_bolt12_send_receive() { assert!(preimage.is_some()); assert_eq!(&expected_quantity, qty); assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) - //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 - //API currently doesn't allow to do that. + // TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 + // API currently doesn't allow to do that. 
}, _ => { panic!("Unexpected payment kind"); @@ -982,8 +1283,146 @@ fn simple_bolt12_send_receive() { assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } -#[test] -fn test_node_announcement_propagation() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn async_payment() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + + let mut config_sender = random_config(true); + config_sender.node_config.listening_addresses = None; + config_sender.node_config.node_alias = None; + config_sender.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender ".to_string()))); + let node_sender = setup_node_for_async_payments( + &chain_source, + config_sender, + None, + Some(AsyncPaymentsRole::Client), + ); + + let mut config_sender_lsp = random_config(true); + config_sender_lsp.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("sender_lsp ".to_string()))); + let node_sender_lsp = setup_node_for_async_payments( + &chain_source, + config_sender_lsp, + None, + Some(AsyncPaymentsRole::Server), + ); + + let mut config_receiver_lsp = random_config(true); + config_receiver_lsp.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver_lsp".to_string()))); + + let node_receiver_lsp = setup_node_for_async_payments( + &chain_source, + config_receiver_lsp, + None, + Some(AsyncPaymentsRole::Server), + ); + + let mut config_receiver = random_config(true); + config_receiver.node_config.listening_addresses = None; + config_receiver.node_config.node_alias = None; + config_receiver.log_writer = + TestLogWriter::Custom(Arc::new(MultiNodeLogger::new("receiver ".to_string()))); + let node_receiver = setup_node(&chain_source, config_receiver, None); + + let address_sender = node_sender.onchain_payment().new_address().unwrap(); + let address_sender_lsp = node_sender_lsp.onchain_payment().new_address().unwrap(); + let address_receiver_lsp = node_receiver_lsp.onchain_payment().new_address().unwrap(); + let address_receiver = node_receiver.onchain_payment().new_address().unwrap(); + let premine_amount_sat = 4_000_000; + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_sender, address_sender_lsp, address_receiver_lsp, address_receiver], + Amount::from_sat(premine_amount_sat), + ) + .await; + + node_sender.sync_wallets().unwrap(); + node_sender_lsp.sync_wallets().unwrap(); + node_receiver_lsp.sync_wallets().unwrap(); + node_receiver.sync_wallets().unwrap(); + + open_channel(&node_sender, &node_sender_lsp, 400_000, false, &electrsd).await; + open_channel(&node_sender_lsp, &node_receiver_lsp, 400_000, true, &electrsd).await; + open_channel_push_amt( + &node_receiver, + &node_receiver_lsp, + 400_000, + Some(200_000_000), + false, + &electrsd, + ) + .await; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_sender.sync_wallets().unwrap(); + node_sender_lsp.sync_wallets().unwrap(); + node_receiver_lsp.sync_wallets().unwrap(); + node_receiver.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_sender, node_sender_lsp.node_id()); + expect_channel_ready_event!(node_sender_lsp, node_sender.node_id()); + expect_channel_ready_event!(node_sender_lsp, node_receiver_lsp.node_id()); + expect_channel_ready_event!(node_receiver_lsp, node_sender_lsp.node_id()); + expect_channel_ready_event!(node_receiver_lsp, node_receiver.node_id()); + expect_channel_ready_event!(node_receiver, 
node_receiver_lsp.node_id());
+
+ let has_node_announcements = |node: &ldk_node::Node| {
+ node.network_graph()
+ .list_nodes()
+ .iter()
+ .filter(|n| {
+ node.network_graph().node(n).map_or(false, |info| info.announcement_info.is_some())
+ })
+ .count() >= 2
+ };
+
+ // Wait for everyone to see all channels and node announcements.
+ while node_sender.network_graph().list_channels().len() < 1
+ || node_sender_lsp.network_graph().list_channels().len() < 1
+ || node_receiver_lsp.network_graph().list_channels().len() < 1
+ || node_receiver.network_graph().list_channels().len() < 1
+ || !has_node_announcements(&node_sender)
+ || !has_node_announcements(&node_sender_lsp)
+ || !has_node_announcements(&node_receiver_lsp)
+ || !has_node_announcements(&node_receiver)
+ {
+ tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+ }
+
+ let recipient_id = vec![1, 2, 3];
+ let blinded_paths =
+ node_receiver_lsp.bolt12_payment().blinded_paths_for_async_recipient(recipient_id).unwrap();
+ node_receiver.bolt12_payment().set_paths_to_static_invoice_server(blinded_paths).unwrap();
+
+ let offer = loop {
+ if let Ok(offer) = node_receiver.bolt12_payment().receive_async() {
+ break offer;
+ }
+
+ tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+ };
+
+ node_receiver.stop().unwrap();
+
+ let payment_id =
+ node_sender.bolt12_payment().send_using_amount(&offer, 5_000, None, None, None).unwrap();
+
+ // Sleep to allow the payment to reach a state where the HTLC is held, waiting for the receiver to come online.
+ tokio::time::sleep(std::time::Duration::from_millis(3000)).await;
+
+ node_receiver.start().unwrap();
+
+ expect_payment_successful_event!(node_sender, Some(payment_id), None);
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_node_announcement_propagation() {
 let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let chain_source = TestChainSource::Esplora(&electrsd);
@@ -1021,14 +1460,15 @@ fn test_node_announcement_propagation() {
 &electrsd.client,
 vec![address_a],
 Amount::from_sat(premine_amount_sat),
- );
+ )
+ .await;
 node_a.sync_wallets().unwrap();
 // Open an announced channel from node_a to node_b
- open_channel(&node_a, &node_b, 4_000_000, true, &electrsd);
+ open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await;
- generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6);
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
 node_a.sync_wallets().unwrap();
 node_b.sync_wallets().unwrap();
@@ -1038,11 +1478,11 @@ fn test_node_announcement_propagation() {
 // Wait until node_b broadcasts a node announcement
 while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() {
- std::thread::sleep(std::time::Duration::from_millis(10));
+ tokio::time::sleep(std::time::Duration::from_millis(10)).await;
 }
 // Sleep to make sure the node announcement propagates
- std::thread::sleep(std::time::Duration::from_secs(1));
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
 // Get node info from the other node's perspective
 let node_a_info = node_b.network_graph().node(&NodeId::from_pubkey(&node_a.node_id())).unwrap();
@@ -1073,8 +1513,8 @@ fn test_node_announcement_propagation() {
 assert_eq!(node_b_announcement_info.addresses, node_b_listening_addresses);
 }
-#[test]
-fn generate_bip21_uri() {
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn generate_bip21_uri() {
 let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let chain_source =
TestChainSource::Esplora(&electrsd); @@ -1103,11 +1543,12 @@ fn generate_bip21_uri() { &electrsd.client, vec![address_a], Amount::from_sat(premined_sats), - ); + ) + .await; node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1127,8 +1568,8 @@ fn generate_bip21_uri() { assert!(uqr_payment.contains("lno=")); } -#[test] -fn unified_qr_send_receive() { +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn unified_qr_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = TestChainSource::Esplora(&electrsd); @@ -1142,11 +1583,12 @@ fn unified_qr_send_receive() { &electrsd.client, vec![address_a], Amount::from_sat(premined_sats), - ); + ) + .await; node_a.sync_wallets().unwrap(); - open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1156,18 +1598,18 @@ fn unified_qr_send_receive() { // Sleep until we broadcast a node announcement. while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { - std::thread::sleep(std::time::Duration::from_millis(10)); + tokio::time::sleep(std::time::Duration::from_millis(10)).await; } // Sleep one more sec to make sure the node announcement propagates. - std::thread::sleep(std::time::Duration::from_secs(1)); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let expected_amount_sats = 100_000; let expiry_sec = 4_000; let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); let uri_str = uqr_payment.clone().unwrap(); - let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str, None) { Ok(QrPaymentResult::Bolt12 { payment_id }) => { println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); payment_id @@ -1188,7 +1630,7 @@ fn unified_qr_send_receive() { // Cut off the BOLT12 part to fallback to BOLT11. let uri_str_without_offer = uri_str.split("&lno=").next().unwrap(); let invoice_payment_id: PaymentId = - match node_a.unified_qr_payment().send(uri_str_without_offer) { + match node_a.unified_qr_payment().send(uri_str_without_offer, None) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected Bolt11 payment but got Bolt12"); }, @@ -1211,7 +1653,7 @@ fn unified_qr_send_receive() { // Cut off any lightning part to fallback to on-chain only. 
let uri_str_without_lightning = onchain_uqr_payment.split("&lightning=").next().unwrap(); - let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning) { + let txid = match node_a.unified_qr_payment().send(&uri_str_without_lightning, None) { Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { panic!("Expected on-chain payment but got Bolt12") }, @@ -1227,8 +1669,8 @@ fn unified_qr_send_receive() { }, }; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); - wait_for_tx(&electrsd.client, txid); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + wait_for_tx(&electrsd.client, txid).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); @@ -1237,10 +1679,14 @@ fn unified_qr_send_receive() { assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); } -#[test] -fn lsps2_client_service_integration() { - let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn lsps2_client_service_integration() { + do_lsps2_client_service_integration(true).await; + do_lsps2_client_service_integration(false).await; +} +async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); let sync_config = EsploraSyncConfig { background_sync_config: None }; @@ -1258,6 +1704,7 @@ fn lsps2_client_service_integration() { min_channel_lifetime: 100, min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, + client_trusts_lsp, }; let service_config = random_config(true); @@ -1294,16 +1741,17 @@ fn lsps2_client_service_integration() { &electrsd.client, vec![service_addr, client_addr, payer_addr], Amount::from_sat(premine_amount_sat), - ); + ) + .await; service_node.sync_wallets().unwrap(); client_node.sync_wallets().unwrap(); payer_node.sync_wallets().unwrap(); // Open a channel payer -> service that will allow paying the JIT invoice println!("Opening channel payer_node -> service_node!"); - open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd); + open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await; - generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; service_node.sync_wallets().unwrap(); payer_node.sync_wallets().unwrap(); expect_channel_ready_event!(payer_node, service_node.node_id()); @@ -1324,6 +1772,7 @@ fn lsps2_client_service_integration() { let payment_id = payer_node.bolt11_payment().send(&jit_invoice, None).unwrap(); expect_channel_pending_event!(service_node, client_node.node_id()); expect_channel_ready_event!(service_node, client_node.node_id()); + expect_event!(service_node, PaymentForwarded); expect_channel_pending_event!(client_node, service_node.node_id()); expect_channel_ready_event!(client_node, service_node.node_id()); @@ -1349,23 +1798,116 @@ fn lsps2_client_service_integration() { println!("Generating regular invoice!"); let invoice_description = - Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()); + Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap()).into(); let amount_msat = 5_000_000; - let invoice = client_node - .bolt11_payment() - .receive(amount_msat, &invoice_description.into(), 1024) - .unwrap(); + let invoice = + client_node.bolt11_payment().receive(amount_msat, &invoice_description, 1024).unwrap(); // 
Have the payer_node pay the invoice, to check regular forwards service_node -> client_node
 // are working as expected.
 println!("Paying regular invoice!");
 let payment_id = payer_node.bolt11_payment().send(&invoice, None).unwrap();
 expect_payment_successful_event!(payer_node, Some(payment_id), None);
+ expect_event!(service_node, PaymentForwarded);
 expect_payment_received_event!(client_node, amount_msat);
+
+ ////////////////////////////////////////////////////////////////////////////
+ // receive_via_jit_channel_for_hash and claim_for_hash
+ ////////////////////////////////////////////////////////////////////////////
+ println!("Generating JIT invoice!");
+ // Increase the amount to make sure it does not fit into the existing channels.
+ let jit_amount_msat = 200_000_000;
+ let manual_preimage = PaymentPreimage([42u8; 32]);
+ let manual_payment_hash: PaymentHash = manual_preimage.into();
+ let jit_invoice = client_node
+ .bolt11_payment()
+ .receive_via_jit_channel_for_hash(
+ jit_amount_msat,
+ &invoice_description,
+ 1024,
+ None,
+ manual_payment_hash,
+ )
+ .unwrap();
+
+ // Have the payer_node pay the invoice, thereby triggering channel open service_node -> client_node.
+ println!("Paying JIT invoice!");
+ let payment_id = payer_node.bolt11_payment().send(&jit_invoice, None).unwrap();
+ expect_channel_pending_event!(service_node, client_node.node_id());
+ expect_channel_ready_event!(service_node, client_node.node_id());
+ expect_channel_pending_event!(client_node, service_node.node_id());
+ expect_channel_ready_event!(client_node, service_node.node_id());
+
+ let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000;
+ let expected_received_amount_msat = jit_amount_msat - service_fee_msat;
+ let claimable_amount_msat = expect_payment_claimable_event!(
+ client_node,
+ payment_id,
+ manual_payment_hash,
+ expected_received_amount_msat
+ );
+ println!("Claiming payment!");
+ client_node
+ .bolt11_payment()
+ .claim_for_hash(manual_payment_hash, claimable_amount_msat, manual_preimage)
+ .unwrap();
+
+ expect_event!(service_node, PaymentForwarded);
+ expect_payment_successful_event!(payer_node, Some(payment_id), None);
+ let client_payment_id =
+ expect_payment_received_event!(client_node, expected_received_amount_msat).unwrap();
+ let client_payment = client_node.payment(&client_payment_id).unwrap();
+ match client_payment.kind {
+ PaymentKind::Bolt11Jit { counterparty_skimmed_fee_msat, .. } => {
+ assert_eq!(counterparty_skimmed_fee_msat, Some(service_fee_msat));
+ },
+ _ => panic!("Unexpected payment kind"),
+ }
+
+ ////////////////////////////////////////////////////////////////////////////
+ // receive_via_jit_channel_for_hash and fail_for_hash
+ ////////////////////////////////////////////////////////////////////////////
+ println!("Generating JIT invoice!");
+ // Increase the amount to make sure it does not fit into the existing channels.
+ let jit_amount_msat = 400_000_000;
+ let manual_preimage = PaymentPreimage([43u8; 32]);
+ let manual_payment_hash: PaymentHash = manual_preimage.into();
+ let jit_invoice = client_node
+ .bolt11_payment()
+ .receive_via_jit_channel_for_hash(
+ jit_amount_msat,
+ &invoice_description,
+ 1024,
+ None,
+ manual_payment_hash,
+ )
+ .unwrap();
+
+ // Have the payer_node pay the invoice, thereby triggering channel open service_node -> client_node.
+ println!("Paying JIT invoice!");
+ let payment_id = payer_node.bolt11_payment().send(&jit_invoice, None).unwrap();
+ expect_channel_pending_event!(service_node, client_node.node_id());
+ expect_channel_ready_event!(service_node, client_node.node_id());
+ expect_channel_pending_event!(client_node, service_node.node_id());
+ expect_channel_ready_event!(client_node, service_node.node_id());
+
+ let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000;
+ let expected_received_amount_msat = jit_amount_msat - service_fee_msat;
+ expect_payment_claimable_event!(
+ client_node,
+ payment_id,
+ manual_payment_hash,
+ expected_received_amount_msat
+ );
+ println!("Failing payment!");
+ client_node.bolt11_payment().fail_for_hash(manual_payment_hash).unwrap();
+
+ expect_event!(payer_node, PaymentFailed);
+ assert_eq!(client_node.payment(&payment_id).unwrap().status, PaymentStatus::Failed);
 }
-#[test]
-fn facade_logging() {
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn facade_logging() {
 let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
 let chain_source = TestChainSource::Esplora(&electrsd);
@@ -1382,6 +1924,73 @@ fn facade_logging() {
 }
 }
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn spontaneous_send_with_custom_preimage() {
+ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+ let chain_source = TestChainSource::Esplora(&electrsd);
+ let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false);
+
+ let address_a = node_a.onchain_payment().new_address().unwrap();
+ let premine_sat = 1_000_000;
+ premine_and_distribute_funds(
+ &bitcoind.client,
+ &electrsd.client,
+ vec![address_a],
+ Amount::from_sat(premine_sat),
+ )
+ .await;
+ node_a.sync_wallets().unwrap();
+ node_b.sync_wallets().unwrap();
+ open_channel(&node_a, &node_b, 500_000, true, &electrsd).await;
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+ node_a.sync_wallets().unwrap();
+ node_b.sync_wallets().unwrap();
+ expect_channel_ready_event!(node_a, node_b.node_id());
+ expect_channel_ready_event!(node_b, node_a.node_id());
+
+ let seed = b"test_payment_preimage";
+ let bytes: Sha256Hash = Sha256Hash::hash(seed);
+ let custom_bytes = bytes.to_byte_array();
+ let custom_preimage = PaymentPreimage(custom_bytes);
+
+ let amount_msat = 100_000;
+ let payment_id = node_a
+ .spontaneous_payment()
+ .send_with_preimage(amount_msat, node_b.node_id(), custom_preimage, None)
+ .unwrap();
+
+ // Check payment status and verify the stored preimage
+ expect_payment_successful_event!(node_a, Some(payment_id), None);
+ let details: PaymentDetails =
+ node_a.list_payments_with_filter(|p| p.id == payment_id).first().unwrap().clone();
+ assert_eq!(details.status, PaymentStatus::Succeeded);
+ if let PaymentKind::Spontaneous { preimage: Some(pi), .. } = details.kind {
+ assert_eq!(pi.0, custom_bytes);
+ } else {
+ panic!("Expected a spontaneous PaymentKind with a preimage");
+ }
+
+ // Verify receiver side (node_b)
+ expect_payment_received_event!(node_b, amount_msat);
+ let receiver_payments: Vec<PaymentDetails> = node_b.list_payments_with_filter(|p| {
+ p.direction == PaymentDirection::Inbound
+ && matches!(p.kind, PaymentKind::Spontaneous { .. })
+ });
+
+ assert_eq!(receiver_payments.len(), 1);
+ let receiver_details = &receiver_payments[0];
+ assert_eq!(receiver_details.status, PaymentStatus::Succeeded);
+ assert_eq!(receiver_details.amount_msat, Some(amount_msat));
+ assert_eq!(receiver_details.direction, PaymentDirection::Inbound);
+
+ // Verify receiver also has the same preimage
+ if let PaymentKind::Spontaneous { preimage: Some(pi), .. } = &receiver_details.kind {
+ assert_eq!(pi.0, custom_bytes);
+ } else {
+ panic!("Expected receiver to have spontaneous PaymentKind with preimage");
+ }
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
 async fn drop_in_async_context() {
 let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd();
@@ -1392,3 +2001,303 @@ async fn drop_in_async_context() {
 let node = setup_node(&chain_source, config, Some(seed_bytes));
 node.stop().unwrap();
 }
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn lsps2_client_trusts_lsp() {
+ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+
+ let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
+
+ let sync_config = EsploraSyncConfig { background_sync_config: None };
+
+ // Set up three nodes: service, client, and payer
+ let channel_opening_fee_ppm = 10_000;
+ let channel_over_provisioning_ppm = 100_000;
+ let lsps2_service_config = LSPS2ServiceConfig {
+ require_token: None,
+ advertise_service: false,
+ channel_opening_fee_ppm,
+ channel_over_provisioning_ppm,
+ max_payment_size_msat: 1_000_000_000,
+ min_payment_size_msat: 0,
+ min_channel_lifetime: 100,
+ min_channel_opening_fee_msat: 0,
+ max_client_to_self_delay: 1024,
+ client_trusts_lsp: true,
+ };
+
+ let service_config = random_config(true);
+ setup_builder!(service_builder, service_config.node_config);
+ service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
+ service_builder.set_liquidity_provider_lsps2(lsps2_service_config);
+ let service_node = service_builder.build().unwrap();
+ service_node.start().unwrap();
+ let service_node_id = service_node.node_id();
+ let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone();
+
+ let client_config = random_config(true);
+ setup_builder!(client_builder, client_config.node_config);
+ client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
+ client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None);
+ let client_node = client_builder.build().unwrap();
+ client_node.start().unwrap();
+ let client_node_id = client_node.node_id();
+
+ let payer_config = random_config(true);
+ setup_builder!(payer_builder, payer_config.node_config);
+ payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
+ let payer_node = payer_builder.build().unwrap();
+ payer_node.start().unwrap();
+
+ let service_addr_onchain = service_node.onchain_payment().new_address().unwrap();
+ let client_addr_onchain = client_node.onchain_payment().new_address().unwrap();
+ let payer_addr_onchain = payer_node.onchain_payment().new_address().unwrap();
+
+ let premine_amount_sat = 10_000_000;
+
+ premine_and_distribute_funds(
+ &bitcoind.client,
+ &electrsd.client,
+ vec![service_addr_onchain, client_addr_onchain, payer_addr_onchain],
+ Amount::from_sat(premine_amount_sat),
+ )
+ .await;
+ service_node.sync_wallets().unwrap();
+ client_node.sync_wallets().unwrap();
+ payer_node.sync_wallets().unwrap();
+ println!("Premine complete!");
+ // Open a channel payer -> service that will allow paying the JIT invoice
+ open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await;
+
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+ service_node.sync_wallets().unwrap();
+ payer_node.sync_wallets().unwrap();
+ expect_channel_ready_event!(payer_node, service_node.node_id());
+ expect_channel_ready_event!(service_node, payer_node.node_id());
+
+ let invoice_description =
+ Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap());
+ let jit_amount_msat = 100_000_000;
+
+ println!("Generating JIT invoice!");
+ let manual_preimage = PaymentPreimage([42u8; 32]);
+ let manual_payment_hash: PaymentHash = manual_preimage.into();
+ let res = client_node
+ .bolt11_payment()
+ .receive_via_jit_channel_for_hash(
+ jit_amount_msat,
+ &invoice_description.into(),
+ 1024,
+ None,
+ manual_payment_hash,
+ )
+ .unwrap();
+
+ // Have the payer_node pay the invoice, thereby triggering channel open service_node -> client_node.
+ println!("Paying JIT invoice!");
+ let payment_id = payer_node.bolt11_payment().send(&res, None).unwrap();
+ println!("Payment ID: {:?}", payment_id);
+ let funding_txo = expect_channel_pending_event!(service_node, client_node.node_id());
+ expect_channel_ready_event!(service_node, client_node.node_id());
+ expect_channel_pending_event!(client_node, service_node.node_id());
+ expect_channel_ready_event!(client_node, service_node.node_id());
+
+ // Check the funding transaction hasn't been broadcast yet and nodes aren't seeing it.
+ println!("Try to find funding tx... It won't be found yet, as the client has not claimed it.");
+ tokio::time::sleep(std::time::Duration::from_secs(3)).await;
+ let mempool = bitcoind.client.get_raw_mempool().unwrap().into_model().unwrap();
+ let funding_tx_found = mempool.0.iter().any(|txid| *txid == funding_txo.txid);
+ assert!(!funding_tx_found, "Funding transaction should NOT be broadcast yet");
+
+ service_node.sync_wallets().unwrap();
+ client_node.sync_wallets().unwrap();
+ assert_eq!(
+ client_node
+ .list_channels()
+ .iter()
+ .find(|c| c.counterparty_node_id == service_node_id)
+ .unwrap()
+ .confirmations,
+ Some(0)
+ );
+ assert_eq!(
+ service_node
+ .list_channels()
+ .iter()
+ .find(|c| c.counterparty_node_id == client_node_id)
+ .unwrap()
+ .confirmations,
+ Some(0)
+ );
+
+ // Now claim the JIT payment, which should release the funding transaction
+ let service_fee_msat = (jit_amount_msat * channel_opening_fee_ppm as u64) / 1_000_000;
+ let expected_received_amount_msat = jit_amount_msat - service_fee_msat;
+
+ let _ = expect_payment_claimable_event!(
+ client_node,
+ payment_id,
+ manual_payment_hash,
+ expected_received_amount_msat
+ );
+
+ client_node
+ .bolt11_payment()
+ .claim_for_hash(manual_payment_hash, jit_amount_msat, manual_preimage)
+ .unwrap();
+
+ expect_payment_successful_event!(payer_node, Some(payment_id), None);
+
+ let _ = expect_payment_received_event!(client_node, expected_received_amount_msat).unwrap();
+
+ // Check the nodes pick up on the confirmed funding tx now.
+ wait_for_tx(&electrsd.client, funding_txo.txid).await;
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+ service_node.sync_wallets().unwrap();
+ client_node.sync_wallets().unwrap();
+ assert_eq!(
+ client_node
+ .list_channels()
+ .iter()
+ .find(|c| c.counterparty_node_id == service_node_id)
+ .unwrap()
+ .confirmations,
+ Some(6)
+ );
+ assert_eq!(
+ service_node
+ .list_channels()
+ .iter()
+ .find(|c| c.counterparty_node_id == client_node_id)
+ .unwrap()
+ .confirmations,
+ Some(6)
+ );
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn lsps2_lsp_trusts_client_but_client_does_not_claim() {
+ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+
+ let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
+
+ let sync_config = EsploraSyncConfig { background_sync_config: None };
+
+ // Set up three nodes: service, client, and payer
+ let channel_opening_fee_ppm = 10_000;
+ let channel_over_provisioning_ppm = 100_000;
+ let lsps2_service_config = LSPS2ServiceConfig {
+ require_token: None,
+ advertise_service: false,
+ channel_opening_fee_ppm,
+ channel_over_provisioning_ppm,
+ max_payment_size_msat: 1_000_000_000,
+ min_payment_size_msat: 0,
+ min_channel_lifetime: 100,
+ min_channel_opening_fee_msat: 0,
+ max_client_to_self_delay: 1024,
+ client_trusts_lsp: false,
+ };
+
+ let service_config = random_config(true);
+ setup_builder!(service_builder, service_config.node_config);
+ service_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
+ service_builder.set_liquidity_provider_lsps2(lsps2_service_config);
+ let service_node = service_builder.build().unwrap();
+ service_node.start().unwrap();
+
+ let service_node_id = service_node.node_id();
+ let service_addr = service_node.listening_addresses().unwrap().first().unwrap().clone();
+
+ let client_config = random_config(true);
+ setup_builder!(client_builder, client_config.node_config);
+ client_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
+ client_builder.set_liquidity_source_lsps2(service_node_id, service_addr.clone(), None);
+ let client_node = client_builder.build().unwrap();
+ client_node.start().unwrap();
+
+ let client_node_id = client_node.node_id();
+
+ let payer_config = random_config(true);
+ setup_builder!(payer_builder, payer_config.node_config);
+ payer_builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config));
+ let payer_node = payer_builder.build().unwrap();
+ payer_node.start().unwrap();
+
+ let service_addr_onchain = service_node.onchain_payment().new_address().unwrap();
+ let client_addr_onchain = client_node.onchain_payment().new_address().unwrap();
+ let payer_addr_onchain = payer_node.onchain_payment().new_address().unwrap();
+
+ let premine_amount_sat = 10_000_000;
+
+ premine_and_distribute_funds(
+ &bitcoind.client,
+ &electrsd.client,
+ vec![service_addr_onchain, client_addr_onchain, payer_addr_onchain],
+ Amount::from_sat(premine_amount_sat),
+ )
+ .await;
+ service_node.sync_wallets().unwrap();
+ client_node.sync_wallets().unwrap();
+ payer_node.sync_wallets().unwrap();
+ println!("Premine complete!");
+ // Open a channel payer -> service that will allow paying the JIT invoice
+ open_channel(&payer_node, &service_node, 5_000_000, false, &electrsd).await;
+
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+ service_node.sync_wallets().unwrap();
+ payer_node.sync_wallets().unwrap();
+ expect_channel_ready_event!(payer_node, service_node.node_id());
+ expect_channel_ready_event!(service_node, payer_node.node_id());
+
+ let invoice_description =
+ Bolt11InvoiceDescription::Direct(Description::new(String::from("asdf")).unwrap());
+ let jit_amount_msat = 100_000_000;
+
+ println!("Generating JIT invoice!");
+ let manual_preimage = PaymentPreimage([42u8; 32]);
+ let manual_payment_hash: PaymentHash = manual_preimage.into();
+ let res = client_node
+ .bolt11_payment()
+ .receive_via_jit_channel_for_hash(
+ jit_amount_msat,
+ &invoice_description.into(),
+ 1024,
+ None,
+ manual_payment_hash,
+ )
+ .unwrap();
+
+ // Have the payer_node pay the invoice, thereby triggering channel open service_node -> client_node.
+ println!("Paying JIT invoice!");
+ let _payment_id = payer_node.bolt11_payment().send(&res, None).unwrap();
+ let funding_txo = expect_channel_pending_event!(service_node, client_node.node_id());
+ expect_channel_ready_event!(service_node, client_node.node_id());
+ expect_channel_pending_event!(client_node, service_node.node_id());
+ expect_channel_ready_event!(client_node, service_node.node_id());
+ println!("Waiting for funding transaction to be broadcast...");
+
+ // Check the nodes pick up on the confirmed funding tx now.
+ wait_for_tx(&electrsd.client, funding_txo.txid).await;
+ generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await;
+ service_node.sync_wallets().unwrap();
+ client_node.sync_wallets().unwrap();
+ assert_eq!(
+ client_node
+ .list_channels()
+ .iter()
+ .find(|c| c.counterparty_node_id == service_node_id)
+ .unwrap()
+ .confirmations,
+ Some(6)
+ );
+ assert_eq!(
+ service_node
+ .list_channels()
+ .iter()
+ .find(|c| c.counterparty_node_id == client_node_id)
+ .unwrap()
+ .confirmations,
+ Some(6)
+ );
+}
diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs
index 9d6ec158c..3b384ec45 100644
--- a/tests/integration_tests_vss.rs
+++ b/tests/integration_tests_vss.rs
@@ -9,11 +9,13 @@
 mod common;
-use ldk_node::Builder;
 use std::collections::HashMap;
-#[test]
-fn channel_full_cycle_with_vss_store() {
+use ldk_node::Builder;
+use rand::{rng, Rng};
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn channel_full_cycle_with_vss_store() {
 let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd();
 println!("== Node A ==");
 let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
@@ -51,5 +53,146 @@ fn channel_full_cycle_with_vss_store() {
 false,
 true,
 false,
- );
+ )
+ .await;
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn vss_v0_schema_backwards_compatibility() {
+ let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd();
+ let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
+ let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap();
+
+ let rand_suffix: String =
+ (0..7).map(|_| rng().sample(rand::distr::Alphanumeric) as char).collect();
+ let store_id = format!("v0_compat_test_{}", rand_suffix);
+ let storage_path = common::random_storage_path().to_str().unwrap().to_owned();
+ let seed_bytes = [42u8; 64];
+
+ // Set up a v0.6.2 `Node` persisted with the v0 schema.
+ let (old_balance, old_node_id) = {
+ let mut builder_old = ldk_node_062::Builder::new();
+ builder_old.set_network(bitcoin::Network::Regtest);
+ builder_old.set_storage_dir_path(storage_path.clone());
+ builder_old.set_entropy_seed_bytes(seed_bytes);
+ builder_old.set_chain_source_esplora(esplora_url.clone(), None);
+ let node_old = builder_old
+ .build_with_vss_store_and_fixed_headers(
+ vss_base_url.clone(),
+ store_id.clone(),
+ HashMap::new(),
+ )
+ .unwrap();
+
+ node_old.start().unwrap();
+ let addr_old = node_old.onchain_payment().new_address().unwrap();
+ common::premine_and_distribute_funds(
+ &bitcoind.client,
+ &electrsd.client,
+ vec![addr_old],
+ bitcoin::Amount::from_sat(100_000),
+ )
+ .await;
+ node_old.sync_wallets().unwrap();
+
+ let balance = node_old.list_balances().spendable_onchain_balance_sats;
+ assert!(balance > 0);
+ let node_id = node_old.node_id();
+
+ // Workaround necessary as v0.6.2's VSS runtime wasn't drop-safe in a tokio context.
+ tokio::task::block_in_place(move || {
+ node_old.stop().unwrap();
+ drop(node_old);
+ });
+
+ (balance, node_id)
+ };
+
+ // Now ensure we can still re-initialize from the same backend.
+ let mut builder_new = Builder::new();
+ builder_new.set_network(bitcoin::Network::Regtest);
+ builder_new.set_storage_dir_path(storage_path);
+ builder_new.set_entropy_seed_bytes(seed_bytes);
+ builder_new.set_chain_source_esplora(esplora_url, None);
+
+ let node_new = builder_new
+ .build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new())
+ .unwrap();
+
+ node_new.start().unwrap();
+ node_new.sync_wallets().unwrap();
+
+ let new_balance = node_new.list_balances().spendable_onchain_balance_sats;
+ let new_node_id = node_new.node_id();
+
+ assert_eq!(old_node_id, new_node_id);
+ assert_eq!(old_balance, new_balance);
+
+ node_new.stop().unwrap();
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn vss_node_restart() {
+ let (bitcoind, electrsd) = common::setup_bitcoind_and_electrsd();
+ let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
+ let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap();
+
+ let rand_suffix: String =
+ (0..7).map(|_| rng().sample(rand::distr::Alphanumeric) as char).collect();
+ let store_id = format!("restart_test_{}", rand_suffix);
+ let storage_path = common::random_storage_path().to_str().unwrap().to_owned();
+ let seed_bytes = [42u8; 64];
+
+ // Set up the initial node and fund it.
+ let (expected_balance_sats, expected_node_id) = {
+ let mut builder = Builder::new();
+ builder.set_network(bitcoin::Network::Regtest);
+ builder.set_storage_dir_path(storage_path.clone());
+ builder.set_entropy_seed_bytes(seed_bytes);
+ builder.set_chain_source_esplora(esplora_url.clone(), None);
+ let node = builder
+ .build_with_vss_store_and_fixed_headers(
+ vss_base_url.clone(),
+ store_id.clone(),
+ HashMap::new(),
+ )
+ .unwrap();
+
+ node.start().unwrap();
+ let addr = node.onchain_payment().new_address().unwrap();
+ common::premine_and_distribute_funds(
+ &bitcoind.client,
+ &electrsd.client,
+ vec![addr],
+ bitcoin::Amount::from_sat(100_000),
+ )
+ .await;
+ node.sync_wallets().unwrap();
+
+ let balance = node.list_balances().spendable_onchain_balance_sats;
+ assert!(balance > 0);
+ let node_id = node.node_id();
+
+ node.stop().unwrap();
+ (balance, node_id)
+ };
+
+ // Verify the node can be restarted from the VSS backend.
+ let mut builder = Builder::new();
+ builder.set_network(bitcoin::Network::Regtest);
+ builder.set_storage_dir_path(storage_path);
+ builder.set_entropy_seed_bytes(seed_bytes);
+ builder.set_chain_source_esplora(esplora_url, None);
+
+ let node = builder
+ .build_with_vss_store_and_fixed_headers(vss_base_url, store_id, HashMap::new())
+ .unwrap();
+
+ node.start().unwrap();
+ node.sync_wallets().unwrap();
+
+ assert_eq!(expected_node_id, node.node_id());
+ assert_eq!(expected_balance_sats, node.list_balances().spendable_onchain_balance_sats);
+
+ node.stop().unwrap();
}
diff --git a/tests/reorg_test.rs b/tests/reorg_test.rs
new file mode 100644
index 000000000..491a37fd4
--- /dev/null
+++ b/tests/reorg_test.rs
@@ -0,0 +1,203 @@
+mod common;
+use std::collections::HashMap;
+
+use bitcoin::Amount;
+use ldk_node::payment::{PaymentDirection, PaymentKind};
+use ldk_node::{Event, LightningBalance, PendingSweepBalance};
+use proptest::prelude::prop;
+use proptest::proptest;
+
+use crate::common::{
+ expect_event, generate_blocks_and_wait, invalidate_blocks, open_channel,
+ premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_node,
+ wait_for_outpoint_spend, TestChainSource,
+};
+
+proptest! {
+ #![proptest_config(proptest::test_runner::Config::with_cases(5))]
+ #[test]
+ fn reorg_test(reorg_depth in 1..=6usize, force_close in prop::bool::ANY) {
+ let rt = tokio::runtime::Builder::new_multi_thread()
+ .enable_all()
+ .build()
+ .unwrap();
+ rt.block_on(async {
+ let (bitcoind, electrsd) = setup_bitcoind_and_electrsd();
+
+ let chain_source_bitcoind = TestChainSource::BitcoindRpcSync(&bitcoind);
+ let chain_source_electrsd = TestChainSource::Electrum(&electrsd);
+ let chain_source_esplora = TestChainSource::Esplora(&electrsd);
+
+ macro_rules! config_node {
+ ($chain_source: expr, $anchor_channels: expr) => {{
+ let config_a = random_config($anchor_channels);
+ let node = setup_node(&$chain_source, config_a, None);
+ node
+ }};
+ }
+ let anchor_channels = true;
+ let nodes = vec![
+ config_node!(chain_source_electrsd, anchor_channels),
+ config_node!(chain_source_bitcoind, anchor_channels),
+ config_node!(chain_source_esplora, anchor_channels),
+ ];
+
+ let (bitcoind, electrs) = (&bitcoind.client, &electrsd.client);
+ macro_rules! reorg {
+ ($reorg_depth: expr) => {{
+ invalidate_blocks(bitcoind, $reorg_depth);
+ generate_blocks_and_wait(bitcoind, electrs, $reorg_depth).await;
+ }};
+ }
+
+ let amount_sat = 2_100_000;
+ let addr_nodes =
+ nodes.iter().map(|node| node.onchain_payment().new_address().unwrap()).collect::<Vec<_>>();
+ premine_and_distribute_funds(bitcoind, electrs, addr_nodes, Amount::from_sat(amount_sat)).await;
+
+ macro_rules! sync_wallets {
+ () => {
+ for node in &nodes {
+ node.sync_wallets().unwrap();
+ }
+ };
+ }
+ sync_wallets!();
+ nodes.iter().for_each(|node| {
+ assert_eq!(node.list_balances().spendable_onchain_balance_sats, amount_sat);
+ assert_eq!(node.list_balances().total_onchain_balance_sats, amount_sat);
+ });
+
+
+ let mut nodes_funding_tx = HashMap::new();
+ let funding_amount_sat = 2_000_000;
+ for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) {
+ let funding_txo = open_channel(node, next_node, funding_amount_sat, true, &electrsd).await;
+ nodes_funding_tx.insert(node.node_id(), funding_txo);
+ }
+
+ generate_blocks_and_wait(bitcoind, electrs, 6).await;
+ sync_wallets!();
+
+ reorg!(reorg_depth);
+ sync_wallets!();
+
+ macro_rules! collect_channel_ready_events {
+ ($node:expr, $expected:expr) => {{
+ let mut user_channels = HashMap::new();
+ for _ in 0..$expected {
+ match $node.next_event_async().await {
+ Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => {
+ $node.event_handled().unwrap();
+ user_channels.insert(counterparty_node_id, user_channel_id);
+ },
+ other => panic!("Unexpected event: {:?}", other),
+ }
+ }
+ user_channels
+ }};
+ }
+
+ let mut node_channels_id = HashMap::new();
+ for (i, node) in nodes.iter().enumerate() {
+ assert_eq!(
+ node
+ .list_payments_with_filter(|p| p.direction == PaymentDirection::Outbound
+ && matches!(p.kind, PaymentKind::Onchain { .. }))
+ .len(),
+ 1
+ );
+
+ let user_channels = collect_channel_ready_events!(node, 2);
+ let next_node = nodes.get((i + 1) % nodes.len()).unwrap();
+ let prev_node = nodes.get((i + nodes.len() - 1) % nodes.len()).unwrap();
+
+ assert!(user_channels.get(&Some(next_node.node_id())) != None);
+ assert!(user_channels.get(&Some(prev_node.node_id())) != None);
+
+ let user_channel_id =
+ user_channels.get(&Some(next_node.node_id())).expect("Missing user channel for node");
+ node_channels_id.insert(node.node_id(), *user_channel_id);
+ }
+
+
+ for (node, next_node) in nodes.iter().zip(nodes.iter().cycle().skip(1)) {
+ let user_channel_id = node_channels_id.get(&node.node_id()).expect("user channel id does not exist");
+ let funding = nodes_funding_tx.get(&node.node_id()).expect("funding tx does not exist");
+
+ if force_close {
+ node.force_close_channel(&user_channel_id, next_node.node_id(), None).unwrap();
+ } else {
+ node.close_channel(&user_channel_id, next_node.node_id()).unwrap();
+ }
+
+ expect_event!(node, ChannelClosed);
+ expect_event!(next_node, ChannelClosed);
+
+ wait_for_outpoint_spend(electrs, *funding).await;
+ }
+
+ reorg!(reorg_depth);
+ sync_wallets!();
+
+ generate_blocks_and_wait(bitcoind, electrs, 1).await;
+ sync_wallets!();
+
+ if force_close {
+ for node in &nodes {
+ node.sync_wallets().unwrap();
+ // If there is no more balance, there is nothing to process here.
+ if node.list_balances().lightning_balances.len() < 1 {
+ return;
+ }
+ match node.list_balances().lightning_balances[0] {
+ LightningBalance::ClaimableAwaitingConfirmations {
+ confirmation_height,
+ ..
+ } => {
+ let cur_height = node.status().current_best_block.height;
+ let blocks_to_go = confirmation_height - cur_height;
+ generate_blocks_and_wait(bitcoind, electrs, blocks_to_go as usize).await;
+ node.sync_wallets().unwrap();
+ },
+ _ => panic!("Unexpected balance state for node!"),
+ }
+
+ assert!(node.list_balances().lightning_balances.len() < 2);
+ assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0);
+ match node.list_balances().pending_balances_from_channel_closures[0] {
+ PendingSweepBalance::BroadcastAwaitingConfirmation { .. } => {},
+ _ => panic!("Unexpected balance state!"),
+ }
+
+ generate_blocks_and_wait(&bitcoind, electrs, 1).await;
+ node.sync_wallets().unwrap();
+ assert!(node.list_balances().lightning_balances.len() < 2);
+ assert!(node.list_balances().pending_balances_from_channel_closures.len() > 0);
+ match node.list_balances().pending_balances_from_channel_closures[0] {
+ PendingSweepBalance::AwaitingThresholdConfirmations { .. } => {},
+ _ => panic!("Unexpected balance state!"),
+ }
+ }
+ }
+
+ generate_blocks_and_wait(bitcoind, electrs, 6).await;
+ sync_wallets!();
+
+ reorg!(reorg_depth);
+ sync_wallets!();
+
+ let fee_sat = 7000;
+ // Check balances after the channels were closed
+ nodes.iter().for_each(|node| {
+ assert!(node.list_balances().spendable_onchain_balance_sats > amount_sat - fee_sat);
+ assert!(node.list_balances().spendable_onchain_balance_sats < amount_sat);
+
+ assert_eq!(node.list_balances().total_anchor_channels_reserve_sats, 0);
+ assert!(node.list_balances().lightning_balances.is_empty());
+
+ assert_eq!(node.next_event(), None);
+ });
+ })
+ }
+}
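Note on the final test file above: most tests in this patch simply swap `#[test]` for `#[tokio::test(flavor = "multi_thread", worker_threads = 1)]`, but `proptest!` generates its own synchronous `#[test]` harness, so `reorg_test` instead builds a Tokio runtime by hand and blocks on its async body. Below is a minimal, self-contained sketch of that pattern; the test and helper names (`sketch_reorg`, `run_case`) are illustrative placeholders, not part of the patch.

```rust
use proptest::proptest;

// Stand-in for the async test body; in the patch this is where nodes are
// set up, blocks are invalidated, and wallets are re-synced.
async fn run_case(reorg_depth: usize) {
    tokio::time::sleep(std::time::Duration::from_millis(reorg_depth as u64)).await;
}

proptest! {
    #[test]
    fn sketch_reorg(reorg_depth in 1..=6usize) {
        // `proptest!` only accepts synchronous test fns, so construct a
        // multi-threaded runtime manually and block on the async case.
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .unwrap();
        rt.block_on(run_case(reorg_depth));
    }
}
```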